xref: /linux/fs/ext4/super.c (revision 6f7e6393d1ce636bb7ec77a7fe7b77458fddf701)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/ext4/super.c
4  *
5  * Copyright (C) 1992, 1993, 1994, 1995
6  * Remy Card (card@masi.ibp.fr)
7  * Laboratoire MASI - Institut Blaise Pascal
8  * Universite Pierre et Marie Curie (Paris VI)
9  *
10  *  from
11  *
12  *  linux/fs/minix/inode.c
13  *
14  *  Copyright (C) 1991, 1992  Linus Torvalds
15  *
16  *  Big-endian to little-endian byte-swapping/bitmaps by
17  *        David S. Miller (davem@caip.rutgers.edu), 1995
18  */
19 
20 #include <linux/module.h>
21 #include <linux/string.h>
22 #include <linux/fs.h>
23 #include <linux/time.h>
24 #include <linux/vmalloc.h>
25 #include <linux/slab.h>
26 #include <linux/init.h>
27 #include <linux/blkdev.h>
28 #include <linux/backing-dev.h>
29 #include <linux/parser.h>
30 #include <linux/buffer_head.h>
31 #include <linux/exportfs.h>
32 #include <linux/vfs.h>
33 #include <linux/random.h>
34 #include <linux/mount.h>
35 #include <linux/namei.h>
36 #include <linux/quotaops.h>
37 #include <linux/seq_file.h>
38 #include <linux/ctype.h>
39 #include <linux/log2.h>
40 #include <linux/crc16.h>
41 #include <linux/dax.h>
42 #include <linux/uaccess.h>
43 #include <linux/iversion.h>
44 #include <linux/unicode.h>
45 #include <linux/part_stat.h>
46 #include <linux/kthread.h>
47 #include <linux/freezer.h>
48 #include <linux/fsnotify.h>
49 #include <linux/fs_context.h>
50 #include <linux/fs_parser.h>
51 #include <linux/fserror.h>
52 
53 #include "ext4.h"
54 #include "ext4_extents.h"	/* Needed for trace points definition */
55 #include "ext4_jbd2.h"
56 #include "xattr.h"
57 #include "acl.h"
58 #include "mballoc.h"
59 #include "fsmap.h"
60 
61 #define CREATE_TRACE_POINTS
62 #include <trace/events/ext4.h>
63 
64 static struct ext4_lazy_init *ext4_li_info;
65 static DEFINE_MUTEX(ext4_li_mtx);
66 static struct ratelimit_state ext4_mount_msg_ratelimit;
67 
68 static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
69 			     unsigned long journal_devnum);
70 static int ext4_show_options(struct seq_file *seq, struct dentry *root);
71 static void ext4_update_super(struct super_block *sb);
72 static int ext4_commit_super(struct super_block *sb);
73 static int ext4_mark_recovery_complete(struct super_block *sb,
74 					struct ext4_super_block *es);
75 static int ext4_clear_journal_err(struct super_block *sb,
76 				  struct ext4_super_block *es);
77 static int ext4_sync_fs(struct super_block *sb, int wait);
78 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
79 static int ext4_unfreeze(struct super_block *sb);
80 static int ext4_freeze(struct super_block *sb);
81 static inline int ext2_feature_set_ok(struct super_block *sb);
82 static inline int ext3_feature_set_ok(struct super_block *sb);
83 static void ext4_unregister_li_request(struct super_block *sb);
84 static void ext4_clear_request_list(void);
85 static struct inode *ext4_get_journal_inode(struct super_block *sb,
86 					    unsigned int journal_inum);
87 static int ext4_validate_options(struct fs_context *fc);
88 static int ext4_check_opt_consistency(struct fs_context *fc,
89 				      struct super_block *sb);
90 static void ext4_apply_options(struct fs_context *fc, struct super_block *sb);
91 static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param);
92 static int ext4_get_tree(struct fs_context *fc);
93 static int ext4_reconfigure(struct fs_context *fc);
94 static void ext4_fc_free(struct fs_context *fc);
95 static int ext4_init_fs_context(struct fs_context *fc);
96 static void ext4_kill_sb(struct super_block *sb);
97 static const struct fs_parameter_spec ext4_param_specs[];
98 
99 /*
100  * Lock ordering
101  *
102  * page fault path:
103  * mmap_lock -> sb_start_pagefault -> invalidate_lock (r) -> transaction start
104  *   -> page lock -> i_data_sem (rw)
105  *
106  * buffered write path:
107  * sb_start_write -> i_mutex -> mmap_lock
108  * sb_start_write -> i_mutex -> transaction start -> page lock ->
109  *   i_data_sem (rw)
110  *
111  * truncate:
112  * sb_start_write -> i_mutex -> invalidate_lock (w) -> i_mmap_rwsem (w) ->
113  *   page lock
114  * sb_start_write -> i_mutex -> invalidate_lock (w) -> transaction start ->
115  *   i_data_sem (rw)
116  *
117  * direct IO:
118  * sb_start_write -> i_mutex -> mmap_lock
119  * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
120  *
121  * writepages:
122  * transaction start -> page lock(s) -> i_data_sem (rw)
123  */
124 
125 static const struct fs_context_operations ext4_context_ops = {
126 	.parse_param	= ext4_parse_param,
127 	.get_tree	= ext4_get_tree,
128 	.reconfigure	= ext4_reconfigure,
129 	.free		= ext4_fc_free,
130 };
131 
132 
133 #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
134 static struct file_system_type ext2_fs_type = {
135 	.owner			= THIS_MODULE,
136 	.name			= "ext2",
137 	.init_fs_context	= ext4_init_fs_context,
138 	.parameters		= ext4_param_specs,
139 	.kill_sb		= ext4_kill_sb,
140 	.fs_flags		= FS_REQUIRES_DEV,
141 };
142 MODULE_ALIAS_FS("ext2");
143 MODULE_ALIAS("ext2");
144 #define IS_EXT2_SB(sb) ((sb)->s_type == &ext2_fs_type)
145 #else
146 #define IS_EXT2_SB(sb) (0)
147 #endif
148 
149 
150 static struct file_system_type ext3_fs_type = {
151 	.owner			= THIS_MODULE,
152 	.name			= "ext3",
153 	.init_fs_context	= ext4_init_fs_context,
154 	.parameters		= ext4_param_specs,
155 	.kill_sb		= ext4_kill_sb,
156 	.fs_flags		= FS_REQUIRES_DEV,
157 };
158 MODULE_ALIAS_FS("ext3");
159 MODULE_ALIAS("ext3");
160 #define IS_EXT3_SB(sb) ((sb)->s_type == &ext3_fs_type)
161 
162 
163 static inline void __ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
164 				  bh_end_io_t *end_io, bool simu_fail)
165 {
166 	if (simu_fail) {
167 		clear_buffer_uptodate(bh);
168 		unlock_buffer(bh);
169 		return;
170 	}
171 
172 	/*
173 	 * The buffer's verified bit is no longer valid after reading from
174 	 * disk again due to a write out error, so clear it to make sure we
175 	 * recheck the buffer contents.
176 	 */
177 	clear_buffer_verified(bh);
178 
179 	bh->b_end_io = end_io ? end_io : end_buffer_read_sync;
180 	get_bh(bh);
181 	submit_bh(REQ_OP_READ | op_flags, bh);
182 }
183 
184 void ext4_read_bh_nowait(struct buffer_head *bh, blk_opf_t op_flags,
185 			 bh_end_io_t *end_io, bool simu_fail)
186 {
187 	BUG_ON(!buffer_locked(bh));
188 
189 	if (ext4_buffer_uptodate(bh)) {
190 		unlock_buffer(bh);
191 		return;
192 	}
193 	__ext4_read_bh(bh, op_flags, end_io, simu_fail);
194 }
195 
196 int ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
197 		 bh_end_io_t *end_io, bool simu_fail)
198 {
199 	BUG_ON(!buffer_locked(bh));
200 
201 	if (ext4_buffer_uptodate(bh)) {
202 		unlock_buffer(bh);
203 		return 0;
204 	}
205 
206 	__ext4_read_bh(bh, op_flags, end_io, simu_fail);
207 
208 	wait_on_buffer(bh);
209 	if (buffer_uptodate(bh))
210 		return 0;
211 	return -EIO;
212 }
213 
214 int ext4_read_bh_lock(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
215 {
216 	lock_buffer(bh);
217 	if (!wait) {
218 		ext4_read_bh_nowait(bh, op_flags, NULL, false);
219 		return 0;
220 	}
221 	return ext4_read_bh(bh, op_flags, NULL, false);
222 }
223 
224 /*
225  * This works like __bread_gfp() except it uses ERR_PTR for error
226  * returns.  Currently with sb_bread it's impossible to distinguish
227  * between ENOMEM and EIO situations (since both result in a NULL
228  * return).
229  */
230 static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb,
231 					       sector_t block,
232 					       blk_opf_t op_flags, gfp_t gfp)
233 {
234 	struct buffer_head *bh;
235 	int ret;
236 
237 	bh = sb_getblk_gfp(sb, block, gfp);
238 	if (bh == NULL)
239 		return ERR_PTR(-ENOMEM);
240 	if (ext4_buffer_uptodate(bh))
241 		return bh;
242 
243 	ret = ext4_read_bh_lock(bh, REQ_META | op_flags, true);
244 	if (ret) {
245 		put_bh(bh);
246 		return ERR_PTR(ret);
247 	}
248 	return bh;
249 }
250 
251 struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block,
252 				   blk_opf_t op_flags)
253 {
254 	gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
255 			~__GFP_FS) | __GFP_MOVABLE;
256 
257 	return __ext4_sb_bread_gfp(sb, block, op_flags, gfp);
258 }
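/*
 * Illustrative caller pattern (a sketch only, not a caller taken from this
 * file): because the helpers above return ERR_PTR() instead of NULL, a
 * caller can distinguish -ENOMEM from -EIO, which sb_bread() cannot
 * express:
 *
 *	struct buffer_head *bh = ext4_sb_bread(sb, block, 0);
 *
 *	if (IS_ERR(bh))
 *		return PTR_ERR(bh);
 *	...
 *	brelse(bh);
 *
 * PTR_ERR(bh) here is either -ENOMEM (sb_getblk_gfp() failed) or -EIO
 * (the read itself failed).
 */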
259 
260 struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
261 					    sector_t block)
262 {
263 	gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
264 			~__GFP_FS);
265 
266 	return __ext4_sb_bread_gfp(sb, block, 0, gfp);
267 }
268 
269 struct buffer_head *ext4_sb_bread_nofail(struct super_block *sb,
270 					 sector_t block)
271 {
272 	gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
273 			~__GFP_FS) | __GFP_MOVABLE | __GFP_NOFAIL;
274 
275 	return __ext4_sb_bread_gfp(sb, block, 0, gfp);
276 }
277 
278 void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)
279 {
280 	struct buffer_head *bh = bdev_getblk(sb->s_bdev, block,
281 			sb->s_blocksize, GFP_NOWAIT);
282 
283 	if (likely(bh)) {
284 		if (trylock_buffer(bh))
285 			ext4_read_bh_nowait(bh, REQ_RAHEAD, NULL, false);
286 		brelse(bh);
287 	}
288 }
289 
290 static int ext4_verify_csum_type(struct super_block *sb,
291 				 struct ext4_super_block *es)
292 {
293 	if (!ext4_has_feature_metadata_csum(sb))
294 		return 1;
295 
296 	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
297 }
298 
299 __le32 ext4_superblock_csum(struct ext4_super_block *es)
300 {
301 	int offset = offsetof(struct ext4_super_block, s_checksum);
302 	__u32 csum;
303 
304 	csum = ext4_chksum(~0, (char *)es, offset);
305 
306 	return cpu_to_le32(csum);
307 }
308 
309 static int ext4_superblock_csum_verify(struct super_block *sb,
310 				       struct ext4_super_block *es)
311 {
312 	if (!ext4_has_feature_metadata_csum(sb))
313 		return 1;
314 
315 	return es->s_checksum == ext4_superblock_csum(es);
316 }
317 
318 void ext4_superblock_csum_set(struct super_block *sb)
319 {
320 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
321 
322 	if (!ext4_has_feature_metadata_csum(sb))
323 		return;
324 
325 	es->s_checksum = ext4_superblock_csum(es);
326 }
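/*
 * Note on coverage (informational): the checksum above is computed over the
 * superblock from byte 0 up to, but not including, the s_checksum field
 * itself, so an in-memory change to the superblock (for example updating
 * s_kbytes_written) must be followed by ext4_superblock_csum_set() before
 * the buffer is written out.
 */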
327 
328 ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
329 			       struct ext4_group_desc *bg)
330 {
331 	return le32_to_cpu(bg->bg_block_bitmap_lo) |
332 		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
333 		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
334 }
335 
336 ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
337 			       struct ext4_group_desc *bg)
338 {
339 	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
340 		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
341 		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
342 }
343 
344 ext4_fsblk_t ext4_inode_table(struct super_block *sb,
345 			      struct ext4_group_desc *bg)
346 {
347 	return le32_to_cpu(bg->bg_inode_table_lo) |
348 		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
349 		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
350 }
351 
352 __u32 ext4_free_group_clusters(struct super_block *sb,
353 			       struct ext4_group_desc *bg)
354 {
355 	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
356 		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
357 		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
358 }
359 
360 __u32 ext4_free_inodes_count(struct super_block *sb,
361 			      struct ext4_group_desc *bg)
362 {
363 	return le16_to_cpu(READ_ONCE(bg->bg_free_inodes_count_lo)) |
364 		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
365 		 (__u32)le16_to_cpu(READ_ONCE(bg->bg_free_inodes_count_hi)) << 16 : 0);
366 }
367 
368 __u32 ext4_used_dirs_count(struct super_block *sb,
369 			      struct ext4_group_desc *bg)
370 {
371 	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
372 		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
373 		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
374 }
375 
376 __u32 ext4_itable_unused_count(struct super_block *sb,
377 			      struct ext4_group_desc *bg)
378 {
379 	return le16_to_cpu(bg->bg_itable_unused_lo) |
380 		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
381 		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
382 }
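/*
 * Worked example for the lo/hi helpers above (illustrative values): with a
 * descriptor size of at least EXT4_MIN_DESC_SIZE_64BIT, a block bitmap
 * located at physical block 0x123456789 is stored as
 *
 *	bg_block_bitmap_lo = cpu_to_le32(0x23456789);
 *	bg_block_bitmap_hi = cpu_to_le32(0x00000001);
 *
 * and reassembled as lo | ((ext4_fsblk_t)hi << 32).  The 16-bit counters
 * (free clusters, free inodes, used dirs, unused itable entries) follow the
 * same pattern with a 16-bit shift.  On the smaller legacy descriptors only
 * the _lo halves exist, so the hi contribution is taken as zero.
 */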
383 
384 void ext4_block_bitmap_set(struct super_block *sb,
385 			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
386 {
387 	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
388 	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
389 		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
390 }
391 
392 void ext4_inode_bitmap_set(struct super_block *sb,
393 			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
394 {
395 	bg->bg_inode_bitmap_lo  = cpu_to_le32((u32)blk);
396 	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
397 		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
398 }
399 
400 void ext4_inode_table_set(struct super_block *sb,
401 			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
402 {
403 	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
404 	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
405 		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
406 }
407 
408 void ext4_free_group_clusters_set(struct super_block *sb,
409 				  struct ext4_group_desc *bg, __u32 count)
410 {
411 	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
412 	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
413 		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
414 }
415 
416 void ext4_free_inodes_set(struct super_block *sb,
417 			  struct ext4_group_desc *bg, __u32 count)
418 {
419 	WRITE_ONCE(bg->bg_free_inodes_count_lo, cpu_to_le16((__u16)count));
420 	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
421 		WRITE_ONCE(bg->bg_free_inodes_count_hi, cpu_to_le16(count >> 16));
422 }
423 
424 void ext4_used_dirs_set(struct super_block *sb,
425 			  struct ext4_group_desc *bg, __u32 count)
426 {
427 	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
428 	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
429 		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
430 }
431 
432 void ext4_itable_unused_set(struct super_block *sb,
433 			  struct ext4_group_desc *bg, __u32 count)
434 {
435 	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
436 	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
437 		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
438 }
439 
440 static void __ext4_update_tstamp(__le32 *lo, __u8 *hi, time64_t now)
441 {
442 	now = clamp_val(now, 0, (1ull << 40) - 1);
443 
444 	*lo = cpu_to_le32(lower_32_bits(now));
445 	*hi = upper_32_bits(now);
446 }
447 
448 static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
449 {
450 	return ((time64_t)(*hi) << 32) + le32_to_cpu(*lo);
451 }
452 #define ext4_update_tstamp(es, tstamp) \
453 	__ext4_update_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi, \
454 			     ktime_get_real_seconds())
455 #define ext4_get_tstamp(es, tstamp) \
456 	__ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
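/*
 * Worked example for the tstamp helpers above (illustrative value):
 * superblock timestamps are stored as 40-bit values split across a __le32
 * low word and a __u8 high byte.  For now = 0x123456789a:
 *
 *	*lo = cpu_to_le32(0x3456789a);		lower_32_bits(now)
 *	*hi = 0x12;				upper_32_bits(now), one byte
 *
 * clamp_val() saturates anything beyond (1ull << 40) - 1 seconds, i.e.
 * timestamps many thousands of years past the 1970 epoch.
 */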
457 
458 /*
459  * The ext4_maybe_update_superblock() function checks and updates the
460  * superblock if needed.
461  *
462  * This function is designed to update the on-disk superblock only under
463  * certain conditions to prevent excessive disk writes and unnecessary
464  * waking of the disk from sleep. The superblock will only be updated if both:
465  * 1. More than sbi->s_sb_update_sec (def: 1 hour) has passed since the last
466  *    superblock update
467  * 2. More than sbi->s_sb_update_kb (def: 16MB) kilobytes have been
468  *    written since the last superblock update.
469  *
470  * @sb: The superblock
471  */
472 static void ext4_maybe_update_superblock(struct super_block *sb)
473 {
474 	struct ext4_sb_info *sbi = EXT4_SB(sb);
475 	struct ext4_super_block *es = sbi->s_es;
476 	journal_t *journal = sbi->s_journal;
477 	time64_t now;
478 	__u64 last_update;
479 	__u64 lifetime_write_kbytes;
480 	__u64 diff_size;
481 
482 	if (ext4_emergency_state(sb) || sb_rdonly(sb) ||
483 	    !(sb->s_flags & SB_ACTIVE) || !journal ||
484 	    journal->j_flags & JBD2_UNMOUNT)
485 		return;
486 
487 	now = ktime_get_real_seconds();
488 	last_update = ext4_get_tstamp(es, s_wtime);
489 
490 	if (likely(now - last_update < sbi->s_sb_update_sec))
491 		return;
492 
493 	lifetime_write_kbytes = sbi->s_kbytes_written +
494 		((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
495 		  sbi->s_sectors_written_start) >> 1);
496 
497 	/* Get the number of kilobytes not written to disk to account
498 	 * for statistics and compare with a multiple of 16 MB. This
499 	 * is used to determine when the next superblock commit should
500 	 * occur (i.e. not more often than once per 16MB if there was
501 	 * less written in an hour).
502 	 */
503 	diff_size = lifetime_write_kbytes - le64_to_cpu(es->s_kbytes_written);
504 
505 	if (diff_size > sbi->s_sb_update_kb)
506 		schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
507 }
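/*
 * Example of the policy above (illustrative, using the documented defaults
 * of one hour and 16MB): if s_wtime in the on-disk superblock is 90 minutes
 * old, the update work is scheduled only when the device has seen at least
 * 16MB of writes beyond the recorded s_kbytes_written; if less than an hour
 * has passed, the function returns early regardless of how much was written.
 */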
508 
509 static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
510 {
511 	struct super_block		*sb = journal->j_private;
512 
513 	BUG_ON(txn->t_state == T_FINISHED);
514 
515 	ext4_process_freed_data(sb, txn->t_tid);
516 	ext4_maybe_update_superblock(sb);
517 }
518 
519 static bool ext4_journalled_writepage_needs_redirty(struct jbd2_inode *jinode,
520 		struct folio *folio)
521 {
522 	struct buffer_head *bh, *head;
523 	struct journal_head *jh;
524 
525 	bh = head = folio_buffers(folio);
526 	do {
527 		/*
528 		 * We have to redirty a page in these cases:
529 		 * 1) If buffer is dirty, it means the page was dirty because it
530 		 * contains a buffer that needs checkpointing. So the dirty bit
531 		 * needs to be preserved so that checkpointing writes the buffer
532 		 * properly.
533 		 * 2) If buffer is not part of the committing transaction
534 		 * (we may have just accidentally come across this buffer because
535 		 * inode range tracking is not exact) or if the currently running
536 		 * transaction already contains this buffer as well, dirty bit
537 		 * needs to be preserved so that the buffer gets writeprotected
538 		 * properly on running transaction's commit.
539 		 */
540 		jh = bh2jh(bh);
541 		if (buffer_dirty(bh) ||
542 		    (jh && (jh->b_transaction != jinode->i_transaction ||
543 			    jh->b_next_transaction)))
544 			return true;
545 	} while ((bh = bh->b_this_page) != head);
546 
547 	return false;
548 }
549 
550 static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode)
551 {
552 	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
553 	struct writeback_control wbc = {
554 		.sync_mode =  WB_SYNC_ALL,
555 		.nr_to_write = LONG_MAX,
556 		.range_start = jinode->i_dirty_start,
557 		.range_end = jinode->i_dirty_end,
558 	};
559 	struct folio *folio = NULL;
560 	int error;
561 
562 	/*
563 	 * writeback_iter() already checks for dirty pages and calls
564 	 * folio_clear_dirty_for_io(), which is what we want since clearing
565 	 * the dirty bit is how the folios get write protected.
566 	 *
567 	 * However, we may have to redirty a folio sometimes.
568 	 */
569 	while ((folio = writeback_iter(mapping, &wbc, folio, &error))) {
570 		if (ext4_journalled_writepage_needs_redirty(jinode, folio))
571 			folio_redirty_for_writepage(&wbc, folio);
572 		folio_unlock(folio);
573 	}
574 
575 	return error;
576 }
577 
578 static int ext4_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
579 {
580 	int ret;
581 
582 	if (ext4_should_journal_data(jinode->i_vfs_inode))
583 		ret = ext4_journalled_submit_inode_data_buffers(jinode);
584 	else
585 		ret = ext4_normal_submit_inode_data_buffers(jinode);
586 	return ret;
587 }
588 
589 static int ext4_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
590 {
591 	int ret = 0;
592 
593 	if (!ext4_should_journal_data(jinode->i_vfs_inode))
594 		ret = jbd2_journal_finish_inode_data_buffers(jinode);
595 
596 	return ret;
597 }
598 
599 static bool system_going_down(void)
600 {
601 	return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
602 		|| system_state == SYSTEM_RESTART;
603 }
604 
605 struct ext4_err_translation {
606 	int code;
607 	int errno;
608 };
609 
610 #define EXT4_ERR_TRANSLATE(err) { .code = EXT4_ERR_##err, .errno = err }
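/*
 * For example, EXT4_ERR_TRANSLATE(EIO) expands to
 *
 *	{ .code = EXT4_ERR_EIO, .errno = EIO }
 *
 * pairing the EXT4_ERR_* value recorded in the superblock error fields with
 * the in-kernel errno it corresponds to.
 */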
611 
612 static struct ext4_err_translation err_translation[] = {
613 	EXT4_ERR_TRANSLATE(EIO),
614 	EXT4_ERR_TRANSLATE(ENOMEM),
615 	EXT4_ERR_TRANSLATE(EFSBADCRC),
616 	EXT4_ERR_TRANSLATE(EFSCORRUPTED),
617 	EXT4_ERR_TRANSLATE(ENOSPC),
618 	EXT4_ERR_TRANSLATE(ENOKEY),
619 	EXT4_ERR_TRANSLATE(EROFS),
620 	EXT4_ERR_TRANSLATE(EFBIG),
621 	EXT4_ERR_TRANSLATE(EEXIST),
622 	EXT4_ERR_TRANSLATE(ERANGE),
623 	EXT4_ERR_TRANSLATE(EOVERFLOW),
624 	EXT4_ERR_TRANSLATE(EBUSY),
625 	EXT4_ERR_TRANSLATE(ENOTDIR),
626 	EXT4_ERR_TRANSLATE(ENOTEMPTY),
627 	EXT4_ERR_TRANSLATE(ESHUTDOWN),
628 	EXT4_ERR_TRANSLATE(EFAULT),
629 };
630 
631 static int ext4_errno_to_code(int errno)
632 {
633 	int i;
634 
635 	for (i = 0; i < ARRAY_SIZE(err_translation); i++)
636 		if (err_translation[i].errno == errno)
637 			return err_translation[i].code;
638 	return EXT4_ERR_UNKNOWN;
639 }
640 
641 static void save_error_info(struct super_block *sb, int error,
642 			    __u32 ino, __u64 block,
643 			    const char *func, unsigned int line)
644 {
645 	struct ext4_sb_info *sbi = EXT4_SB(sb);
646 
647 	/* We default to EFSCORRUPTED error... */
648 	if (error == 0)
649 		error = EFSCORRUPTED;
650 
651 	spin_lock(&sbi->s_error_lock);
652 	sbi->s_add_error_count++;
653 	sbi->s_last_error_code = error;
654 	sbi->s_last_error_line = line;
655 	sbi->s_last_error_ino = ino;
656 	sbi->s_last_error_block = block;
657 	sbi->s_last_error_func = func;
658 	sbi->s_last_error_time = ktime_get_real_seconds();
659 	if (!sbi->s_first_error_time) {
660 		sbi->s_first_error_code = error;
661 		sbi->s_first_error_line = line;
662 		sbi->s_first_error_ino = ino;
663 		sbi->s_first_error_block = block;
664 		sbi->s_first_error_func = func;
665 		sbi->s_first_error_time = sbi->s_last_error_time;
666 	}
667 	spin_unlock(&sbi->s_error_lock);
668 }
669 
670 /* Deal with the reporting of failure conditions on a filesystem such as
671  * inconsistencies detected or read IO failures.
672  *
673  * On ext2, we can store the error state of the filesystem in the
674  * superblock.  That is not possible on ext4, because we may have other
675  * write ordering constraints on the superblock which prevent us from
676  * writing it out straight away; and given that the journal is about to
677  * be aborted, we can't rely on the current, or future, transactions to
678  * write out the superblock safely.
679  *
680  * We'll just use the jbd2_journal_abort() error code to record an error in
681  * the journal instead.  On recovery, the journal will complain about
682  * that error until we've noted it down and cleared it.
683  *
684  * If force_ro is set, we unconditionally force the filesystem into an
685  * ABORT|READONLY state, unless the error response on the fs has been set to
686  * panic in which case we take the easy way out and panic immediately. This is
687  * used to deal with unrecoverable failures such as journal IO errors or ENOMEM
688  * at a critical moment in log management.
689  */
690 static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
691 			      __u32 ino, __u64 block,
692 			      const char *func, unsigned int line)
693 {
694 	journal_t *journal = EXT4_SB(sb)->s_journal;
695 	bool continue_fs = !force_ro && test_opt(sb, ERRORS_CONT);
696 
697 	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
698 	if (test_opt(sb, WARN_ON_ERROR))
699 		WARN_ON_ONCE(1);
700 
701 	if (!continue_fs && !ext4_emergency_ro(sb) && journal)
702 		jbd2_journal_abort(journal, -error);
703 
704 	if (!bdev_read_only(sb->s_bdev)) {
705 		save_error_info(sb, error, ino, block, func, line);
706 		/*
707 		 * In case the fs should keep running, we need to write out the
708 		 * superblock through the journal. Due to lock ordering
709 		 * constraints, it may not be safe to do it right here so we
710 		 * defer superblock flushing to a workqueue. We just need to be
711 		 * careful when the journal is already shutting down. If we get
712 		 * here in that case, just update the sb directly as the last
713 		 * transaction won't commit anyway.
714 		 */
715 		if (continue_fs && journal &&
716 		    !ext4_test_mount_flag(sb, EXT4_MF_JOURNAL_DESTROY))
717 			schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
718 		else
719 			ext4_commit_super(sb);
720 	}
721 
722 	/*
723 	 * We force ERRORS_RO behavior when the system is rebooting. Otherwise
724 	 * we could panic during 'reboot -f' as the underlying device has
725 	 * already been disabled.
726 	 */
727 	if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) {
728 		panic("EXT4-fs (device %s): panic forced after error\n",
729 			sb->s_id);
730 	}
731 
732 	if (ext4_emergency_ro(sb) || continue_fs)
733 		return;
734 
735 	ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
736 	/*
737 	 * We don't set SB_RDONLY because that requires sb->s_umount
738 	 * semaphore, and setting it without a proper remount procedure
739 	 * confuses code such as freeze_super(), leading to deadlocks
740 	 * and other problems.
741 	 */
742 	set_bit(EXT4_FLAGS_EMERGENCY_RO, &EXT4_SB(sb)->s_ext4_flags);
743 }
744 
745 static void update_super_work(struct work_struct *work)
746 {
747 	struct ext4_sb_info *sbi = container_of(work, struct ext4_sb_info,
748 						s_sb_upd_work);
749 	journal_t *journal = sbi->s_journal;
750 	handle_t *handle;
751 
752 	/*
753 	 * If the journal is still running, we have to write out the superblock
754 	 * through the journal to avoid colliding with other journalled sb
755 	 * updates.
756 	 *
757 	 * We use jbd2 functions directly here to avoid recursing back into
758 	 * ext4 error handling code during handling of previous errors.
759 	 */
760 	if (!ext4_emergency_state(sbi->s_sb) &&
761 	    !sb_rdonly(sbi->s_sb) && journal) {
762 		struct buffer_head *sbh = sbi->s_sbh;
763 		bool call_notify_err = false;
764 
765 		handle = jbd2_journal_start(journal, 1);
766 		if (IS_ERR(handle))
767 			goto write_directly;
768 		if (jbd2_journal_get_write_access(handle, sbh)) {
769 			jbd2_journal_stop(handle);
770 			goto write_directly;
771 		}
772 
773 		if (sbi->s_add_error_count > 0)
774 			call_notify_err = true;
775 
776 		ext4_update_super(sbi->s_sb);
777 		if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
778 			ext4_msg(sbi->s_sb, KERN_ERR, "previous I/O error to "
779 				 "superblock detected");
780 			clear_buffer_write_io_error(sbh);
781 			set_buffer_uptodate(sbh);
782 		}
783 
784 		if (jbd2_journal_dirty_metadata(handle, sbh)) {
785 			jbd2_journal_stop(handle);
786 			goto write_directly;
787 		}
788 		jbd2_journal_stop(handle);
789 
790 		if (call_notify_err)
791 			ext4_notify_error_sysfs(sbi);
792 
793 		return;
794 	}
795 write_directly:
796 	/*
797 	 * Write through journal failed. Write sb directly to get error info
798 	 * out and hope for the best.
799 	 */
800 	ext4_commit_super(sbi->s_sb);
801 	ext4_notify_error_sysfs(sbi);
802 }
803 
804 #define ext4_error_ratelimit(sb)					\
805 		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
806 			     "EXT4-fs error")
807 
808 void __ext4_error(struct super_block *sb, const char *function,
809 		  unsigned int line, bool force_ro, int error, __u64 block,
810 		  const char *fmt, ...)
811 {
812 	struct va_format vaf;
813 	va_list args;
814 
815 	if (unlikely(ext4_emergency_state(sb)))
816 		return;
817 
818 	trace_ext4_error(sb, function, line);
819 	if (ext4_error_ratelimit(sb)) {
820 		va_start(args, fmt);
821 		vaf.fmt = fmt;
822 		vaf.va = &args;
823 		printk(KERN_CRIT
824 		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
825 		       sb->s_id, function, line, current->comm, &vaf);
826 		va_end(args);
827 	}
828 	fserror_report_metadata(sb, error ? -abs(error) : -EFSCORRUPTED,
829 				GFP_ATOMIC);
830 
831 	ext4_handle_error(sb, force_ro, error, 0, block, function, line);
832 }
833 
834 void __ext4_error_inode(struct inode *inode, const char *function,
835 			unsigned int line, ext4_fsblk_t block, int error,
836 			const char *fmt, ...)
837 {
838 	va_list args;
839 	struct va_format vaf;
840 
841 	if (unlikely(ext4_emergency_state(inode->i_sb)))
842 		return;
843 
844 	trace_ext4_error(inode->i_sb, function, line);
845 	if (ext4_error_ratelimit(inode->i_sb)) {
846 		va_start(args, fmt);
847 		vaf.fmt = fmt;
848 		vaf.va = &args;
849 		if (block)
850 			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
851 			       "inode #%lu: block %llu: comm %s: %pV\n",
852 			       inode->i_sb->s_id, function, line, inode->i_ino,
853 			       block, current->comm, &vaf);
854 		else
855 			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
856 			       "inode #%lu: comm %s: %pV\n",
857 			       inode->i_sb->s_id, function, line, inode->i_ino,
858 			       current->comm, &vaf);
859 		va_end(args);
860 	}
861 	fserror_report_file_metadata(inode,
862 				     error ? -abs(error) : -EFSCORRUPTED,
863 				     GFP_ATOMIC);
864 
865 	ext4_handle_error(inode->i_sb, false, error, inode->i_ino, block,
866 			  function, line);
867 }
868 
869 void __ext4_error_file(struct file *file, const char *function,
870 		       unsigned int line, ext4_fsblk_t block,
871 		       const char *fmt, ...)
872 {
873 	va_list args;
874 	struct va_format vaf;
875 	struct inode *inode = file_inode(file);
876 	char pathname[80], *path;
877 
878 	if (unlikely(ext4_emergency_state(inode->i_sb)))
879 		return;
880 
881 	trace_ext4_error(inode->i_sb, function, line);
882 	if (ext4_error_ratelimit(inode->i_sb)) {
883 		path = file_path(file, pathname, sizeof(pathname));
884 		if (IS_ERR(path))
885 			path = "(unknown)";
886 		va_start(args, fmt);
887 		vaf.fmt = fmt;
888 		vaf.va = &args;
889 		if (block)
890 			printk(KERN_CRIT
891 			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
892 			       "block %llu: comm %s: path %s: %pV\n",
893 			       inode->i_sb->s_id, function, line, inode->i_ino,
894 			       block, current->comm, path, &vaf);
895 		else
896 			printk(KERN_CRIT
897 			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
898 			       "comm %s: path %s: %pV\n",
899 			       inode->i_sb->s_id, function, line, inode->i_ino,
900 			       current->comm, path, &vaf);
901 		va_end(args);
902 	}
903 	fserror_report_file_metadata(inode, -EFSCORRUPTED, GFP_ATOMIC);
904 
905 	ext4_handle_error(inode->i_sb, false, EFSCORRUPTED, inode->i_ino, block,
906 			  function, line);
907 }
908 
909 const char *ext4_decode_error(struct super_block *sb, int errno,
910 			      char nbuf[16])
911 {
912 	char *errstr = NULL;
913 
914 	switch (errno) {
915 	case -EFSCORRUPTED:
916 		errstr = "Corrupt filesystem";
917 		break;
918 	case -EFSBADCRC:
919 		errstr = "Filesystem failed CRC";
920 		break;
921 	case -EIO:
922 		errstr = "IO failure";
923 		break;
924 	case -ENOMEM:
925 		errstr = "Out of memory";
926 		break;
927 	case -EROFS:
928 		if (!sb || (EXT4_SB(sb)->s_journal &&
929 			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
930 			errstr = "Journal has aborted";
931 		else
932 			errstr = "Readonly filesystem";
933 		break;
934 	default:
935 		/* If the caller passed in an extra buffer for unknown
936 		 * errors, textualise them now.  Else we just return
937 		 * NULL. */
938 		if (nbuf) {
939 			/* Check for truncated error codes... */
940 			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
941 				errstr = nbuf;
942 		}
943 		break;
944 	}
945 
946 	return errstr;
947 }
948 
949 /* __ext4_std_error decodes expected errors from journaling functions
950  * automatically and invokes the appropriate error response.  */
951 
952 void __ext4_std_error(struct super_block *sb, const char *function,
953 		      unsigned int line, int errno)
954 {
955 	char nbuf[16];
956 	const char *errstr;
957 
958 	if (unlikely(ext4_emergency_state(sb)))
959 		return;
960 
961 	/* Special case: if the error is EROFS, and we're not already
962 	 * inside a transaction, then there's really no point in logging
963 	 * an error. */
964 	if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
965 		return;
966 
967 	if (ext4_error_ratelimit(sb)) {
968 		errstr = ext4_decode_error(sb, errno, nbuf);
969 		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
970 		       sb->s_id, function, line, errstr);
971 	}
972 	fserror_report_metadata(sb, errno ? -abs(errno) : -EFSCORRUPTED,
973 				GFP_ATOMIC);
974 
975 	ext4_handle_error(sb, false, -errno, 0, 0, function, line);
976 }
977 
978 void __ext4_msg(struct super_block *sb,
979 		const char *prefix, const char *fmt, ...)
980 {
981 	struct va_format vaf;
982 	va_list args;
983 
984 	if (sb) {
985 		atomic_inc(&EXT4_SB(sb)->s_msg_count);
986 		if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state),
987 				  "EXT4-fs"))
988 			return;
989 	}
990 
991 	va_start(args, fmt);
992 	vaf.fmt = fmt;
993 	vaf.va = &args;
994 	if (sb)
995 		printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
996 	else
997 		printk("%sEXT4-fs: %pV\n", prefix, &vaf);
998 	va_end(args);
999 }
1000 
1001 static int ext4_warning_ratelimit(struct super_block *sb)
1002 {
1003 	atomic_inc(&EXT4_SB(sb)->s_warning_count);
1004 	return ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),
1005 			    "EXT4-fs warning");
1006 }
1007 
1008 void __ext4_warning(struct super_block *sb, const char *function,
1009 		    unsigned int line, const char *fmt, ...)
1010 {
1011 	struct va_format vaf;
1012 	va_list args;
1013 
1014 	if (!ext4_warning_ratelimit(sb))
1015 		return;
1016 
1017 	va_start(args, fmt);
1018 	vaf.fmt = fmt;
1019 	vaf.va = &args;
1020 	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
1021 	       sb->s_id, function, line, &vaf);
1022 	va_end(args);
1023 }
1024 
1025 void __ext4_warning_inode(const struct inode *inode, const char *function,
1026 			  unsigned int line, const char *fmt, ...)
1027 {
1028 	struct va_format vaf;
1029 	va_list args;
1030 
1031 	if (!ext4_warning_ratelimit(inode->i_sb))
1032 		return;
1033 
1034 	va_start(args, fmt);
1035 	vaf.fmt = fmt;
1036 	vaf.va = &args;
1037 	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
1038 	       "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
1039 	       function, line, inode->i_ino, current->comm, &vaf);
1040 	va_end(args);
1041 }
1042 
1043 void __ext4_grp_locked_error(const char *function, unsigned int line,
1044 			     struct super_block *sb, ext4_group_t grp,
1045 			     unsigned long ino, ext4_fsblk_t block,
1046 			     const char *fmt, ...)
1047 __releases(bitlock)
1048 __acquires(bitlock)
1049 {
1050 	struct va_format vaf;
1051 	va_list args;
1052 
1053 	if (unlikely(ext4_emergency_state(sb)))
1054 		return;
1055 
1056 	trace_ext4_error(sb, function, line);
1057 	if (ext4_error_ratelimit(sb)) {
1058 		va_start(args, fmt);
1059 		vaf.fmt = fmt;
1060 		vaf.va = &args;
1061 		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
1062 		       sb->s_id, function, line, grp);
1063 		if (ino)
1064 			printk(KERN_CONT "inode %lu: ", ino);
1065 		if (block)
1066 			printk(KERN_CONT "block %llu:",
1067 			       (unsigned long long) block);
1068 		printk(KERN_CONT "%pV\n", &vaf);
1069 		va_end(args);
1070 	}
1071 
1072 	if (test_opt(sb, ERRORS_CONT)) {
1073 		if (test_opt(sb, WARN_ON_ERROR))
1074 			WARN_ON_ONCE(1);
1075 		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
1076 		if (!bdev_read_only(sb->s_bdev)) {
1077 			save_error_info(sb, EFSCORRUPTED, ino, block, function,
1078 					line);
1079 			schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
1080 		}
1081 		return;
1082 	}
1083 	ext4_unlock_group(sb, grp);
1084 	ext4_handle_error(sb, false, EFSCORRUPTED, ino, block, function, line);
1085 	/*
1086 	 * We only get here in the ERRORS_RO case; relocking the group
1087 	 * may be dangerous, but nothing bad will happen since the
1088 	 * filesystem will have already been marked read-only and the
1089 	 * journal has been aborted.  We return 1 as a hint to callers
1090 	 * who might want to use the return value from
1091 	 * ext4_grp_locked_error() to distinguish between the
1092 	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
1093 	 * aggressively from the ext4 function in question, with a
1094 	 * more appropriate error code.
1095 	 */
1096 	ext4_lock_group(sb, grp);
1097 	return;
1098 }
1099 
1100 void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
1101 				     ext4_group_t group,
1102 				     unsigned int flags)
1103 {
1104 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1105 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
1106 	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
1107 	int ret;
1108 
1109 	if (!grp || !gdp)
1110 		return;
1111 	if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
1112 		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
1113 					    &grp->bb_state);
1114 		if (!ret)
1115 			percpu_counter_sub(&sbi->s_freeclusters_counter,
1116 					   grp->bb_free);
1117 	}
1118 
1119 	if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) {
1120 		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
1121 					    &grp->bb_state);
1122 		if (!ret && gdp) {
1123 			int count;
1124 
1125 			count = ext4_free_inodes_count(sb, gdp);
1126 			percpu_counter_sub(&sbi->s_freeinodes_counter,
1127 					   count);
1128 		}
1129 	}
1130 }
1131 
1132 void ext4_update_dynamic_rev(struct super_block *sb)
1133 {
1134 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
1135 
1136 	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
1137 		return;
1138 
1139 	ext4_warning(sb,
1140 		     "updating to rev %d because of new feature flag, "
1141 		     "running e2fsck is recommended",
1142 		     EXT4_DYNAMIC_REV);
1143 
1144 	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
1145 	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
1146 	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
1147 	/* leave es->s_feature_*compat flags alone */
1148 	/* es->s_uuid will be set by e2fsck if empty */
1149 
1150 	/*
1151 	 * The rest of the superblock fields should be zero, and if not it
1152 	 * means they are likely already in use, so leave them alone.  We
1153 	 * can leave it up to e2fsck to clean up any inconsistencies there.
1154 	 */
1155 }
1156 
1157 static inline struct inode *orphan_list_entry(struct list_head *l)
1158 {
1159 	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
1160 }
1161 
1162 static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
1163 {
1164 	struct list_head *l;
1165 
1166 	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
1167 		 le32_to_cpu(sbi->s_es->s_last_orphan));
1168 
1169 	printk(KERN_ERR "sb_info orphan list:\n");
1170 	list_for_each(l, &sbi->s_orphan) {
1171 		struct inode *inode = orphan_list_entry(l);
1172 		printk(KERN_ERR "  "
1173 		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
1174 		       inode->i_sb->s_id, inode->i_ino, inode,
1175 		       inode->i_mode, inode->i_nlink,
1176 		       NEXT_ORPHAN(inode));
1177 	}
1178 }
1179 
1180 #ifdef CONFIG_QUOTA
1181 static int ext4_quota_off(struct super_block *sb, int type);
1182 
1183 static inline void ext4_quotas_off(struct super_block *sb, int type)
1184 {
1185 	BUG_ON(type > EXT4_MAXQUOTAS);
1186 
1187 	/* Use our quota_off function to clear inode flags etc. */
1188 	for (type--; type >= 0; type--)
1189 		ext4_quota_off(sb, type);
1190 }
1191 
1192 /*
1193  * This is a helper function which is used in the mount/remount
1194  * codepaths (which hold s_umount) to fetch the quota file name.
1195  */
1196 static inline char *get_qf_name(struct super_block *sb,
1197 				struct ext4_sb_info *sbi,
1198 				int type)
1199 {
1200 	return rcu_dereference_protected(sbi->s_qf_names[type],
1201 					 lockdep_is_held(&sb->s_umount));
1202 }
1203 #else
1204 static inline void ext4_quotas_off(struct super_block *sb, int type)
1205 {
1206 }
1207 #endif
1208 
1209 static int ext4_percpu_param_init(struct ext4_sb_info *sbi)
1210 {
1211 	ext4_fsblk_t block;
1212 	int err;
1213 
1214 	block = ext4_count_free_clusters(sbi->s_sb);
1215 	ext4_free_blocks_count_set(sbi->s_es, EXT4_C2B(sbi, block));
1216 	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
1217 				  GFP_KERNEL);
1218 	if (!err) {
1219 		unsigned long freei = ext4_count_free_inodes(sbi->s_sb);
1220 		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
1221 		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
1222 					  GFP_KERNEL);
1223 	}
1224 	if (!err)
1225 		err = percpu_counter_init(&sbi->s_dirs_counter,
1226 					  ext4_count_dirs(sbi->s_sb), GFP_KERNEL);
1227 	if (!err)
1228 		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
1229 					  GFP_KERNEL);
1230 	if (!err)
1231 		err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0,
1232 					  GFP_KERNEL);
1233 	if (!err)
1234 		err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
1235 
1236 	if (err)
1237 		ext4_msg(sbi->s_sb, KERN_ERR, "insufficient memory");
1238 
1239 	return err;
1240 }
1241 
1242 static void ext4_percpu_param_destroy(struct ext4_sb_info *sbi)
1243 {
1244 	percpu_counter_destroy(&sbi->s_freeclusters_counter);
1245 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
1246 	percpu_counter_destroy(&sbi->s_dirs_counter);
1247 	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
1248 	percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
1249 	percpu_free_rwsem(&sbi->s_writepages_rwsem);
1250 }
1251 
1252 static void ext4_group_desc_free(struct ext4_sb_info *sbi)
1253 {
1254 	struct buffer_head **group_desc;
1255 	int i;
1256 
1257 	rcu_read_lock();
1258 	group_desc = rcu_dereference(sbi->s_group_desc);
1259 	for (i = 0; i < sbi->s_gdb_count; i++)
1260 		brelse(group_desc[i]);
1261 	kvfree(group_desc);
1262 	rcu_read_unlock();
1263 }
1264 
1265 static void ext4_flex_groups_free(struct ext4_sb_info *sbi)
1266 {
1267 	struct flex_groups **flex_groups;
1268 	int i;
1269 
1270 	rcu_read_lock();
1271 	flex_groups = rcu_dereference(sbi->s_flex_groups);
1272 	if (flex_groups) {
1273 		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
1274 			kvfree(flex_groups[i]);
1275 		kvfree(flex_groups);
1276 	}
1277 	rcu_read_unlock();
1278 }
1279 
1280 static void ext4_put_super(struct super_block *sb)
1281 {
1282 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1283 	struct ext4_super_block *es = sbi->s_es;
1284 	int aborted = 0;
1285 	int err;
1286 
1287 	/*
1288 	 * Unregister sysfs before destroying the jbd2 journal, since the
1289 	 * attr_journal_task attribute could otherwise still be accessed via
1290 	 * sysfs after sbi->s_journal->j_task has been set to NULL.
1291 	 * Also unregister sysfs before flushing sbi->s_sb_upd_work: a user
1292 	 * may read /proc/fs/ext4/xx/mb_groups during umount, and a failed
1293 	 * metadata read verification there queues the error work, whose
1294 	 * update_super_work handler would call start_this_handle and could
1295 	 * trigger a BUG_ON.
1296 	 */
1297 	ext4_unregister_sysfs(sb);
1298 
1299 	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs unmount"))
1300 		ext4_msg(sb, KERN_INFO, "unmounting filesystem %pU.",
1301 			 &sb->s_uuid);
1302 
1303 	ext4_unregister_li_request(sb);
1304 	ext4_quotas_off(sb, EXT4_MAXQUOTAS);
1305 
1306 	destroy_workqueue(sbi->rsv_conversion_wq);
1307 	ext4_release_orphan_info(sb);
1308 
1309 	if (sbi->s_journal) {
1310 		aborted = is_journal_aborted(sbi->s_journal);
1311 		err = ext4_journal_destroy(sbi, sbi->s_journal);
1312 		if ((err < 0) && !aborted) {
1313 			ext4_abort(sb, -err, "Couldn't clean up the journal");
1314 		}
1315 	} else
1316 		flush_work(&sbi->s_sb_upd_work);
1317 
1318 	ext4_es_unregister_shrinker(sbi);
1319 	timer_shutdown_sync(&sbi->s_err_report);
1320 	ext4_release_system_zone(sb);
1321 	ext4_mb_release(sb);
1322 	ext4_ext_release(sb);
1323 
1324 	if (!ext4_emergency_state(sb) && !sb_rdonly(sb)) {
1325 		if (!aborted) {
1326 			ext4_clear_feature_journal_needs_recovery(sb);
1327 			ext4_clear_feature_orphan_present(sb);
1328 			es->s_state = cpu_to_le16(sbi->s_mount_state);
1329 		}
1330 		ext4_commit_super(sb);
1331 	}
1332 
1333 	ext4_group_desc_free(sbi);
1334 	ext4_flex_groups_free(sbi);
1335 
1336 	WARN_ON_ONCE(!(sbi->s_mount_state & EXT4_ERROR_FS) &&
1337 		     percpu_counter_sum(&sbi->s_dirtyclusters_counter));
1338 	ext4_percpu_param_destroy(sbi);
1339 #ifdef CONFIG_QUOTA
1340 	for (int i = 0; i < EXT4_MAXQUOTAS; i++)
1341 		kfree(get_qf_name(sb, sbi, i));
1342 #endif
1343 
1344 	/* Debugging code just in case the in-memory inode orphan list
1345 	 * isn't empty.  The on-disk one can be non-empty if we've
1346 	 * detected an error and taken the fs readonly, but the
1347 	 * in-memory list had better be clean by this point. */
1348 	if (!list_empty(&sbi->s_orphan))
1349 		dump_orphan_list(sb, sbi);
1350 	ASSERT(list_empty(&sbi->s_orphan));
1351 
1352 	sync_blockdev(sb->s_bdev);
1353 	invalidate_bdev(sb->s_bdev);
1354 	if (sbi->s_journal_bdev_file) {
1355 		/*
1356 		 * Invalidate the journal device's buffers.  We don't want them
1357 		 * floating about in memory - the physical journal device may
1358 		 * be hotswapped, and it breaks the `ro-after' testing code.
1359 		 */
1360 		sync_blockdev(file_bdev(sbi->s_journal_bdev_file));
1361 		invalidate_bdev(file_bdev(sbi->s_journal_bdev_file));
1362 	}
1363 
1364 	ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
1365 	sbi->s_ea_inode_cache = NULL;
1366 
1367 	ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
1368 	sbi->s_ea_block_cache = NULL;
1369 
1370 	ext4_stop_mmpd(sbi);
1371 
1372 	brelse(sbi->s_sbh);
1373 	sb->s_fs_info = NULL;
1374 	/*
1375 	 * Now that we are completely done shutting down the
1376 	 * superblock, we need to actually destroy the kobject.
1377 	 */
1378 	kobject_put(&sbi->s_kobj);
1379 	wait_for_completion(&sbi->s_kobj_unregister);
1380 	kfree(sbi->s_blockgroup_lock);
1381 	fs_put_dax(sbi->s_daxdev, NULL);
1382 	fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
1383 #if IS_ENABLED(CONFIG_UNICODE)
1384 	utf8_unload(sb->s_encoding);
1385 #endif
1386 	kfree(sbi);
1387 }
1388 
1389 static struct kmem_cache *ext4_inode_cachep;
1390 
1391 /*
1392  * Called inside transaction, so use GFP_NOFS
1393  */
1394 static struct inode *ext4_alloc_inode(struct super_block *sb)
1395 {
1396 	struct ext4_inode_info *ei;
1397 
1398 	ei = alloc_inode_sb(sb, ext4_inode_cachep, GFP_NOFS);
1399 	if (!ei)
1400 		return NULL;
1401 
1402 	inode_set_iversion(&ei->vfs_inode, 1);
1403 	ei->i_flags = 0;
1404 	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
1405 	spin_lock_init(&ei->i_raw_lock);
1406 	ei->i_prealloc_node = RB_ROOT;
1407 	atomic_set(&ei->i_prealloc_active, 0);
1408 	rwlock_init(&ei->i_prealloc_lock);
1409 	ext4_es_init_tree(&ei->i_es_tree);
1410 	rwlock_init(&ei->i_es_lock);
1411 	INIT_LIST_HEAD(&ei->i_es_list);
1412 	ei->i_es_all_nr = 0;
1413 	ei->i_es_shk_nr = 0;
1414 	ei->i_es_shrink_lblk = 0;
1415 	ei->i_es_seq = 0;
1416 	ei->i_reserved_data_blocks = 0;
1417 	spin_lock_init(&(ei->i_block_reservation_lock));
1418 	ext4_init_pending_tree(&ei->i_pending_tree);
1419 #ifdef CONFIG_QUOTA
1420 	ei->i_reserved_quota = 0;
1421 	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
1422 #endif
1423 	ei->jinode = NULL;
1424 	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
1425 	spin_lock_init(&ei->i_completed_io_lock);
1426 	ei->i_sync_tid = 0;
1427 	ei->i_datasync_tid = 0;
1428 	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
1429 	ext4_fc_init_inode(&ei->vfs_inode);
1430 	spin_lock_init(&ei->i_fc_lock);
1431 	return &ei->vfs_inode;
1432 }
1433 
1434 static int ext4_drop_inode(struct inode *inode)
1435 {
1436 	int drop = inode_generic_drop(inode);
1437 
1438 	if (!drop)
1439 		drop = fscrypt_drop_inode(inode);
1440 
1441 	trace_ext4_drop_inode(inode, drop);
1442 	return drop;
1443 }
1444 
1445 static void ext4_free_in_core_inode(struct inode *inode)
1446 {
1447 	fscrypt_free_inode(inode);
1448 	if (!list_empty(&(EXT4_I(inode)->i_fc_list))) {
1449 		pr_warn("%s: inode %lu still in fc list",
1450 			__func__, inode->i_ino);
1451 	}
1452 	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
1453 }
1454 
1455 static void ext4_destroy_inode(struct inode *inode)
1456 {
1457 	if (ext4_inode_orphan_tracked(inode)) {
1458 		ext4_msg(inode->i_sb, KERN_ERR,
1459 			 "Inode %lu (%p): inode tracked as orphan!",
1460 			 inode->i_ino, EXT4_I(inode));
1461 		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
1462 				EXT4_I(inode), sizeof(struct ext4_inode_info),
1463 				true);
1464 		dump_stack();
1465 	}
1466 
1467 	if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ERROR_FS) &&
1468 	    WARN_ON_ONCE(EXT4_I(inode)->i_reserved_data_blocks))
1469 		ext4_msg(inode->i_sb, KERN_ERR,
1470 			 "Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!",
1471 			 inode->i_ino, EXT4_I(inode),
1472 			 EXT4_I(inode)->i_reserved_data_blocks);
1473 }
1474 
1475 static void ext4_shutdown(struct super_block *sb)
1476 {
1477 	ext4_force_shutdown(sb, EXT4_GOING_FLAGS_NOLOGFLUSH);
1478 }
1479 
1480 static void init_once(void *foo)
1481 {
1482 	struct ext4_inode_info *ei = foo;
1483 
1484 	INIT_LIST_HEAD(&ei->i_orphan);
1485 	init_rwsem(&ei->xattr_sem);
1486 	init_rwsem(&ei->i_data_sem);
1487 	inode_init_once(&ei->vfs_inode);
1488 	ext4_fc_init_inode(&ei->vfs_inode);
1489 #ifdef CONFIG_FS_ENCRYPTION
1490 	ei->i_crypt_info = NULL;
1491 #endif
1492 #ifdef CONFIG_FS_VERITY
1493 	ei->i_verity_info = NULL;
1494 #endif
1495 }
1496 
1497 static int __init init_inodecache(void)
1498 {
1499 	ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache",
1500 				sizeof(struct ext4_inode_info), 0,
1501 				SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
1502 				offsetof(struct ext4_inode_info, i_data),
1503 				sizeof_field(struct ext4_inode_info, i_data),
1504 				init_once);
1505 	if (ext4_inode_cachep == NULL)
1506 		return -ENOMEM;
1507 	return 0;
1508 }
1509 
1510 static void destroy_inodecache(void)
1511 {
1512 	/*
1513 	 * Make sure all delayed rcu free inodes are flushed before we
1514 	 * destroy cache.
1515 	 */
1516 	rcu_barrier();
1517 	kmem_cache_destroy(ext4_inode_cachep);
1518 }
1519 
1520 void ext4_clear_inode(struct inode *inode)
1521 {
1522 	ext4_fc_del(inode);
1523 	invalidate_inode_buffers(inode);
1524 	clear_inode(inode);
1525 	ext4_discard_preallocations(inode);
1526 	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
1527 	dquot_drop(inode);
1528 	if (EXT4_I(inode)->jinode) {
1529 		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
1530 					       EXT4_I(inode)->jinode);
1531 		jbd2_free_inode(EXT4_I(inode)->jinode);
1532 		EXT4_I(inode)->jinode = NULL;
1533 	}
1534 	fscrypt_put_encryption_info(inode);
1535 	fsverity_cleanup_inode(inode);
1536 }
1537 
1538 static struct inode *ext4_nfs_get_inode(struct super_block *sb,
1539 					u64 ino, u32 generation)
1540 {
1541 	struct inode *inode;
1542 
1543 	/*
1544 	 * Currently we don't know the generation for the parent directory,
1545 	 * so a generation of 0 means "accept any".
1546 	 */
1547 	inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
1548 	if (IS_ERR(inode))
1549 		return ERR_CAST(inode);
1550 	if (generation && inode->i_generation != generation) {
1551 		iput(inode);
1552 		return ERR_PTR(-ESTALE);
1553 	}
1554 
1555 	return inode;
1556 }
1557 
1558 static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
1559 					int fh_len, int fh_type)
1560 {
1561 	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
1562 				    ext4_nfs_get_inode);
1563 }
1564 
1565 static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
1566 					int fh_len, int fh_type)
1567 {
1568 	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
1569 				    ext4_nfs_get_inode);
1570 }
1571 
1572 static int ext4_nfs_commit_metadata(struct inode *inode)
1573 {
1574 	struct writeback_control wbc = {
1575 		.sync_mode = WB_SYNC_ALL
1576 	};
1577 
1578 	trace_ext4_nfs_commit_metadata(inode);
1579 	return ext4_write_inode(inode, &wbc);
1580 }
1581 
1582 #ifdef CONFIG_QUOTA
1583 static const char * const quotatypes[] = INITQFNAMES;
1584 #define QTYPE2NAME(t) (quotatypes[t])
1585 
1586 static int ext4_write_dquot(struct dquot *dquot);
1587 static int ext4_acquire_dquot(struct dquot *dquot);
1588 static int ext4_release_dquot(struct dquot *dquot);
1589 static int ext4_mark_dquot_dirty(struct dquot *dquot);
1590 static int ext4_write_info(struct super_block *sb, int type);
1591 static int ext4_quota_on(struct super_block *sb, int type, int format_id,
1592 			 const struct path *path);
1593 static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
1594 			       size_t len, loff_t off);
1595 static ssize_t ext4_quota_write(struct super_block *sb, int type,
1596 				const char *data, size_t len, loff_t off);
1597 static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
1598 			     unsigned int flags);
1599 
1600 static struct dquot __rcu **ext4_get_dquots(struct inode *inode)
1601 {
1602 	return EXT4_I(inode)->i_dquot;
1603 }
1604 
1605 static const struct dquot_operations ext4_quota_operations = {
1606 	.get_reserved_space	= ext4_get_reserved_space,
1607 	.write_dquot		= ext4_write_dquot,
1608 	.acquire_dquot		= ext4_acquire_dquot,
1609 	.release_dquot		= ext4_release_dquot,
1610 	.mark_dirty		= ext4_mark_dquot_dirty,
1611 	.write_info		= ext4_write_info,
1612 	.alloc_dquot		= dquot_alloc,
1613 	.destroy_dquot		= dquot_destroy,
1614 	.get_projid		= ext4_get_projid,
1615 	.get_inode_usage	= ext4_get_inode_usage,
1616 	.get_next_id		= dquot_get_next_id,
1617 };
1618 
1619 static const struct quotactl_ops ext4_qctl_operations = {
1620 	.quota_on	= ext4_quota_on,
1621 	.quota_off	= ext4_quota_off,
1622 	.quota_sync	= dquot_quota_sync,
1623 	.get_state	= dquot_get_state,
1624 	.set_info	= dquot_set_dqinfo,
1625 	.get_dqblk	= dquot_get_dqblk,
1626 	.set_dqblk	= dquot_set_dqblk,
1627 	.get_nextdqblk	= dquot_get_next_dqblk,
1628 };
1629 #endif
1630 
1631 static const struct super_operations ext4_sops = {
1632 	.alloc_inode	= ext4_alloc_inode,
1633 	.free_inode	= ext4_free_in_core_inode,
1634 	.destroy_inode	= ext4_destroy_inode,
1635 	.write_inode	= ext4_write_inode,
1636 	.dirty_inode	= ext4_dirty_inode,
1637 	.drop_inode	= ext4_drop_inode,
1638 	.evict_inode	= ext4_evict_inode,
1639 	.put_super	= ext4_put_super,
1640 	.sync_fs	= ext4_sync_fs,
1641 	.freeze_fs	= ext4_freeze,
1642 	.unfreeze_fs	= ext4_unfreeze,
1643 	.statfs		= ext4_statfs,
1644 	.show_options	= ext4_show_options,
1645 	.shutdown	= ext4_shutdown,
1646 #ifdef CONFIG_QUOTA
1647 	.quota_read	= ext4_quota_read,
1648 	.quota_write	= ext4_quota_write,
1649 	.get_dquots	= ext4_get_dquots,
1650 #endif
1651 };
1652 
1653 static const struct export_operations ext4_export_ops = {
1654 	.encode_fh = generic_encode_ino32_fh,
1655 	.fh_to_dentry = ext4_fh_to_dentry,
1656 	.fh_to_parent = ext4_fh_to_parent,
1657 	.get_parent = ext4_get_parent,
1658 	.commit_metadata = ext4_nfs_commit_metadata,
1659 };
1660 
1661 enum {
1662 	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
1663 	Opt_resgid, Opt_resuid, Opt_sb,
1664 	Opt_nouid32, Opt_debug, Opt_removed,
1665 	Opt_user_xattr, Opt_acl,
1666 	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
1667 	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
1668 	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
1669 	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
1670 	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
1671 	Opt_inlinecrypt,
1672 	Opt_usrjquota, Opt_grpjquota, Opt_quota,
1673 	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
1674 	Opt_usrquota, Opt_grpquota, Opt_prjquota,
1675 	Opt_dax, Opt_dax_always, Opt_dax_inode, Opt_dax_never,
1676 	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
1677 	Opt_nowarn_on_error, Opt_mblk_io_submit, Opt_debug_want_extra_isize,
1678 	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
1679 	Opt_inode_readahead_blks, Opt_journal_ioprio,
1680 	Opt_dioread_nolock, Opt_dioread_lock,
1681 	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
1682 	Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
1683 	Opt_no_prefetch_block_bitmaps, Opt_mb_optimize_scan,
1684 	Opt_errors, Opt_data, Opt_data_err, Opt_jqfmt, Opt_dax_type,
1685 #ifdef CONFIG_EXT4_DEBUG
1686 	Opt_fc_debug_max_replay, Opt_fc_debug_force
1687 #endif
1688 };
1689 
1690 static const struct constant_table ext4_param_errors[] = {
1691 	{"continue",	EXT4_MOUNT_ERRORS_CONT},
1692 	{"panic",	EXT4_MOUNT_ERRORS_PANIC},
1693 	{"remount-ro",	EXT4_MOUNT_ERRORS_RO},
1694 	{}
1695 };
1696 
1697 static const struct constant_table ext4_param_data[] = {
1698 	{"journal",	EXT4_MOUNT_JOURNAL_DATA},
1699 	{"ordered",	EXT4_MOUNT_ORDERED_DATA},
1700 	{"writeback",	EXT4_MOUNT_WRITEBACK_DATA},
1701 	{}
1702 };
1703 
1704 static const struct constant_table ext4_param_data_err[] = {
1705 	{"abort",	Opt_data_err_abort},
1706 	{"ignore",	Opt_data_err_ignore},
1707 	{}
1708 };
1709 
1710 static const struct constant_table ext4_param_jqfmt[] = {
1711 	{"vfsold",	QFMT_VFS_OLD},
1712 	{"vfsv0",	QFMT_VFS_V0},
1713 	{"vfsv1",	QFMT_VFS_V1},
1714 	{}
1715 };
1716 
1717 static const struct constant_table ext4_param_dax[] = {
1718 	{"always",	Opt_dax_always},
1719 	{"inode",	Opt_dax_inode},
1720 	{"never",	Opt_dax_never},
1721 	{}
1722 };
1723 
1724 /*
1725  * Mount option specification
1726  * We don't use fsparam_flag_no because of the way we set the
1727  * options and the way we show them in _ext4_show_options(). To
1728  * keep the changes to a minimum, let's keep the negative options
1729  * separate for now.
1730  */
1731 static const struct fs_parameter_spec ext4_param_specs[] = {
1732 	fsparam_flag	("bsddf",		Opt_bsd_df),
1733 	fsparam_flag	("minixdf",		Opt_minix_df),
1734 	fsparam_flag	("grpid",		Opt_grpid),
1735 	fsparam_flag	("bsdgroups",		Opt_grpid),
1736 	fsparam_flag	("nogrpid",		Opt_nogrpid),
1737 	fsparam_flag	("sysvgroups",		Opt_nogrpid),
1738 	fsparam_gid	("resgid",		Opt_resgid),
1739 	fsparam_uid	("resuid",		Opt_resuid),
1740 	fsparam_u32	("sb",			Opt_sb),
1741 	fsparam_enum	("errors",		Opt_errors, ext4_param_errors),
1742 	fsparam_flag	("nouid32",		Opt_nouid32),
1743 	fsparam_flag	("debug",		Opt_debug),
1744 	fsparam_flag	("oldalloc",		Opt_removed),
1745 	fsparam_flag	("orlov",		Opt_removed),
1746 	fsparam_flag	("user_xattr",		Opt_user_xattr),
1747 	fsparam_flag	("acl",			Opt_acl),
1748 	fsparam_flag	("norecovery",		Opt_noload),
1749 	fsparam_flag	("noload",		Opt_noload),
1750 	fsparam_flag	("bh",			Opt_removed),
1751 	fsparam_flag	("nobh",		Opt_removed),
1752 	fsparam_u32	("commit",		Opt_commit),
1753 	fsparam_u32	("min_batch_time",	Opt_min_batch_time),
1754 	fsparam_u32	("max_batch_time",	Opt_max_batch_time),
1755 	fsparam_u32	("journal_dev",		Opt_journal_dev),
1756 	fsparam_bdev	("journal_path",	Opt_journal_path),
1757 	fsparam_flag	("journal_checksum",	Opt_journal_checksum),
1758 	fsparam_flag	("nojournal_checksum",	Opt_nojournal_checksum),
1759 	fsparam_flag	("journal_async_commit",Opt_journal_async_commit),
1760 	fsparam_flag	("abort",		Opt_abort),
1761 	fsparam_enum	("data",		Opt_data, ext4_param_data),
1762 	fsparam_enum	("data_err",		Opt_data_err,
1763 						ext4_param_data_err),
1764 	fsparam_string_empty
1765 			("usrjquota",		Opt_usrjquota),
1766 	fsparam_string_empty
1767 			("grpjquota",		Opt_grpjquota),
1768 	fsparam_enum	("jqfmt",		Opt_jqfmt, ext4_param_jqfmt),
1769 	fsparam_flag	("grpquota",		Opt_grpquota),
1770 	fsparam_flag	("quota",		Opt_quota),
1771 	fsparam_flag	("noquota",		Opt_noquota),
1772 	fsparam_flag	("usrquota",		Opt_usrquota),
1773 	fsparam_flag	("prjquota",		Opt_prjquota),
1774 	fsparam_flag	("barrier",		Opt_barrier),
1775 	fsparam_u32	("barrier",		Opt_barrier),
1776 	fsparam_flag	("nobarrier",		Opt_nobarrier),
1777 	fsparam_flag	("i_version",		Opt_removed),
1778 	fsparam_flag	("dax",			Opt_dax),
1779 	fsparam_enum	("dax",			Opt_dax_type, ext4_param_dax),
1780 	fsparam_u32	("stripe",		Opt_stripe),
1781 	fsparam_flag	("delalloc",		Opt_delalloc),
1782 	fsparam_flag	("nodelalloc",		Opt_nodelalloc),
1783 	fsparam_flag	("warn_on_error",	Opt_warn_on_error),
1784 	fsparam_flag	("nowarn_on_error",	Opt_nowarn_on_error),
1785 	fsparam_u32	("debug_want_extra_isize",
1786 						Opt_debug_want_extra_isize),
1787 	fsparam_flag	("mblk_io_submit",	Opt_removed),
1788 	fsparam_flag	("nomblk_io_submit",	Opt_removed),
1789 	fsparam_flag	("block_validity",	Opt_block_validity),
1790 	fsparam_flag	("noblock_validity",	Opt_noblock_validity),
1791 	fsparam_u32	("inode_readahead_blks",
1792 						Opt_inode_readahead_blks),
1793 	fsparam_u32	("journal_ioprio",	Opt_journal_ioprio),
1794 	fsparam_u32	("auto_da_alloc",	Opt_auto_da_alloc),
1795 	fsparam_flag	("auto_da_alloc",	Opt_auto_da_alloc),
1796 	fsparam_flag	("noauto_da_alloc",	Opt_noauto_da_alloc),
1797 	fsparam_flag	("dioread_nolock",	Opt_dioread_nolock),
1798 	fsparam_flag	("nodioread_nolock",	Opt_dioread_lock),
1799 	fsparam_flag	("dioread_lock",	Opt_dioread_lock),
1800 	fsparam_flag	("discard",		Opt_discard),
1801 	fsparam_flag	("nodiscard",		Opt_nodiscard),
1802 	fsparam_u32	("init_itable",		Opt_init_itable),
1803 	fsparam_flag	("init_itable",		Opt_init_itable),
1804 	fsparam_flag	("noinit_itable",	Opt_noinit_itable),
1805 #ifdef CONFIG_EXT4_DEBUG
1806 	fsparam_flag	("fc_debug_force",	Opt_fc_debug_force),
1807 	fsparam_u32	("fc_debug_max_replay",	Opt_fc_debug_max_replay),
1808 #endif
1809 	fsparam_u32	("max_dir_size_kb",	Opt_max_dir_size_kb),
1810 	fsparam_flag	("test_dummy_encryption",
1811 						Opt_test_dummy_encryption),
1812 	fsparam_string	("test_dummy_encryption",
1813 						Opt_test_dummy_encryption),
1814 	fsparam_flag	("inlinecrypt",		Opt_inlinecrypt),
1815 	fsparam_flag	("nombcache",		Opt_nombcache),
1816 	fsparam_flag	("no_mbcache",		Opt_nombcache),	/* for backward compatibility */
1817 	fsparam_flag	("prefetch_block_bitmaps",
1818 						Opt_removed),
1819 	fsparam_flag	("no_prefetch_block_bitmaps",
1820 						Opt_no_prefetch_block_bitmaps),
1821 	fsparam_s32	("mb_optimize_scan",	Opt_mb_optimize_scan),
1822 	fsparam_string	("check",		Opt_removed),	/* mount option from ext2/3 */
1823 	fsparam_flag	("nocheck",		Opt_removed),	/* mount option from ext2/3 */
1824 	fsparam_flag	("reservation",		Opt_removed),	/* mount option from ext2/3 */
1825 	fsparam_flag	("noreservation",	Opt_removed),	/* mount option from ext2/3 */
1826 	fsparam_u32	("journal",		Opt_removed),	/* mount option from ext2/3 */
1827 	{}
1828 };
1829 
1830 
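/*
 * Editor's note (not in the original source): the MOPT_* flags describe how
 * each token in the ext4_mount_opts[] table below is handled -- whether the
 * option sets or clears the named bit(s), whether the bit lives in
 * s_mount_opt2 rather than s_mount_opt (MOPT_2), whether it depends on quota
 * support, and which of the ext2/ext3/ext4 personalities it is valid for.
 */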
1831 #define MOPT_SET	0x0001
1832 #define MOPT_CLEAR	0x0002
1833 #define MOPT_NOSUPPORT	0x0004
1834 #define MOPT_EXPLICIT	0x0008
1835 #ifdef CONFIG_QUOTA
1836 #define MOPT_Q		0
1837 #define MOPT_QFMT	0x0010
1838 #else
1839 #define MOPT_Q		MOPT_NOSUPPORT
1840 #define MOPT_QFMT	MOPT_NOSUPPORT
1841 #endif
1842 #define MOPT_NO_EXT2	0x0020
1843 #define MOPT_NO_EXT3	0x0040
1844 #define MOPT_EXT4_ONLY	(MOPT_NO_EXT2 | MOPT_NO_EXT3)
1845 #define MOPT_SKIP	0x0080
1846 #define	MOPT_2		0x0100
1847 
1848 static const struct mount_opts {
1849 	int	token;
1850 	int	mount_opt;
1851 	int	flags;
1852 } ext4_mount_opts[] = {
1853 	{Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
1854 	{Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
1855 	{Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
1856 	{Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
1857 	{Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
1858 	{Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
1859 	{Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
1860 	 MOPT_EXT4_ONLY | MOPT_SET},
1861 	{Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
1862 	 MOPT_EXT4_ONLY | MOPT_CLEAR},
1863 	{Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
1864 	{Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
1865 	{Opt_delalloc, EXT4_MOUNT_DELALLOC,
1866 	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
1867 	{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
1868 	 MOPT_EXT4_ONLY | MOPT_CLEAR},
1869 	{Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
1870 	{Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
1871 	{Opt_commit, 0, MOPT_NO_EXT2},
1872 	{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
1873 	 MOPT_EXT4_ONLY | MOPT_CLEAR},
1874 	{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
1875 	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
1876 	{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
1877 				    EXT4_MOUNT_JOURNAL_CHECKSUM),
1878 	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
1879 	{Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
1880 	{Opt_data_err, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_NO_EXT2},
1881 	{Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
1882 	{Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
1883 	{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
1884 	{Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
1885 	{Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
1886 	{Opt_dax_type, 0, MOPT_EXT4_ONLY},
1887 	{Opt_journal_dev, 0, MOPT_NO_EXT2},
1888 	{Opt_journal_path, 0, MOPT_NO_EXT2},
1889 	{Opt_journal_ioprio, 0, MOPT_NO_EXT2},
1890 	{Opt_data, 0, MOPT_NO_EXT2},
1891 	{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
1892 #ifdef CONFIG_EXT4_FS_POSIX_ACL
1893 	{Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
1894 #else
1895 	{Opt_acl, 0, MOPT_NOSUPPORT},
1896 #endif
1897 	{Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
1898 	{Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
1899 	{Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
1900 	{Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
1901 							MOPT_SET | MOPT_Q},
1902 	{Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
1903 							MOPT_SET | MOPT_Q},
1904 	{Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
1905 							MOPT_SET | MOPT_Q},
1906 	{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
1907 		       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
1908 							MOPT_CLEAR | MOPT_Q},
1909 	{Opt_usrjquota, 0, MOPT_Q},
1910 	{Opt_grpjquota, 0, MOPT_Q},
1911 	{Opt_jqfmt, 0, MOPT_QFMT},
1912 	{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
1913 	{Opt_no_prefetch_block_bitmaps, EXT4_MOUNT_NO_PREFETCH_BLOCK_BITMAPS,
1914 	 MOPT_SET},
1915 #ifdef CONFIG_EXT4_DEBUG
1916 	{Opt_fc_debug_force, EXT4_MOUNT2_JOURNAL_FAST_COMMIT,
1917 	 MOPT_SET | MOPT_2 | MOPT_EXT4_ONLY},
1918 #endif
1919 	{Opt_abort, EXT4_MOUNT2_ABORT, MOPT_SET | MOPT_2},
1920 	{Opt_err, 0, 0}
1921 };
1922 
1923 #if IS_ENABLED(CONFIG_UNICODE)
1924 static const struct ext4_sb_encodings {
1925 	__u16 magic;
1926 	char *name;
1927 	unsigned int version;
1928 } ext4_sb_encoding_map[] = {
1929 	{EXT4_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)},
1930 };
1931 
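/*
 * Editor's note (not in the original source): map the s_encoding magic stored
 * in the on-disk superblock to a supported casefolding encoding; returns NULL
 * if the magic is unknown.
 */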
1932 static const struct ext4_sb_encodings *
1933 ext4_sb_read_encoding(const struct ext4_super_block *es)
1934 {
1935 	__u16 magic = le16_to_cpu(es->s_encoding);
1936 	int i;
1937 
1938 	for (i = 0; i < ARRAY_SIZE(ext4_sb_encoding_map); i++)
1939 		if (magic == ext4_sb_encoding_map[i].magic)
1940 			return &ext4_sb_encoding_map[i];
1941 
1942 	return NULL;
1943 }
1944 #endif
1945 
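/*
 * Editor's note (not in the original source): EXT4_SPEC_* bits are set in
 * ext4_fs_context->spec as options are parsed, recording which context fields
 * were explicitly specified so that only those are later copied into
 * ext4_sb_info by ext4_apply_options().
 */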
1946 #define EXT4_SPEC_JQUOTA			(1 <<  0)
1947 #define EXT4_SPEC_JQFMT				(1 <<  1)
1948 #define EXT4_SPEC_DATAJ				(1 <<  2)
1949 #define EXT4_SPEC_SB_BLOCK			(1 <<  3)
1950 #define EXT4_SPEC_JOURNAL_DEV			(1 <<  4)
1951 #define EXT4_SPEC_JOURNAL_IOPRIO		(1 <<  5)
1952 #define EXT4_SPEC_s_want_extra_isize		(1 <<  7)
1953 #define EXT4_SPEC_s_max_batch_time		(1 <<  8)
1954 #define EXT4_SPEC_s_min_batch_time		(1 <<  9)
1955 #define EXT4_SPEC_s_inode_readahead_blks	(1 << 10)
1956 #define EXT4_SPEC_s_li_wait_mult		(1 << 11)
1957 #define EXT4_SPEC_s_max_dir_size_kb		(1 << 12)
1958 #define EXT4_SPEC_s_stripe			(1 << 13)
1959 #define EXT4_SPEC_s_resuid			(1 << 14)
1960 #define EXT4_SPEC_s_resgid			(1 << 15)
1961 #define EXT4_SPEC_s_commit_interval		(1 << 16)
1962 #define EXT4_SPEC_s_fc_debug_max_replay		(1 << 17)
1963 #define EXT4_SPEC_s_sb_block			(1 << 18)
1964 #define EXT4_SPEC_mb_optimize_scan		(1 << 19)
1965 
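/*
 * Editor's note (not in the original source): options collected while parsing
 * a mount or remount request.  Nothing is applied to the super_block until
 * the consistency checks have passed and ext4_apply_options() is called.
 */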
1966 struct ext4_fs_context {
1967 	char		*s_qf_names[EXT4_MAXQUOTAS];
1968 	struct fscrypt_dummy_policy dummy_enc_policy;
1969 	int		s_jquota_fmt;	/* Format of quota to use */
1970 #ifdef CONFIG_EXT4_DEBUG
1971 	int s_fc_debug_max_replay;
1972 #endif
1973 	unsigned short	qname_spec;
1974 	unsigned long	vals_s_flags;	/* Bits to set in s_flags */
1975 	unsigned long	mask_s_flags;	/* Bits changed in s_flags */
1976 	unsigned long	journal_devnum;
1977 	unsigned long	s_commit_interval;
1978 	unsigned long	s_stripe;
1979 	unsigned int	s_inode_readahead_blks;
1980 	unsigned int	s_want_extra_isize;
1981 	unsigned int	s_li_wait_mult;
1982 	unsigned int	s_max_dir_size_kb;
1983 	unsigned int	journal_ioprio;
1984 	unsigned int	vals_s_mount_opt;
1985 	unsigned int	mask_s_mount_opt;
1986 	unsigned int	vals_s_mount_opt2;
1987 	unsigned int	mask_s_mount_opt2;
1988 	unsigned int	opt_flags;	/* MOPT flags */
1989 	unsigned int	spec;
1990 	u32		s_max_batch_time;
1991 	u32		s_min_batch_time;
1992 	kuid_t		s_resuid;
1993 	kgid_t		s_resgid;
1994 	ext4_fsblk_t	s_sb_block;
1995 };
1996 
1997 static void ext4_fc_free(struct fs_context *fc)
1998 {
1999 	struct ext4_fs_context *ctx = fc->fs_private;
2000 	int i;
2001 
2002 	if (!ctx)
2003 		return;
2004 
2005 	for (i = 0; i < EXT4_MAXQUOTAS; i++)
2006 		kfree(ctx->s_qf_names[i]);
2007 
2008 	fscrypt_free_dummy_policy(&ctx->dummy_enc_policy);
2009 	kfree(ctx);
2010 }
2011 
2012 int ext4_init_fs_context(struct fs_context *fc)
2013 {
2014 	struct ext4_fs_context *ctx;
2015 
2016 	ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL);
2017 	if (!ctx)
2018 		return -ENOMEM;
2019 
2020 	fc->fs_private = ctx;
2021 	fc->ops = &ext4_context_ops;
2022 
2023 	/* i_version is always enabled now */
2024 	fc->sb_flags |= SB_I_VERSION;
2025 
2026 	return 0;
2027 }
2028 
2029 #ifdef CONFIG_QUOTA
2030 /*
2031  * Note the name of the specified quota file.
2032  */
2033 static int note_qf_name(struct fs_context *fc, int qtype,
2034 		       struct fs_parameter *param)
2035 {
2036 	struct ext4_fs_context *ctx = fc->fs_private;
2037 	char *qname;
2038 
2039 	if (param->size < 1) {
2040 		ext4_msg(NULL, KERN_ERR, "Missing quota name");
2041 		return -EINVAL;
2042 	}
2043 	if (strchr(param->string, '/')) {
2044 		ext4_msg(NULL, KERN_ERR,
2045 			 "quotafile must be on filesystem root");
2046 		return -EINVAL;
2047 	}
2048 	if (ctx->s_qf_names[qtype]) {
2049 		if (strcmp(ctx->s_qf_names[qtype], param->string) != 0) {
2050 			ext4_msg(NULL, KERN_ERR,
2051 				 "%s quota file already specified",
2052 				 QTYPE2NAME(qtype));
2053 			return -EINVAL;
2054 		}
2055 		return 0;
2056 	}
2057 
2058 	qname = kmemdup_nul(param->string, param->size, GFP_KERNEL);
2059 	if (!qname) {
2060 		ext4_msg(NULL, KERN_ERR,
2061 			 "Not enough memory for storing quotafile name");
2062 		return -ENOMEM;
2063 	}
2064 	ctx->s_qf_names[qtype] = qname;
2065 	ctx->qname_spec |= 1 << qtype;
2066 	ctx->spec |= EXT4_SPEC_JQUOTA;
2067 	return 0;
2068 }
2069 
2070 /*
2071  * Clear the name of the specified quota file.
2072  */
2073 static int unnote_qf_name(struct fs_context *fc, int qtype)
2074 {
2075 	struct ext4_fs_context *ctx = fc->fs_private;
2076 
2077 	kfree(ctx->s_qf_names[qtype]);
2078 
2079 	ctx->s_qf_names[qtype] = NULL;
2080 	ctx->qname_spec |= 1 << qtype;
2081 	ctx->spec |= EXT4_SPEC_JQUOTA;
2082 	return 0;
2083 }
2084 #endif
2085 
2086 static int ext4_parse_test_dummy_encryption(const struct fs_parameter *param,
2087 					    struct ext4_fs_context *ctx)
2088 {
2089 	int err;
2090 
2091 	if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) {
2092 		ext4_msg(NULL, KERN_WARNING,
2093 			 "test_dummy_encryption option not supported");
2094 		return -EINVAL;
2095 	}
2096 	err = fscrypt_parse_test_dummy_encryption(param,
2097 						  &ctx->dummy_enc_policy);
2098 	if (err == -EINVAL) {
2099 		ext4_msg(NULL, KERN_WARNING,
2100 			 "Value of option \"%s\" is unrecognized", param->key);
2101 	} else if (err == -EEXIST) {
2102 		ext4_msg(NULL, KERN_WARNING,
2103 			 "Conflicting test_dummy_encryption options");
2104 		return -EINVAL;
2105 	}
2106 	return err;
2107 }
2108 
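/*
 * Editor's note (not in the original source): the macros below generate the
 * ctx_{set,clear,test}_<field>() helpers.  Setting or clearing a flag records
 * the affected bits in mask_s_<field> as well as the requested value in
 * vals_s_<field>, so that ext4_apply_options() only modifies bits that were
 * explicitly specified.
 */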
2109 #define EXT4_SET_CTX(name)						\
2110 static inline __maybe_unused						\
2111 void ctx_set_##name(struct ext4_fs_context *ctx, unsigned long flag)	\
2112 {									\
2113 	ctx->mask_s_##name |= flag;					\
2114 	ctx->vals_s_##name |= flag;					\
2115 }
2116 
2117 #define EXT4_CLEAR_CTX(name)						\
2118 static inline __maybe_unused						\
2119 void ctx_clear_##name(struct ext4_fs_context *ctx, unsigned long flag)	\
2120 {									\
2121 	ctx->mask_s_##name |= flag;					\
2122 	ctx->vals_s_##name &= ~flag;					\
2123 }
2124 
2125 #define EXT4_TEST_CTX(name)						\
2126 static inline unsigned long						\
2127 ctx_test_##name(struct ext4_fs_context *ctx, unsigned long flag)	\
2128 {									\
2129 	return (ctx->vals_s_##name & flag);				\
2130 }
2131 
2132 EXT4_SET_CTX(flags); /* set only */
2133 EXT4_SET_CTX(mount_opt);
2134 EXT4_CLEAR_CTX(mount_opt);
2135 EXT4_TEST_CTX(mount_opt);
2136 EXT4_SET_CTX(mount_opt2);
2137 EXT4_CLEAR_CTX(mount_opt2);
2138 EXT4_TEST_CTX(mount_opt2);
2139 
2140 static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param)
2141 {
2142 	struct ext4_fs_context *ctx = fc->fs_private;
2143 	struct fs_parse_result result;
2144 	const struct mount_opts *m;
2145 	int is_remount;
2146 	int token;
2147 
2148 	token = fs_parse(fc, ext4_param_specs, param, &result);
2149 	if (token < 0)
2150 		return token;
2151 	is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE;
2152 
2153 	for (m = ext4_mount_opts; m->token != Opt_err; m++)
2154 		if (token == m->token)
2155 			break;
2156 
2157 	ctx->opt_flags |= m->flags;
2158 
2159 	if (m->flags & MOPT_EXPLICIT) {
2160 		if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
2161 			ctx_set_mount_opt2(ctx, EXT4_MOUNT2_EXPLICIT_DELALLOC);
2162 		} else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
2163 			ctx_set_mount_opt2(ctx,
2164 				       EXT4_MOUNT2_EXPLICIT_JOURNAL_CHECKSUM);
2165 		} else
2166 			return -EINVAL;
2167 	}
2168 
2169 	if (m->flags & MOPT_NOSUPPORT) {
2170 		ext4_msg(NULL, KERN_ERR, "%s option not supported",
2171 			 param->key);
2172 		return 0;
2173 	}
2174 
2175 	switch (token) {
2176 #ifdef CONFIG_QUOTA
2177 	case Opt_usrjquota:
2178 		if (!*param->string)
2179 			return unnote_qf_name(fc, USRQUOTA);
2180 		else
2181 			return note_qf_name(fc, USRQUOTA, param);
2182 	case Opt_grpjquota:
2183 		if (!*param->string)
2184 			return unnote_qf_name(fc, GRPQUOTA);
2185 		else
2186 			return note_qf_name(fc, GRPQUOTA, param);
2187 #endif
2188 	case Opt_sb:
2189 		if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
2190 			ext4_msg(NULL, KERN_WARNING,
2191 				 "Ignoring %s option on remount", param->key);
2192 		} else {
2193 			ctx->s_sb_block = result.uint_32;
2194 			ctx->spec |= EXT4_SPEC_s_sb_block;
2195 		}
2196 		return 0;
2197 	case Opt_removed:
2198 		ext4_msg(NULL, KERN_WARNING, "Ignoring removed %s option",
2199 			 param->key);
2200 		return 0;
2201 	case Opt_inlinecrypt:
2202 #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
2203 		ctx_set_flags(ctx, SB_INLINECRYPT);
2204 #else
2205 		ext4_msg(NULL, KERN_ERR, "inline encryption not supported");
2206 #endif
2207 		return 0;
2208 	case Opt_errors:
2209 		ctx_clear_mount_opt(ctx, EXT4_MOUNT_ERRORS_MASK);
2210 		ctx_set_mount_opt(ctx, result.uint_32);
2211 		return 0;
2212 #ifdef CONFIG_QUOTA
2213 	case Opt_jqfmt:
2214 		ctx->s_jquota_fmt = result.uint_32;
2215 		ctx->spec |= EXT4_SPEC_JQFMT;
2216 		return 0;
2217 #endif
2218 	case Opt_data:
2219 		ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS);
2220 		ctx_set_mount_opt(ctx, result.uint_32);
2221 		ctx->spec |= EXT4_SPEC_DATAJ;
2222 		return 0;
2223 	case Opt_commit:
2224 		if (result.uint_32 == 0)
2225 			result.uint_32 = JBD2_DEFAULT_MAX_COMMIT_AGE;
2226 		else if (result.uint_32 > INT_MAX / HZ) {
2227 			ext4_msg(NULL, KERN_ERR,
2228 				 "Invalid commit interval %d, "
2229 				 "must not be larger than %d",
2230 				 result.uint_32, INT_MAX / HZ);
2231 			return -EINVAL;
2232 		}
2233 		ctx->s_commit_interval = HZ * result.uint_32;
2234 		ctx->spec |= EXT4_SPEC_s_commit_interval;
2235 		return 0;
2236 	case Opt_debug_want_extra_isize:
2237 		if ((result.uint_32 & 1) || (result.uint_32 < 4)) {
2238 			ext4_msg(NULL, KERN_ERR,
2239 				 "Invalid want_extra_isize %d", result.uint_32);
2240 			return -EINVAL;
2241 		}
2242 		ctx->s_want_extra_isize = result.uint_32;
2243 		ctx->spec |= EXT4_SPEC_s_want_extra_isize;
2244 		return 0;
2245 	case Opt_max_batch_time:
2246 		ctx->s_max_batch_time = result.uint_32;
2247 		ctx->spec |= EXT4_SPEC_s_max_batch_time;
2248 		return 0;
2249 	case Opt_min_batch_time:
2250 		ctx->s_min_batch_time = result.uint_32;
2251 		ctx->spec |= EXT4_SPEC_s_min_batch_time;
2252 		return 0;
2253 	case Opt_inode_readahead_blks:
2254 		if (result.uint_32 &&
2255 		    (result.uint_32 > (1 << 30) ||
2256 		     !is_power_of_2(result.uint_32))) {
2257 			ext4_msg(NULL, KERN_ERR,
2258 				 "EXT4-fs: inode_readahead_blks must be "
2259 				 "0 or a power of 2 smaller than 2^31");
2260 			return -EINVAL;
2261 		}
2262 		ctx->s_inode_readahead_blks = result.uint_32;
2263 		ctx->spec |= EXT4_SPEC_s_inode_readahead_blks;
2264 		return 0;
2265 	case Opt_init_itable:
2266 		ctx_set_mount_opt(ctx, EXT4_MOUNT_INIT_INODE_TABLE);
2267 		ctx->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
2268 		if (param->type == fs_value_is_string)
2269 			ctx->s_li_wait_mult = result.uint_32;
2270 		ctx->spec |= EXT4_SPEC_s_li_wait_mult;
2271 		return 0;
2272 	case Opt_max_dir_size_kb:
2273 		ctx->s_max_dir_size_kb = result.uint_32;
2274 		ctx->spec |= EXT4_SPEC_s_max_dir_size_kb;
2275 		return 0;
2276 #ifdef CONFIG_EXT4_DEBUG
2277 	case Opt_fc_debug_max_replay:
2278 		ctx->s_fc_debug_max_replay = result.uint_32;
2279 		ctx->spec |= EXT4_SPEC_s_fc_debug_max_replay;
2280 		return 0;
2281 #endif
2282 	case Opt_stripe:
2283 		ctx->s_stripe = result.uint_32;
2284 		ctx->spec |= EXT4_SPEC_s_stripe;
2285 		return 0;
2286 	case Opt_resuid:
2287 		ctx->s_resuid = result.uid;
2288 		ctx->spec |= EXT4_SPEC_s_resuid;
2289 		return 0;
2290 	case Opt_resgid:
2291 		ctx->s_resgid = result.gid;
2292 		ctx->spec |= EXT4_SPEC_s_resgid;
2293 		return 0;
2294 	case Opt_journal_dev:
2295 		if (is_remount) {
2296 			ext4_msg(NULL, KERN_ERR,
2297 				 "Cannot specify journal on remount");
2298 			return -EINVAL;
2299 		}
2300 		ctx->journal_devnum = result.uint_32;
2301 		ctx->spec |= EXT4_SPEC_JOURNAL_DEV;
2302 		return 0;
2303 	case Opt_journal_path:
2304 	{
2305 		struct inode *journal_inode;
2306 		struct path path;
2307 		int error;
2308 
2309 		if (is_remount) {
2310 			ext4_msg(NULL, KERN_ERR,
2311 				 "Cannot specify journal on remount");
2312 			return -EINVAL;
2313 		}
2314 
2315 		error = fs_lookup_param(fc, param, 1, LOOKUP_FOLLOW, &path);
2316 		if (error) {
2317 			ext4_msg(NULL, KERN_ERR, "error: could not find "
2318 				 "journal device path");
2319 			return -EINVAL;
2320 		}
2321 
2322 		journal_inode = d_inode(path.dentry);
2323 		ctx->journal_devnum = new_encode_dev(journal_inode->i_rdev);
2324 		ctx->spec |= EXT4_SPEC_JOURNAL_DEV;
2325 		path_put(&path);
2326 		return 0;
2327 	}
2328 	case Opt_journal_ioprio:
2329 		if (result.uint_32 > 7) {
2330 			ext4_msg(NULL, KERN_ERR, "Invalid journal IO priority"
2331 				 " (must be 0-7)");
2332 			return -EINVAL;
2333 		}
2334 		ctx->journal_ioprio =
2335 			IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, result.uint_32);
2336 		ctx->spec |= EXT4_SPEC_JOURNAL_IOPRIO;
2337 		return 0;
2338 	case Opt_test_dummy_encryption:
2339 		return ext4_parse_test_dummy_encryption(param, ctx);
2340 	case Opt_dax:
2341 	case Opt_dax_type:
2342 #ifdef CONFIG_FS_DAX
2343 	{
2344 		int type = (token == Opt_dax) ?
2345 			   Opt_dax : result.uint_32;
2346 
2347 		switch (type) {
2348 		case Opt_dax:
2349 		case Opt_dax_always:
2350 			ctx_set_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS);
2351 			ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER);
2352 			break;
2353 		case Opt_dax_never:
2354 			ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER);
2355 			ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS);
2356 			break;
2357 		case Opt_dax_inode:
2358 			ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS);
2359 			ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER);
2360 			/* Strictly for printing options */
2361 			ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE);
2362 			break;
2363 		}
2364 		return 0;
2365 	}
2366 #else
2367 		ext4_msg(NULL, KERN_INFO, "dax option not supported");
2368 		return -EINVAL;
2369 #endif
2370 	case Opt_data_err:
2371 		if (result.uint_32 == Opt_data_err_abort)
2372 			ctx_set_mount_opt(ctx, m->mount_opt);
2373 		else if (result.uint_32 == Opt_data_err_ignore)
2374 			ctx_clear_mount_opt(ctx, m->mount_opt);
2375 		return 0;
2376 	case Opt_mb_optimize_scan:
2377 		if (result.int_32 == 1) {
2378 			ctx_set_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN);
2379 			ctx->spec |= EXT4_SPEC_mb_optimize_scan;
2380 		} else if (result.int_32 == 0) {
2381 			ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN);
2382 			ctx->spec |= EXT4_SPEC_mb_optimize_scan;
2383 		} else {
2384 			ext4_msg(NULL, KERN_WARNING,
2385 				 "mb_optimize_scan should be set to 0 or 1.");
2386 			return -EINVAL;
2387 		}
2388 		return 0;
2389 	}
2390 
2391 	/*
2392 	 * At this point we should only be getting options requiring MOPT_SET,
2393 	 * or MOPT_CLEAR. Anything else is a bug.
2394 	 */
2395 	if (m->token == Opt_err) {
2396 		ext4_msg(NULL, KERN_WARNING, "buggy handling of option %s",
2397 			 param->key);
2398 		WARN_ON(1);
2399 		return -EINVAL;
2400 	}
2401 
2402 	else {
2403 		unsigned int set = 0;
2404 
2405 		if ((param->type == fs_value_is_flag) ||
2406 		    result.uint_32 > 0)
2407 			set = 1;
2408 
2409 		if (m->flags & MOPT_CLEAR)
2410 			set = !set;
2411 		else if (unlikely(!(m->flags & MOPT_SET))) {
2412 			ext4_msg(NULL, KERN_WARNING,
2413 				 "buggy handling of option %s",
2414 				 param->key);
2415 			WARN_ON(1);
2416 			return -EINVAL;
2417 		}
2418 		if (m->flags & MOPT_2) {
2419 			if (set != 0)
2420 				ctx_set_mount_opt2(ctx, m->mount_opt);
2421 			else
2422 				ctx_clear_mount_opt2(ctx, m->mount_opt);
2423 		} else {
2424 			if (set != 0)
2425 				ctx_set_mount_opt(ctx, m->mount_opt);
2426 			else
2427 				ctx_clear_mount_opt(ctx, m->mount_opt);
2428 		}
2429 	}
2430 
2431 	return 0;
2432 }
2433 
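/*
 * Editor's note (not in the original source): parse a legacy comma-separated
 * option string (such as the s_mount_opts field of the superblock) by
 * splitting it into individual fs_parameters and feeding each one to
 * ext4_parse_param().
 */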
2434 static int parse_options(struct fs_context *fc, char *options)
2435 {
2436 	struct fs_parameter param;
2437 	int ret;
2438 	char *key;
2439 
2440 	if (!options)
2441 		return 0;
2442 
2443 	while ((key = strsep(&options, ",")) != NULL) {
2444 		if (*key) {
2445 			size_t v_len = 0;
2446 			char *value = strchr(key, '=');
2447 
2448 			param.type = fs_value_is_flag;
2449 			param.string = NULL;
2450 
2451 			if (value) {
2452 				if (value == key)
2453 					continue;
2454 
2455 				*value++ = 0;
2456 				v_len = strlen(value);
2457 				param.string = kmemdup_nul(value, v_len,
2458 							   GFP_KERNEL);
2459 				if (!param.string)
2460 					return -ENOMEM;
2461 				param.type = fs_value_is_string;
2462 			}
2463 
2464 			param.key = key;
2465 			param.size = v_len;
2466 
2467 			ret = ext4_parse_param(fc, &param);
2468 			kfree(param.string);
2469 			if (ret < 0)
2470 				return ret;
2471 		}
2472 	}
2473 
2474 	ret = ext4_validate_options(fc);
2475 	if (ret < 0)
2476 		return ret;
2477 
2478 	return 0;
2479 }
2480 
2481 static int parse_apply_sb_mount_options(struct super_block *sb,
2482 					struct ext4_fs_context *m_ctx)
2483 {
2484 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2485 	char s_mount_opts[64];
2486 	struct ext4_fs_context *s_ctx = NULL;
2487 	struct fs_context *fc = NULL;
2488 	int ret = -ENOMEM;
2489 
2490 	if (!sbi->s_es->s_mount_opts[0])
2491 		return 0;
2492 
2493 	if (strscpy_pad(s_mount_opts, sbi->s_es->s_mount_opts) < 0)
2494 		return -E2BIG;
2495 
2496 	fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL);
2497 	if (!fc)
2498 		return -ENOMEM;
2499 
2500 	s_ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL);
2501 	if (!s_ctx)
2502 		goto out_free;
2503 
2504 	fc->fs_private = s_ctx;
2505 	fc->s_fs_info = sbi;
2506 
2507 	ret = parse_options(fc, s_mount_opts);
2508 	if (ret < 0)
2509 		goto parse_failed;
2510 
2511 	ret = ext4_check_opt_consistency(fc, sb);
2512 	if (ret < 0) {
2513 parse_failed:
2514 		ext4_msg(sb, KERN_WARNING,
2515 			 "failed to parse options in superblock: %s",
2516 			 s_mount_opts);
2517 		ret = 0;
2518 		goto out_free;
2519 	}
2520 
2521 	if (s_ctx->spec & EXT4_SPEC_JOURNAL_DEV)
2522 		m_ctx->journal_devnum = s_ctx->journal_devnum;
2523 	if (s_ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)
2524 		m_ctx->journal_ioprio = s_ctx->journal_ioprio;
2525 
2526 	ext4_apply_options(fc, sb);
2527 	ret = 0;
2528 
2529 out_free:
2530 	ext4_fc_free(fc);
2531 	kfree(fc);
2532 	return ret;
2533 }
2534 
2535 static void ext4_apply_quota_options(struct fs_context *fc,
2536 				     struct super_block *sb)
2537 {
2538 #ifdef CONFIG_QUOTA
2539 	bool quota_feature = ext4_has_feature_quota(sb);
2540 	struct ext4_fs_context *ctx = fc->fs_private;
2541 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2542 	char *qname;
2543 	int i;
2544 
2545 	if (quota_feature)
2546 		return;
2547 
2548 	if (ctx->spec & EXT4_SPEC_JQUOTA) {
2549 		for (i = 0; i < EXT4_MAXQUOTAS; i++) {
2550 			if (!(ctx->qname_spec & (1 << i)))
2551 				continue;
2552 
2553 			qname = ctx->s_qf_names[i]; /* May be NULL */
2554 			if (qname)
2555 				set_opt(sb, QUOTA);
2556 			ctx->s_qf_names[i] = NULL;
2557 			qname = rcu_replace_pointer(sbi->s_qf_names[i], qname,
2558 						lockdep_is_held(&sb->s_umount));
2559 			if (qname)
2560 				kfree_rcu_mightsleep(qname);
2561 		}
2562 	}
2563 
2564 	if (ctx->spec & EXT4_SPEC_JQFMT)
2565 		sbi->s_jquota_fmt = ctx->s_jquota_fmt;
2566 #endif
2567 }
2568 
2569 /*
2570  * Check quota settings consistency.
2571  */
2572 static int ext4_check_quota_consistency(struct fs_context *fc,
2573 					struct super_block *sb)
2574 {
2575 #ifdef CONFIG_QUOTA
2576 	struct ext4_fs_context *ctx = fc->fs_private;
2577 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2578 	bool quota_feature = ext4_has_feature_quota(sb);
2579 	bool quota_loaded = sb_any_quota_loaded(sb);
2580 	bool usr_qf_name, grp_qf_name, usrquota, grpquota;
2581 	int quota_flags, i;
2582 
2583 	/*
2584 	 * We do the test below only for project quotas. 'usrquota' and
2585 	 * 'grpquota' mount options are allowed even without quota feature
2586 	 * to support legacy quotas in quota files.
2587 	 */
2588 	if (ctx_test_mount_opt(ctx, EXT4_MOUNT_PRJQUOTA) &&
2589 	    !ext4_has_feature_project(sb)) {
2590 		ext4_msg(NULL, KERN_ERR, "Project quota feature not enabled. "
2591 			 "Cannot enable project quota enforcement.");
2592 		return -EINVAL;
2593 	}
2594 
2595 	quota_flags = EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
2596 		      EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA;
2597 	if (quota_loaded &&
2598 	    ctx->mask_s_mount_opt & quota_flags &&
2599 	    !ctx_test_mount_opt(ctx, quota_flags))
2600 		goto err_quota_change;
2601 
2602 	if (ctx->spec & EXT4_SPEC_JQUOTA) {
2603 
2604 		for (i = 0; i < EXT4_MAXQUOTAS; i++) {
2605 			if (!(ctx->qname_spec & (1 << i)))
2606 				continue;
2607 
2608 			if (quota_loaded &&
2609 			    !!sbi->s_qf_names[i] != !!ctx->s_qf_names[i])
2610 				goto err_jquota_change;
2611 
2612 			if (sbi->s_qf_names[i] && ctx->s_qf_names[i] &&
2613 			    strcmp(get_qf_name(sb, sbi, i),
2614 				   ctx->s_qf_names[i]) != 0)
2615 				goto err_jquota_specified;
2616 		}
2617 
2618 		if (quota_feature) {
2619 			ext4_msg(NULL, KERN_INFO,
2620 				 "Journaled quota options ignored when "
2621 				 "QUOTA feature is enabled");
2622 			return 0;
2623 		}
2624 	}
2625 
2626 	if (ctx->spec & EXT4_SPEC_JQFMT) {
2627 		if (sbi->s_jquota_fmt != ctx->s_jquota_fmt && quota_loaded)
2628 			goto err_jquota_change;
2629 		if (quota_feature) {
2630 			ext4_msg(NULL, KERN_INFO, "Quota format mount options "
2631 				 "ignored when QUOTA feature is enabled");
2632 			return 0;
2633 		}
2634 	}
2635 
2636 	/* Make sure we don't mix old and new quota format */
2637 	usr_qf_name = (get_qf_name(sb, sbi, USRQUOTA) ||
2638 		       ctx->s_qf_names[USRQUOTA]);
2639 	grp_qf_name = (get_qf_name(sb, sbi, GRPQUOTA) ||
2640 		       ctx->s_qf_names[GRPQUOTA]);
2641 
2642 	usrquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) ||
2643 		    test_opt(sb, USRQUOTA));
2644 
2645 	grpquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) ||
2646 		    test_opt(sb, GRPQUOTA));
2647 
2648 	if (usr_qf_name) {
2649 		ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA);
2650 		usrquota = false;
2651 	}
2652 	if (grp_qf_name) {
2653 		ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA);
2654 		grpquota = false;
2655 	}
2656 
2657 	if (usr_qf_name || grp_qf_name) {
2658 		if (usrquota || grpquota) {
2659 			ext4_msg(NULL, KERN_ERR, "old and new quota "
2660 				 "format mixing");
2661 			return -EINVAL;
2662 		}
2663 
2664 		if (!(ctx->spec & EXT4_SPEC_JQFMT || sbi->s_jquota_fmt)) {
2665 			ext4_msg(NULL, KERN_ERR, "journaled quota format "
2666 				 "not specified");
2667 			return -EINVAL;
2668 		}
2669 	}
2670 
2671 	return 0;
2672 
2673 err_quota_change:
2674 	ext4_msg(NULL, KERN_ERR,
2675 		 "Cannot change quota options when quota turned on");
2676 	return -EINVAL;
2677 err_jquota_change:
2678 	ext4_msg(NULL, KERN_ERR, "Cannot change journaled quota "
2679 		 "options when quota turned on");
2680 	return -EINVAL;
2681 err_jquota_specified:
2682 	ext4_msg(NULL, KERN_ERR, "%s quota file already specified",
2683 		 QTYPE2NAME(i));
2684 	return -EINVAL;
2685 #else
2686 	return 0;
2687 #endif
2688 }
2689 
2690 static int ext4_check_test_dummy_encryption(const struct fs_context *fc,
2691 					    struct super_block *sb)
2692 {
2693 	const struct ext4_fs_context *ctx = fc->fs_private;
2694 	const struct ext4_sb_info *sbi = EXT4_SB(sb);
2695 
2696 	if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy))
2697 		return 0;
2698 
2699 	if (!ext4_has_feature_encrypt(sb)) {
2700 		ext4_msg(NULL, KERN_WARNING,
2701 			 "test_dummy_encryption requires encrypt feature");
2702 		return -EINVAL;
2703 	}
2704 	/*
2705 	 * This mount option is just for testing, and it's not worthwhile to
2706 	 * implement the extra complexity (e.g. RCU protection) that would be
2707 	 * needed to allow it to be set or changed during remount.  We do allow
2708 	 * it to be specified during remount, but only if there is no change.
2709 	 */
2710 	if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
2711 		if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy,
2712 						 &ctx->dummy_enc_policy))
2713 			return 0;
2714 		ext4_msg(NULL, KERN_WARNING,
2715 			 "Can't set or change test_dummy_encryption on remount");
2716 		return -EINVAL;
2717 	}
2718 	/* Also make sure s_mount_opts didn't contain a conflicting value. */
2719 	if (fscrypt_is_dummy_policy_set(&sbi->s_dummy_enc_policy)) {
2720 		if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy,
2721 						 &ctx->dummy_enc_policy))
2722 			return 0;
2723 		ext4_msg(NULL, KERN_WARNING,
2724 			 "Conflicting test_dummy_encryption options");
2725 		return -EINVAL;
2726 	}
2727 	return 0;
2728 }
2729 
2730 static void ext4_apply_test_dummy_encryption(struct ext4_fs_context *ctx,
2731 					     struct super_block *sb)
2732 {
2733 	if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy) ||
2734 	    /* if already set, it was already verified to be the same */
2735 	    fscrypt_is_dummy_policy_set(&EXT4_SB(sb)->s_dummy_enc_policy))
2736 		return;
2737 	EXT4_SB(sb)->s_dummy_enc_policy = ctx->dummy_enc_policy;
2738 	memset(&ctx->dummy_enc_policy, 0, sizeof(ctx->dummy_enc_policy));
2739 	ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled");
2740 }
2741 
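/*
 * Editor's note (not in the original source): check the parsed options
 * against the existing superblock state -- ext2/ext3 compatibility,
 * want_extra_isize bounds, test_dummy_encryption, data=/data_err=/dax
 * changes on remount, and quota consistency.
 */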
2742 static int ext4_check_opt_consistency(struct fs_context *fc,
2743 				      struct super_block *sb)
2744 {
2745 	struct ext4_fs_context *ctx = fc->fs_private;
2746 	struct ext4_sb_info *sbi = fc->s_fs_info;
2747 	int is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE;
2748 	int err;
2749 
2750 	if ((ctx->opt_flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
2751 		ext4_msg(NULL, KERN_ERR,
2752 			 "Mount option(s) incompatible with ext2");
2753 		return -EINVAL;
2754 	}
2755 	if ((ctx->opt_flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
2756 		ext4_msg(NULL, KERN_ERR,
2757 			 "Mount option(s) incompatible with ext3");
2758 		return -EINVAL;
2759 	}
2760 
2761 	if (ctx->s_want_extra_isize >
2762 	    (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE)) {
2763 		ext4_msg(NULL, KERN_ERR,
2764 			 "Invalid want_extra_isize %d",
2765 			 ctx->s_want_extra_isize);
2766 		return -EINVAL;
2767 	}
2768 
2769 	err = ext4_check_test_dummy_encryption(fc, sb);
2770 	if (err)
2771 		return err;
2772 
2773 	if ((ctx->spec & EXT4_SPEC_DATAJ) && is_remount) {
2774 		if (!sbi->s_journal) {
2775 			ext4_msg(NULL, KERN_WARNING,
2776 				 "Remounting file system with no journal "
2777 				 "so ignoring journalled data option");
2778 			ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS);
2779 		} else if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS) !=
2780 			   test_opt(sb, DATA_FLAGS)) {
2781 			ext4_msg(NULL, KERN_ERR, "Cannot change data mode "
2782 				 "on remount");
2783 			return -EINVAL;
2784 		}
2785 	}
2786 
2787 	if (is_remount) {
2788 		if (!sbi->s_journal &&
2789 		    ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_ERR_ABORT)) {
2790 			ext4_msg(NULL, KERN_WARNING,
2791 				 "Remounting fs w/o journal so ignoring data_err option");
2792 			ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_ERR_ABORT);
2793 		}
2794 
2795 		if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) &&
2796 		    (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) {
2797 			ext4_msg(NULL, KERN_ERR, "can't mount with "
2798 				 "both data=journal and dax");
2799 			return -EINVAL;
2800 		}
2801 
2802 		if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) &&
2803 		    (!(sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
2804 		     (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER))) {
2805 fail_dax_change_remount:
2806 			ext4_msg(NULL, KERN_ERR, "can't change "
2807 				 "dax mount option while remounting");
2808 			return -EINVAL;
2809 		} else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER) &&
2810 			 (!(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
2811 			  (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS))) {
2812 			goto fail_dax_change_remount;
2813 		} else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE) &&
2814 			   ((sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
2815 			    (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
2816 			    !(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_INODE))) {
2817 			goto fail_dax_change_remount;
2818 		}
2819 	}
2820 
2821 	return ext4_check_quota_consistency(fc, sb);
2822 }
2823 
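/*
 * Editor's note (not in the original source): commit the parsed options to
 * the superblock.  Only bits recorded in the mask_* fields and values flagged
 * in ctx->spec (via the APPLY() macro below) are touched, so unspecified
 * options keep their current values.
 */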
2824 static void ext4_apply_options(struct fs_context *fc, struct super_block *sb)
2825 {
2826 	struct ext4_fs_context *ctx = fc->fs_private;
2827 	struct ext4_sb_info *sbi = fc->s_fs_info;
2828 
2829 	sbi->s_mount_opt &= ~ctx->mask_s_mount_opt;
2830 	sbi->s_mount_opt |= ctx->vals_s_mount_opt;
2831 	sbi->s_mount_opt2 &= ~ctx->mask_s_mount_opt2;
2832 	sbi->s_mount_opt2 |= ctx->vals_s_mount_opt2;
2833 	sb->s_flags &= ~ctx->mask_s_flags;
2834 	sb->s_flags |= ctx->vals_s_flags;
2835 
2836 #define APPLY(X) ({ if (ctx->spec & EXT4_SPEC_##X) sbi->X = ctx->X; })
2837 	APPLY(s_commit_interval);
2838 	APPLY(s_stripe);
2839 	APPLY(s_max_batch_time);
2840 	APPLY(s_min_batch_time);
2841 	APPLY(s_want_extra_isize);
2842 	APPLY(s_inode_readahead_blks);
2843 	APPLY(s_max_dir_size_kb);
2844 	APPLY(s_li_wait_mult);
2845 	APPLY(s_resgid);
2846 	APPLY(s_resuid);
2847 
2848 #ifdef CONFIG_EXT4_DEBUG
2849 	APPLY(s_fc_debug_max_replay);
2850 #endif
2851 
2852 	ext4_apply_quota_options(fc, sb);
2853 	ext4_apply_test_dummy_encryption(ctx, sb);
2854 }
2855 
2856 
2857 static int ext4_validate_options(struct fs_context *fc)
2858 {
2859 #ifdef CONFIG_QUOTA
2860 	struct ext4_fs_context *ctx = fc->fs_private;
2861 	char *usr_qf_name, *grp_qf_name;
2862 
2863 	usr_qf_name = ctx->s_qf_names[USRQUOTA];
2864 	grp_qf_name = ctx->s_qf_names[GRPQUOTA];
2865 
2866 	if (usr_qf_name || grp_qf_name) {
2867 		if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) && usr_qf_name)
2868 			ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA);
2869 
2870 		if (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) && grp_qf_name)
2871 			ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA);
2872 
2873 		if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) ||
2874 		    ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA)) {
2875 			ext4_msg(NULL, KERN_ERR, "old and new quota "
2876 				 "format mixing");
2877 			return -EINVAL;
2878 		}
2879 	}
2880 #endif
2881 	return 1;
2882 }
2883 
2884 static inline void ext4_show_quota_options(struct seq_file *seq,
2885 					   struct super_block *sb)
2886 {
2887 #if defined(CONFIG_QUOTA)
2888 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2889 	char *usr_qf_name, *grp_qf_name;
2890 
2891 	if (sbi->s_jquota_fmt) {
2892 		char *fmtname = "";
2893 
2894 		switch (sbi->s_jquota_fmt) {
2895 		case QFMT_VFS_OLD:
2896 			fmtname = "vfsold";
2897 			break;
2898 		case QFMT_VFS_V0:
2899 			fmtname = "vfsv0";
2900 			break;
2901 		case QFMT_VFS_V1:
2902 			fmtname = "vfsv1";
2903 			break;
2904 		}
2905 		seq_printf(seq, ",jqfmt=%s", fmtname);
2906 	}
2907 
2908 	rcu_read_lock();
2909 	usr_qf_name = rcu_dereference(sbi->s_qf_names[USRQUOTA]);
2910 	grp_qf_name = rcu_dereference(sbi->s_qf_names[GRPQUOTA]);
2911 	if (usr_qf_name)
2912 		seq_show_option(seq, "usrjquota", usr_qf_name);
2913 	if (grp_qf_name)
2914 		seq_show_option(seq, "grpjquota", grp_qf_name);
2915 	rcu_read_unlock();
2916 #endif
2917 }
2918 
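/*
 * Editor's note (not in the original source): map a mount option token back
 * to its name in ext4_param_specs[]; the flag form of the parameter
 * (spec->type == NULL) is the one used for printing.
 */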
2919 static const char *token2str(int token)
2920 {
2921 	const struct fs_parameter_spec *spec;
2922 
2923 	for (spec = ext4_param_specs; spec->name != NULL; spec++)
2924 		if (spec->opt == token && !spec->type)
2925 			break;
2926 	return spec->name;
2927 }
2928 
2929 /*
2930  * Show an option if
2931  *  - it's set to a non-default value OR
2932  *  - if the per-sb default is different from the global default
2933  *  - the per-sb default is different from the global default
2934 static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
2935 			      int nodefs)
2936 {
2937 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2938 	struct ext4_super_block *es = sbi->s_es;
2939 	int def_errors;
2940 	const struct mount_opts *m;
2941 	char sep = nodefs ? '\n' : ',';
2942 
2943 #define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
2944 #define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)
2945 
2946 	if (sbi->s_sb_block != 1)
2947 		SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);
2948 
2949 	for (m = ext4_mount_opts; m->token != Opt_err; m++) {
2950 		int want_set = m->flags & MOPT_SET;
2951 		int opt_2 = m->flags & MOPT_2;
2952 		unsigned int mount_opt, def_mount_opt;
2953 
2954 		if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
2955 		    m->flags & MOPT_SKIP)
2956 			continue;
2957 
2958 		if (opt_2) {
2959 			mount_opt = sbi->s_mount_opt2;
2960 			def_mount_opt = sbi->s_def_mount_opt2;
2961 		} else {
2962 			mount_opt = sbi->s_mount_opt;
2963 			def_mount_opt = sbi->s_def_mount_opt;
2964 		}
2965 		/* skip if same as the default */
2966 		if (!nodefs && !(m->mount_opt & (mount_opt ^ def_mount_opt)))
2967 			continue;
2968 		/* select Opt_noFoo vs Opt_Foo */
2969 		if ((want_set &&
2970 		     (mount_opt & m->mount_opt) != m->mount_opt) ||
2971 		    (!want_set && (mount_opt & m->mount_opt)))
2972 			continue;
2973 		SEQ_OPTS_PRINT("%s", token2str(m->token));
2974 	}
2975 
2976 	if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
2977 	    ext4_get_resuid(es) != EXT4_DEF_RESUID)
2978 		SEQ_OPTS_PRINT("resuid=%u",
2979 				from_kuid_munged(&init_user_ns, sbi->s_resuid));
2980 	if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
2981 	    ext4_get_resgid(es) != EXT4_DEF_RESGID)
2982 		SEQ_OPTS_PRINT("resgid=%u",
2983 				from_kgid_munged(&init_user_ns, sbi->s_resgid));
2984 	def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
2985 	if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
2986 		SEQ_OPTS_PUTS("errors=remount-ro");
2987 	if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
2988 		SEQ_OPTS_PUTS("errors=continue");
2989 	if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
2990 		SEQ_OPTS_PUTS("errors=panic");
2991 	if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
2992 		SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
2993 	if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
2994 		SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
2995 	if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
2996 		SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
2997 	if (nodefs && sb->s_flags & SB_I_VERSION)
2998 		SEQ_OPTS_PUTS("i_version");
2999 	if (nodefs || sbi->s_stripe)
3000 		SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
3001 	if (nodefs || EXT4_MOUNT_DATA_FLAGS &
3002 			(sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
3003 		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
3004 			SEQ_OPTS_PUTS("data=journal");
3005 		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
3006 			SEQ_OPTS_PUTS("data=ordered");
3007 		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
3008 			SEQ_OPTS_PUTS("data=writeback");
3009 	}
3010 	if (nodefs ||
3011 	    sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
3012 		SEQ_OPTS_PRINT("inode_readahead_blks=%u",
3013 			       sbi->s_inode_readahead_blks);
3014 
3015 	if (test_opt(sb, INIT_INODE_TABLE) && (nodefs ||
3016 		       (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
3017 		SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
3018 	if (nodefs || sbi->s_max_dir_size_kb)
3019 		SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
3020 	if (test_opt(sb, DATA_ERR_ABORT))
3021 		SEQ_OPTS_PUTS("data_err=abort");
3022 
3023 	fscrypt_show_test_dummy_encryption(seq, sep, sb);
3024 
3025 	if (sb->s_flags & SB_INLINECRYPT)
3026 		SEQ_OPTS_PUTS("inlinecrypt");
3027 
3028 	if (test_opt(sb, DAX_ALWAYS)) {
3029 		if (IS_EXT2_SB(sb))
3030 			SEQ_OPTS_PUTS("dax");
3031 		else
3032 			SEQ_OPTS_PUTS("dax=always");
3033 	} else if (test_opt2(sb, DAX_NEVER)) {
3034 		SEQ_OPTS_PUTS("dax=never");
3035 	} else if (test_opt2(sb, DAX_INODE)) {
3036 		SEQ_OPTS_PUTS("dax=inode");
3037 	}
3038 
3039 	if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD &&
3040 			!test_opt2(sb, MB_OPTIMIZE_SCAN)) {
3041 		SEQ_OPTS_PUTS("mb_optimize_scan=0");
3042 	} else if (sbi->s_groups_count < MB_DEFAULT_LINEAR_SCAN_THRESHOLD &&
3043 			test_opt2(sb, MB_OPTIMIZE_SCAN)) {
3044 		SEQ_OPTS_PUTS("mb_optimize_scan=1");
3045 	}
3046 
3047 	if (nodefs && !test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS))
3048 		SEQ_OPTS_PUTS("prefetch_block_bitmaps");
3049 
3050 	if (ext4_emergency_ro(sb))
3051 		SEQ_OPTS_PUTS("emergency_ro");
3052 
3053 	if (ext4_forced_shutdown(sb))
3054 		SEQ_OPTS_PUTS("shutdown");
3055 
3056 	ext4_show_quota_options(seq, sb);
3057 	return 0;
3058 }
3059 
3060 static int ext4_show_options(struct seq_file *seq, struct dentry *root)
3061 {
3062 	return _ext4_show_options(seq, root->d_sb, 0);
3063 }
3064 
3065 int ext4_seq_options_show(struct seq_file *seq, void *offset)
3066 {
3067 	struct super_block *sb = seq->private;
3068 	int rc;
3069 
3070 	seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
3071 	rc = _ext4_show_options(seq, sb, 1);
3072 	seq_putc(seq, '\n');
3073 	return rc;
3074 }
3075 
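/*
 * Editor's note (not in the original source): final mount-time superblock
 * bookkeeping -- warn about unchecked or errored filesystems, bump the mount
 * count, update the mount time, set the journal recovery flags, and write the
 * superblock back out.
 */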
3076 static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
3077 			    int read_only)
3078 {
3079 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3080 	int err = 0;
3081 
3082 	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
3083 		ext4_msg(sb, KERN_ERR, "revision level too high, "
3084 			 "forcing read-only mode");
3085 		err = -EROFS;
3086 		goto done;
3087 	}
3088 	if (read_only)
3089 		goto done;
3090 	if (!(sbi->s_mount_state & EXT4_VALID_FS))
3091 		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
3092 			 "running e2fsck is recommended");
3093 	else if (sbi->s_mount_state & EXT4_ERROR_FS)
3094 		ext4_msg(sb, KERN_WARNING,
3095 			 "warning: mounting fs with errors, "
3096 			 "running e2fsck is recommended");
3097 	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
3098 		 le16_to_cpu(es->s_mnt_count) >=
3099 		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
3100 		ext4_msg(sb, KERN_WARNING,
3101 			 "warning: maximal mount count reached, "
3102 			 "running e2fsck is recommended");
3103 	else if (le32_to_cpu(es->s_checkinterval) &&
3104 		 (ext4_get_tstamp(es, s_lastcheck) +
3105 		  le32_to_cpu(es->s_checkinterval) <= ktime_get_real_seconds()))
3106 		ext4_msg(sb, KERN_WARNING,
3107 			 "warning: checktime reached, "
3108 			 "running e2fsck is recommended");
3109 	if (!sbi->s_journal)
3110 		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
3111 	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
3112 		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
3113 	le16_add_cpu(&es->s_mnt_count, 1);
3114 	ext4_update_tstamp(es, s_mtime);
3115 	if (sbi->s_journal) {
3116 		ext4_set_feature_journal_needs_recovery(sb);
3117 		if (ext4_has_feature_orphan_file(sb))
3118 			ext4_set_feature_orphan_present(sb);
3119 	}
3120 
3121 	err = ext4_commit_super(sb);
3122 done:
3123 	if (test_opt(sb, DEBUG))
3124 		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
3125 				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
3126 			sb->s_blocksize,
3127 			sbi->s_groups_count,
3128 			EXT4_BLOCKS_PER_GROUP(sb),
3129 			EXT4_INODES_PER_GROUP(sb),
3130 			sbi->s_mount_opt, sbi->s_mount_opt2);
3131 	return err;
3132 }
3133 
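/*
 * Editor's note (not in the original source): grow the RCU-protected
 * s_flex_groups pointer array so that it covers at least ngroup block groups,
 * preserving the already-populated entries.
 */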
3134 int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
3135 {
3136 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3137 	struct flex_groups **old_groups, **new_groups;
3138 	int size, i, j;
3139 
3140 	if (!sbi->s_log_groups_per_flex)
3141 		return 0;
3142 
3143 	size = ext4_flex_group(sbi, ngroup - 1) + 1;
3144 	if (size <= sbi->s_flex_groups_allocated)
3145 		return 0;
3146 
3147 	new_groups = kvzalloc(roundup_pow_of_two(size *
3148 			      sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
3149 	if (!new_groups) {
3150 		ext4_msg(sb, KERN_ERR,
3151 			 "not enough memory for %d flex group pointers", size);
3152 		return -ENOMEM;
3153 	}
3154 	for (i = sbi->s_flex_groups_allocated; i < size; i++) {
3155 		new_groups[i] = kvzalloc(roundup_pow_of_two(
3156 					 sizeof(struct flex_groups)),
3157 					 GFP_KERNEL);
3158 		if (!new_groups[i]) {
3159 			for (j = sbi->s_flex_groups_allocated; j < i; j++)
3160 				kvfree(new_groups[j]);
3161 			kvfree(new_groups);
3162 			ext4_msg(sb, KERN_ERR,
3163 				 "not enough memory for %d flex groups", size);
3164 			return -ENOMEM;
3165 		}
3166 	}
3167 	rcu_read_lock();
3168 	old_groups = rcu_dereference(sbi->s_flex_groups);
3169 	if (old_groups)
3170 		memcpy(new_groups, old_groups,
3171 		       (sbi->s_flex_groups_allocated *
3172 			sizeof(struct flex_groups *)));
3173 	rcu_read_unlock();
3174 	rcu_assign_pointer(sbi->s_flex_groups, new_groups);
3175 	sbi->s_flex_groups_allocated = size;
3176 	if (old_groups)
3177 		ext4_kvfree_array_rcu(old_groups);
3178 	return 0;
3179 }
3180 
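/*
 * Editor's note (not in the original source): populate the per-flex-group
 * free inode, free cluster and used directory counters from the on-disk group
 * descriptors.  Returns 1 on success and 0 on failure.
 */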
3181 static int ext4_fill_flex_info(struct super_block *sb)
3182 {
3183 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3184 	struct ext4_group_desc *gdp = NULL;
3185 	struct flex_groups *fg;
3186 	ext4_group_t flex_group;
3187 	int i, err;
3188 
3189 	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
3190 	if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
3191 		sbi->s_log_groups_per_flex = 0;
3192 		return 1;
3193 	}
3194 
3195 	err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
3196 	if (err)
3197 		goto failed;
3198 
3199 	for (i = 0; i < sbi->s_groups_count; i++) {
3200 		gdp = ext4_get_group_desc(sb, i, NULL);
3201 
3202 		flex_group = ext4_flex_group(sbi, i);
3203 		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
3204 		atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
3205 		atomic64_add(ext4_free_group_clusters(sb, gdp),
3206 			     &fg->free_clusters);
3207 		atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
3208 	}
3209 
3210 	return 1;
3211 failed:
3212 	return 0;
3213 }
3214 
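/*
 * Editor's note (not in the original source): compute the checksum of a group
 * descriptor -- a seeded crc32c value truncated to 16 bits when the
 * metadata_csum feature is enabled, the legacy crc16 over the UUID, group
 * number and descriptor when only gdt_csum is set, and 0 if neither feature
 * is present.
 */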
3215 static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
3216 				   struct ext4_group_desc *gdp)
3217 {
3218 	int offset = offsetof(struct ext4_group_desc, bg_checksum);
3219 	__u16 crc = 0;
3220 	__le32 le_group = cpu_to_le32(block_group);
3221 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3222 
3223 	if (ext4_has_feature_metadata_csum(sbi->s_sb)) {
3224 		/* Use new metadata_csum algorithm */
3225 		__u32 csum32;
3226 		__u16 dummy_csum = 0;
3227 
3228 		csum32 = ext4_chksum(sbi->s_csum_seed, (__u8 *)&le_group,
3229 				     sizeof(le_group));
3230 		csum32 = ext4_chksum(csum32, (__u8 *)gdp, offset);
3231 		csum32 = ext4_chksum(csum32, (__u8 *)&dummy_csum,
3232 				     sizeof(dummy_csum));
3233 		offset += sizeof(dummy_csum);
3234 		if (offset < sbi->s_desc_size)
3235 			csum32 = ext4_chksum(csum32, (__u8 *)gdp + offset,
3236 					     sbi->s_desc_size - offset);
3237 
3238 		crc = csum32 & 0xFFFF;
3239 		goto out;
3240 	}
3241 
3242 	/* old crc16 code */
3243 	if (!ext4_has_feature_gdt_csum(sb))
3244 		return 0;
3245 
3246 	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
3247 	crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
3248 	crc = crc16(crc, (__u8 *)gdp, offset);
3249 	offset += sizeof(gdp->bg_checksum); /* skip checksum */
3250 	/* for checksum of struct ext4_group_desc do the rest...*/
3251 	if (ext4_has_feature_64bit(sb) && offset < sbi->s_desc_size)
3252 		crc = crc16(crc, (__u8 *)gdp + offset,
3253 			    sbi->s_desc_size - offset);
3254 
3255 out:
3256 	return cpu_to_le16(crc);
3257 }
3258 
3259 int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
3260 				struct ext4_group_desc *gdp)
3261 {
3262 	if (ext4_has_group_desc_csum(sb) &&
3263 	    (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
3264 		return 0;
3265 
3266 	return 1;
3267 }
3268 
3269 void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
3270 			      struct ext4_group_desc *gdp)
3271 {
3272 	if (!ext4_has_group_desc_csum(sb))
3273 		return;
3274 	gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
3275 }
3276 
3277 /* Called at mount-time, super-block is locked */
3278 static int ext4_check_descriptors(struct super_block *sb,
3279 				  ext4_fsblk_t sb_block,
3280 				  ext4_group_t *first_not_zeroed)
3281 {
3282 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3283 	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
3284 	ext4_fsblk_t last_block;
3285 	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
3286 	ext4_fsblk_t block_bitmap;
3287 	ext4_fsblk_t inode_bitmap;
3288 	ext4_fsblk_t inode_table;
3289 	int flexbg_flag = 0;
3290 	ext4_group_t i, grp = sbi->s_groups_count;
3291 
3292 	if (ext4_has_feature_flex_bg(sb))
3293 		flexbg_flag = 1;
3294 
3295 	ext4_debug("Checking group descriptors");
3296 
3297 	for (i = 0; i < sbi->s_groups_count; i++) {
3298 		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
3299 
3300 		if (i == sbi->s_groups_count - 1 || flexbg_flag)
3301 			last_block = ext4_blocks_count(sbi->s_es) - 1;
3302 		else
3303 			last_block = first_block +
3304 				(EXT4_BLOCKS_PER_GROUP(sb) - 1);
3305 
3306 		if ((grp == sbi->s_groups_count) &&
3307 		   !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
3308 			grp = i;
3309 
3310 		block_bitmap = ext4_block_bitmap(sb, gdp);
3311 		if (block_bitmap == sb_block) {
3312 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3313 				 "Block bitmap for group %u overlaps "
3314 				 "superblock", i);
3315 			if (!sb_rdonly(sb))
3316 				return 0;
3317 		}
3318 		if (block_bitmap >= sb_block + 1 &&
3319 		    block_bitmap <= last_bg_block) {
3320 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3321 				 "Block bitmap for group %u overlaps "
3322 				 "block group descriptors", i);
3323 			if (!sb_rdonly(sb))
3324 				return 0;
3325 		}
3326 		if (block_bitmap < first_block || block_bitmap > last_block) {
3327 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3328 			       "Block bitmap for group %u not in group "
3329 			       "(block %llu)!", i, block_bitmap);
3330 			return 0;
3331 		}
3332 		inode_bitmap = ext4_inode_bitmap(sb, gdp);
3333 		if (inode_bitmap == sb_block) {
3334 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3335 				 "Inode bitmap for group %u overlaps "
3336 				 "superblock", i);
3337 			if (!sb_rdonly(sb))
3338 				return 0;
3339 		}
3340 		if (inode_bitmap >= sb_block + 1 &&
3341 		    inode_bitmap <= last_bg_block) {
3342 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3343 				 "Inode bitmap for group %u overlaps "
3344 				 "block group descriptors", i);
3345 			if (!sb_rdonly(sb))
3346 				return 0;
3347 		}
3348 		if (inode_bitmap < first_block || inode_bitmap > last_block) {
3349 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3350 			       "Inode bitmap for group %u not in group "
3351 			       "(block %llu)!", i, inode_bitmap);
3352 			return 0;
3353 		}
3354 		inode_table = ext4_inode_table(sb, gdp);
3355 		if (inode_table == sb_block) {
3356 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3357 				 "Inode table for group %u overlaps "
3358 				 "superblock", i);
3359 			if (!sb_rdonly(sb))
3360 				return 0;
3361 		}
3362 		if (inode_table >= sb_block + 1 &&
3363 		    inode_table <= last_bg_block) {
3364 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3365 				 "Inode table for group %u overlaps "
3366 				 "block group descriptors", i);
3367 			if (!sb_rdonly(sb))
3368 				return 0;
3369 		}
3370 		if (inode_table < first_block ||
3371 		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
3372 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3373 			       "Inode table for group %u not in group "
3374 			       "(block %llu)!", i, inode_table);
3375 			return 0;
3376 		}
3377 		ext4_lock_group(sb, i);
3378 		if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
3379 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3380 				 "Checksum for group %u failed (%u!=%u)",
3381 				 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
3382 				     gdp)), le16_to_cpu(gdp->bg_checksum));
3383 			if (!sb_rdonly(sb)) {
3384 				ext4_unlock_group(sb, i);
3385 				return 0;
3386 			}
3387 		}
3388 		ext4_unlock_group(sb, i);
3389 		if (!flexbg_flag)
3390 			first_block += EXT4_BLOCKS_PER_GROUP(sb);
3391 	}
3392 	if (NULL != first_not_zeroed)
3393 		*first_not_zeroed = grp;
3394 	return 1;
3395 }
3396 
3397 /*
3398  * Maximal extent format file size.
3399  * Resulting logical blkno at s_maxbytes must fit in our on-disk
3400  * extent format containers, within a sector_t, and within i_blocks
3401  * in the vfs.  ext4 inode has 48 bits of i_blocks in fsblock units,
3402  * so that won't be a limiting factor.
3403  *
3404  * However, there is another limiting factor: we store extents as a starting
3405  * block plus a length, so the length of an extent covering the maximum file
3406  * size must also fit into the on-disk format containers. Given that the
3407  * length is always one unit bigger than the maximum start block (because
3408  * block 0 counts as well), we have to lower s_maxbytes by one fs block.
3409  *
3410  * Note, this does *not* consider any metadata overhead for vfs i_blocks.
3411  */
3412 static loff_t ext4_max_size(int blkbits, int has_huge_files)
3413 {
3414 	loff_t res;
3415 	loff_t upper_limit = MAX_LFS_FILESIZE;
3416 
3417 	BUILD_BUG_ON(sizeof(blkcnt_t) < sizeof(u64));
3418 
3419 	if (!has_huge_files) {
3420 		upper_limit = (1LL << 32) - 1;
3421 
3422 		/* total blocks in file system block size */
3423 		upper_limit >>= (blkbits - 9);
3424 		upper_limit <<= blkbits;
3425 	}
3426 
3427 	/*
3428 	 * 32-bit extent-start container, ee_block. We lower the maxbytes
3429 	 * by one fs block, so ee_len can cover the extent of maximum file
3430 	 * size
3431 	 */
3432 	res = (1LL << 32) - 1;
3433 	res <<= blkbits;
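	/*
	 * For example, with 4 KiB blocks this is (2^32 - 1) * 4096 bytes,
	 * i.e. 16 TiB minus one filesystem block.
	 */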
3434 
3435 	/* Sanity check against vm- & vfs- imposed limits */
3436 	if (res > upper_limit)
3437 		res = upper_limit;
3438 
3439 	return res;
3440 }
3441 
3442 /*
3443  * Maximal bitmap file size.  There is a direct, and {,double-,triple-}indirect
3444  * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
3445  * We need to be 1 filesystem block less than the 2^48 sector limit.
3446  */
3447 static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
3448 {
3449 	loff_t upper_limit, res = EXT4_NDIR_BLOCKS;
3450 	int meta_blocks;
3451 	unsigned int ppb = 1 << (bits - 2);
3452 
3453 	/*
3454 	 * This is calculated to be the largest file size for a dense, block
3455 	 * mapped file such that the file's total number of 512-byte sectors,
3456 	 * including data and all indirect blocks, does not exceed (2^48 - 1).
3457 	 *
3458 	 * __u32 i_blocks_lo and __u16 i_blocks_high represent the total
3459 	 * number of 512-byte sectors of the file.
3460 	 */
3461 	if (!has_huge_files) {
3462 		/*
3463 		 * !has_huge_files implies that the on-disk i_blocks field
3464 		 * counts 512-byte sectors and is limited to 2^32 sectors,
3465 		 * which is converted to fs blocks just below.
3466 		 */
3467 		upper_limit = (1LL << 32) - 1;
3468 
3469 		/* total blocks in file system block size */
3470 		upper_limit >>= (bits - 9);
3471 
3472 	} else {
3473 		/*
3474 		 * We use the 48-bit ext4_inode i_blocks.
3475 		 * With EXT4_HUGE_FILE_FL set, i_blocks
3476 		 * represents the total number of blocks
3477 		 * in file system block size units.
3478 		 */
3479 		upper_limit = (1LL << 48) - 1;
3480 
3481 	}
3482 
3483 	/* Compute how many blocks we can address by block tree */
3484 	res += ppb;
3485 	res += ppb * ppb;
3486 	res += ((loff_t)ppb) * ppb * ppb;
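	/*
	 * For example, with 4 KiB blocks ppb = 1024, so the block tree can
	 * address 12 + 1024 + 1024^2 + 1024^3 blocks, i.e. roughly 4 TiB of
	 * data once shifted by the block size below.
	 */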
3487 	/* Compute how many metadata blocks are needed */
3488 	meta_blocks = 1;
3489 	meta_blocks += 1 + ppb;
3490 	meta_blocks += 1 + ppb + ppb * ppb;
3491 	/* Does block tree limit file size? */
3492 	if (res + meta_blocks <= upper_limit)
3493 		goto check_lfs;
3494 
3495 	res = upper_limit;
3496 	/* How many metadata blocks are needed for addressing upper_limit? */
3497 	upper_limit -= EXT4_NDIR_BLOCKS;
3498 	/* indirect blocks */
3499 	meta_blocks = 1;
3500 	upper_limit -= ppb;
3501 	/* double indirect blocks */
3502 	if (upper_limit < ppb * ppb) {
3503 		meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb);
3504 		res -= meta_blocks;
3505 		goto check_lfs;
3506 	}
3507 	meta_blocks += 1 + ppb;
3508 	upper_limit -= ppb * ppb;
3509 	/* triple indirect blocks for the rest */
3510 	meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb) +
3511 		DIV_ROUND_UP_ULL(upper_limit, ppb*ppb);
3512 	res -= meta_blocks;
3513 check_lfs:
3514 	res <<= bits;
3515 	if (res > MAX_LFS_FILESIZE)
3516 		res = MAX_LFS_FILESIZE;
3517 
3518 	return res;
3519 }
3520 
3521 static ext4_fsblk_t descriptor_loc(struct super_block *sb,
3522 				   ext4_fsblk_t logical_sb_block, int nr)
3523 {
3524 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3525 	ext4_group_t bg, first_meta_bg;
3526 	int has_super = 0;
3527 
3528 	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
3529 
3530 	if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
3531 		return logical_sb_block + nr + 1;
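	/*
	 * With meta_bg, descriptor block nr is stored in the first group of
	 * meta block group nr, i.e. group s_desc_per_block * nr, right after
	 * that group's backup superblock (if it has one).  For example, with
	 * 4 KiB blocks and 64-byte descriptors (s_desc_per_block == 64),
	 * descriptor block 2 is placed at the start of group 128.
	 */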
3532 	bg = sbi->s_desc_per_block * nr;
3533 	if (ext4_bg_has_super(sb, bg))
3534 		has_super = 1;
3535 
3536 	/*
3537 	 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
3538 	 * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
3539 	 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
3540 	 * compensate.
3541 	 */
3542 	if (sb->s_blocksize == 1024 && nr == 0 &&
3543 	    le32_to_cpu(sbi->s_es->s_first_data_block) == 0)
3544 		has_super++;
3545 
3546 	return (has_super + ext4_group_first_block_no(sb, bg));
3547 }
3548 
3549 /**
3550  * ext4_get_stripe_size: Get the stripe size.
3551  * @sbi: In memory super block info
3552  *
3553  * If the stripe size was specified via a mount option, use that value.
3554  * If the value specified at mount time is greater than the blocks per
3555  * group, fall back to the super block values (stripe width, then stride).
3556  * If the super block value is also greater than blocks per group, return 0.
3557  * The allocator needs it to be less than blocks per group.
3558  *
3559  */
3560 static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
3561 {
3562 	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
3563 	unsigned long stripe_width =
3564 			le32_to_cpu(sbi->s_es->s_raid_stripe_width);
3565 	int ret;
3566 
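	/*
	 * Precedence as implemented below: sbi->s_stripe (the stripe= mount
	 * option), then the RAID stripe width, then the RAID stride from the
	 * superblock.  For example, with no mount option and a superblock
	 * reporting stride 16 and stripe width 64 (in fs blocks), this
	 * returns 64.
	 */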
3567 	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
3568 		ret = sbi->s_stripe;
3569 	else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
3570 		ret = stripe_width;
3571 	else if (stride && stride <= sbi->s_blocks_per_group)
3572 		ret = stride;
3573 	else
3574 		ret = 0;
3575 
3576 	/*
3577 	 * If the stripe width is 1, this makes no sense and
3578 	 * we set it to 0 to turn off stripe handling code.
3579 	 */
3580 	if (ret <= 1)
3581 		ret = 0;
3582 
3583 	return ret;
3584 }
3585 
3586 /*
3587  * Check whether this filesystem can be mounted based on
3588  * the features present and the RDONLY/RDWR mount requested.
3589  * Returns 1 if this filesystem can be mounted as requested,
3590  * 0 if it cannot be.
3591  */
3592 int ext4_feature_set_ok(struct super_block *sb, int readonly)
3593 {
3594 	if (ext4_has_unknown_ext4_incompat_features(sb)) {
3595 		ext4_msg(sb, KERN_ERR,
3596 			"Couldn't mount because of "
3597 			"unsupported optional features (%x)",
3598 			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
3599 			~EXT4_FEATURE_INCOMPAT_SUPP));
3600 		return 0;
3601 	}
3602 
3603 	if (!IS_ENABLED(CONFIG_UNICODE) && ext4_has_feature_casefold(sb)) {
3604 		ext4_msg(sb, KERN_ERR,
3605 			 "Filesystem with casefold feature cannot be "
3606 			 "mounted without CONFIG_UNICODE");
3607 		return 0;
3608 	}
3609 
3610 	if (readonly)
3611 		return 1;
3612 
3613 	if (ext4_has_feature_readonly(sb)) {
3614 		ext4_msg(sb, KERN_INFO, "filesystem is read-only");
3615 		sb->s_flags |= SB_RDONLY;
3616 		return 1;
3617 	}
3618 
3619 	/* Check that feature set is OK for a read-write mount */
3620 	if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
3621 		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
3622 			 "unsupported optional features (%x)",
3623 			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
3624 				~EXT4_FEATURE_RO_COMPAT_SUPP));
3625 		return 0;
3626 	}
3627 	if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
3628 		ext4_msg(sb, KERN_ERR,
3629 			 "Can't support bigalloc feature without "
3630 			 "extents feature");
3631 		return 0;
3632 	}
3633 
3634 #if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
3635 	if (!readonly && (ext4_has_feature_quota(sb) ||
3636 			  ext4_has_feature_project(sb))) {
3637 		ext4_msg(sb, KERN_ERR,
3638 			 "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
3639 		return 0;
3640 	}
3641 #endif  /* !CONFIG_QUOTA || !CONFIG_QFMT_V2 */
3642 	return 1;
3643 }
3644 
3645 /*
3646  * This function is called once a day if we have errors logged
3647  * on the file system
3648  */
3649 static void print_daily_error_info(struct timer_list *t)
3650 {
3651 	struct ext4_sb_info *sbi = timer_container_of(sbi, t, s_err_report);
3652 	struct super_block *sb = sbi->s_sb;
3653 	struct ext4_super_block *es = sbi->s_es;
3654 
3655 	if (es->s_error_count)
3656 		/* fsck newer than v1.41.13 is needed to clean this condition. */
3657 		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
3658 			 le32_to_cpu(es->s_error_count));
3659 	if (es->s_first_error_time) {
3660 		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %llu: %.*s:%d",
3661 		       sb->s_id,
3662 		       ext4_get_tstamp(es, s_first_error_time),
3663 		       (int) sizeof(es->s_first_error_func),
3664 		       es->s_first_error_func,
3665 		       le32_to_cpu(es->s_first_error_line));
3666 		if (es->s_first_error_ino)
3667 			printk(KERN_CONT ": inode %u",
3668 			       le32_to_cpu(es->s_first_error_ino));
3669 		if (es->s_first_error_block)
3670 			printk(KERN_CONT ": block %llu", (unsigned long long)
3671 			       le64_to_cpu(es->s_first_error_block));
3672 		printk(KERN_CONT "\n");
3673 	}
3674 	if (es->s_last_error_time) {
3675 		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %llu: %.*s:%d",
3676 		       sb->s_id,
3677 		       ext4_get_tstamp(es, s_last_error_time),
3678 		       (int) sizeof(es->s_last_error_func),
3679 		       es->s_last_error_func,
3680 		       le32_to_cpu(es->s_last_error_line));
3681 		if (es->s_last_error_ino)
3682 			printk(KERN_CONT ": inode %u",
3683 			       le32_to_cpu(es->s_last_error_ino));
3684 		if (es->s_last_error_block)
3685 			printk(KERN_CONT ": block %llu", (unsigned long long)
3686 			       le64_to_cpu(es->s_last_error_block));
3687 		printk(KERN_CONT "\n");
3688 	}
3689 	mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
3690 }
3691 
3692 /* Find next suitable group and run ext4_init_inode_table */
3693 static int ext4_run_li_request(struct ext4_li_request *elr)
3694 {
3695 	struct ext4_group_desc *gdp = NULL;
3696 	struct super_block *sb = elr->lr_super;
3697 	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
3698 	ext4_group_t group = elr->lr_next_group;
3699 	unsigned int prefetch_ios = 0;
3700 	int ret = 0;
3701 	int nr = EXT4_SB(sb)->s_mb_prefetch;
3702 	u64 start_time;
3703 
3704 	if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) {
3705 		elr->lr_next_group = ext4_mb_prefetch(sb, group, nr, &prefetch_ios);
3706 		ext4_mb_prefetch_fini(sb, elr->lr_next_group, nr);
3707 		trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group, nr);
3708 		if (group >= elr->lr_next_group) {
3709 			ret = 1;
3710 			if (elr->lr_first_not_zeroed != ngroups &&
3711 			    !ext4_emergency_state(sb) && !sb_rdonly(sb) &&
3712 			    test_opt(sb, INIT_INODE_TABLE)) {
3713 				elr->lr_next_group = elr->lr_first_not_zeroed;
3714 				elr->lr_mode = EXT4_LI_MODE_ITABLE;
3715 				ret = 0;
3716 			}
3717 		}
3718 		return ret;
3719 	}
3720 
3721 	for (; group < ngroups; group++) {
3722 		gdp = ext4_get_group_desc(sb, group, NULL);
3723 		if (!gdp) {
3724 			ret = 1;
3725 			break;
3726 		}
3727 
3728 		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
3729 			break;
3730 	}
3731 
3732 	if (group >= ngroups)
3733 		ret = 1;
3734 
3735 	if (!ret) {
3736 		start_time = ktime_get_ns();
3737 		ret = ext4_init_inode_table(sb, group,
3738 					    elr->lr_timeout ? 0 : 1);
3739 		trace_ext4_lazy_itable_init(sb, group);
3740 		if (elr->lr_timeout == 0) {
3741 			elr->lr_timeout = nsecs_to_jiffies((ktime_get_ns() - start_time) *
3742 				EXT4_SB(elr->lr_super)->s_li_wait_mult);
3743 		}
3744 		elr->lr_next_sched = jiffies + elr->lr_timeout;
3745 		elr->lr_next_group = group + 1;
3746 	}
3747 	return ret;
3748 }
3749 
3750 /*
3751  * Remove lr_request from the request list and free the
3752  * request structure. Should be called with li_list_mtx held.
3753  */
3754 static void ext4_remove_li_request(struct ext4_li_request *elr)
3755 {
3756 	if (!elr)
3757 		return;
3758 
3759 	list_del(&elr->lr_request);
3760 	EXT4_SB(elr->lr_super)->s_li_request = NULL;
3761 	kfree(elr);
3762 }
3763 
3764 static void ext4_unregister_li_request(struct super_block *sb)
3765 {
3766 	mutex_lock(&ext4_li_mtx);
3767 	if (!ext4_li_info) {
3768 		mutex_unlock(&ext4_li_mtx);
3769 		return;
3770 	}
3771 
3772 	mutex_lock(&ext4_li_info->li_list_mtx);
3773 	ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
3774 	mutex_unlock(&ext4_li_info->li_list_mtx);
3775 	mutex_unlock(&ext4_li_mtx);
3776 }
3777 
3778 static struct task_struct *ext4_lazyinit_task;
3779 
3780 /*
3781  * This is the function where the ext4lazyinit thread lives. It walks
3782  * through the request list searching for the next scheduled filesystem.
3783  * When such a fs is found, run the lazy initialization request
3784  * (ext4_run_li_request) and keep track of the time spent in this
3785  * function. Based on that time we compute the next schedule time of
3786  * the request. When the walk through the list is complete, compute
3787  * the next wakeup time and go to sleep.
3788  */
3789 static int ext4_lazyinit_thread(void *arg)
3790 {
3791 	struct ext4_lazy_init *eli = arg;
3792 	struct list_head *pos, *n;
3793 	struct ext4_li_request *elr;
3794 	unsigned long next_wakeup, cur;
3795 
3796 	BUG_ON(NULL == eli);
3797 	set_freezable();
3798 
3799 cont_thread:
3800 	while (true) {
3801 		bool next_wakeup_initialized = false;
3802 
3803 		next_wakeup = 0;
3804 		mutex_lock(&eli->li_list_mtx);
3805 		if (list_empty(&eli->li_request_list)) {
3806 			mutex_unlock(&eli->li_list_mtx);
3807 			goto exit_thread;
3808 		}
3809 		list_for_each_safe(pos, n, &eli->li_request_list) {
3810 			int err = 0;
3811 			int progress = 0;
3812 			elr = list_entry(pos, struct ext4_li_request,
3813 					 lr_request);
3814 
3815 			if (time_before(jiffies, elr->lr_next_sched)) {
3816 				if (!next_wakeup_initialized ||
3817 				    time_before(elr->lr_next_sched, next_wakeup)) {
3818 					next_wakeup = elr->lr_next_sched;
3819 					next_wakeup_initialized = true;
3820 				}
3821 				continue;
3822 			}
3823 			if (down_read_trylock(&elr->lr_super->s_umount)) {
3824 				if (sb_start_write_trylock(elr->lr_super)) {
3825 					progress = 1;
3826 					/*
3827 					 * We hold sb->s_umount, sb can not
3828 					 * be removed from the list, it is
3829 					 * now safe to drop li_list_mtx
3830 					 */
3831 					mutex_unlock(&eli->li_list_mtx);
3832 					err = ext4_run_li_request(elr);
3833 					sb_end_write(elr->lr_super);
3834 					mutex_lock(&eli->li_list_mtx);
3835 					n = pos->next;
3836 				}
3837 				up_read((&elr->lr_super->s_umount));
3838 			}
3839 			/* error, remove the lazy_init job */
3840 			if (err) {
3841 				ext4_remove_li_request(elr);
3842 				continue;
3843 			}
3844 			if (!progress) {
3845 				elr->lr_next_sched = jiffies +
3846 					get_random_u32_below(EXT4_DEF_LI_MAX_START_DELAY * HZ);
3847 			}
3848 			if (!next_wakeup_initialized ||
3849 			    time_before(elr->lr_next_sched, next_wakeup)) {
3850 				next_wakeup = elr->lr_next_sched;
3851 				next_wakeup_initialized = true;
3852 			}
3853 		}
3854 		mutex_unlock(&eli->li_list_mtx);
3855 
3856 		try_to_freeze();
3857 
3858 		cur = jiffies;
3859 		if (!next_wakeup_initialized || time_after_eq(cur, next_wakeup)) {
3860 			cond_resched();
3861 			continue;
3862 		}
3863 
3864 		schedule_timeout_interruptible(next_wakeup - cur);
3865 
3866 		if (kthread_should_stop()) {
3867 			ext4_clear_request_list();
3868 			goto exit_thread;
3869 		}
3870 	}
3871 
3872 exit_thread:
3873 	/*
3874 	 * It looks like the request list is empty, but we need
3875 	 * to check it under the li_list_mtx lock, to prevent any
3876 	 * additions into it, and of course we should lock ext4_li_mtx
3877 	 * to atomically free the list and ext4_li_info, because at
3878 	 * this point another ext4 filesystem could be registering
3879  * a new one.
3880 	 */
3881 	mutex_lock(&ext4_li_mtx);
3882 	mutex_lock(&eli->li_list_mtx);
3883 	if (!list_empty(&eli->li_request_list)) {
3884 		mutex_unlock(&eli->li_list_mtx);
3885 		mutex_unlock(&ext4_li_mtx);
3886 		goto cont_thread;
3887 	}
3888 	mutex_unlock(&eli->li_list_mtx);
3889 	kfree(ext4_li_info);
3890 	ext4_li_info = NULL;
3891 	mutex_unlock(&ext4_li_mtx);
3892 
3893 	return 0;
3894 }
3895 
3896 static void ext4_clear_request_list(void)
3897 {
3898 	struct list_head *pos, *n;
3899 	struct ext4_li_request *elr;
3900 
3901 	mutex_lock(&ext4_li_info->li_list_mtx);
3902 	list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
3903 		elr = list_entry(pos, struct ext4_li_request,
3904 				 lr_request);
3905 		ext4_remove_li_request(elr);
3906 	}
3907 	mutex_unlock(&ext4_li_info->li_list_mtx);
3908 }
3909 
3910 static int ext4_run_lazyinit_thread(void)
3911 {
3912 	ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
3913 					 ext4_li_info, "ext4lazyinit");
3914 	if (IS_ERR(ext4_lazyinit_task)) {
3915 		int err = PTR_ERR(ext4_lazyinit_task);
3916 		ext4_clear_request_list();
3917 		kfree(ext4_li_info);
3918 		ext4_li_info = NULL;
3919 		printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
3920 				 "initialization thread\n",
3921 				 err);
3922 		return err;
3923 	}
3924 	ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
3925 	return 0;
3926 }
3927 
3928 /*
3929  * Check whether it makes sense to run the itable init thread or not.
3930  * If there is at least one uninitialized inode table, return the
3931  * corresponding group number, else the loop goes through all
3932  * groups and returns the total number of groups.
3933  */
3934 static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
3935 {
3936 	ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
3937 	struct ext4_group_desc *gdp = NULL;
3938 
3939 	if (!ext4_has_group_desc_csum(sb))
3940 		return ngroups;
3941 
3942 	for (group = 0; group < ngroups; group++) {
3943 		gdp = ext4_get_group_desc(sb, group, NULL);
3944 		if (!gdp)
3945 			continue;
3946 
3947 		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
3948 			break;
3949 	}
3950 
3951 	return group;
3952 }
3953 
3954 static int ext4_li_info_new(void)
3955 {
3956 	struct ext4_lazy_init *eli = NULL;
3957 
3958 	eli = kzalloc(sizeof(*eli), GFP_KERNEL);
3959 	if (!eli)
3960 		return -ENOMEM;
3961 
3962 	INIT_LIST_HEAD(&eli->li_request_list);
3963 	mutex_init(&eli->li_list_mtx);
3964 
3965 	eli->li_state |= EXT4_LAZYINIT_QUIT;
3966 
3967 	ext4_li_info = eli;
3968 
3969 	return 0;
3970 }
3971 
3972 static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
3973 					    ext4_group_t start)
3974 {
3975 	struct ext4_li_request *elr;
3976 
3977 	elr = kzalloc(sizeof(*elr), GFP_KERNEL);
3978 	if (!elr)
3979 		return NULL;
3980 
3981 	elr->lr_super = sb;
3982 	elr->lr_first_not_zeroed = start;
3983 	if (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS)) {
3984 		elr->lr_mode = EXT4_LI_MODE_ITABLE;
3985 		elr->lr_next_group = start;
3986 	} else {
3987 		elr->lr_mode = EXT4_LI_MODE_PREFETCH_BBITMAP;
3988 	}
3989 
3990 	/*
3991 	 * Randomize first schedule time of the request to
3992 	 * spread the inode table initialization requests
3993 	 * better.
3994 	 */
3995 	elr->lr_next_sched = jiffies + get_random_u32_below(EXT4_DEF_LI_MAX_START_DELAY * HZ);
3996 	return elr;
3997 }
3998 
3999 int ext4_register_li_request(struct super_block *sb,
4000 			     ext4_group_t first_not_zeroed)
4001 {
4002 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4003 	struct ext4_li_request *elr = NULL;
4004 	ext4_group_t ngroups = sbi->s_groups_count;
4005 	int ret = 0;
4006 
4007 	mutex_lock(&ext4_li_mtx);
4008 	if (sbi->s_li_request != NULL) {
4009 		/*
4010 		 * Reset timeout so it can be computed again, because
4011 		 * s_li_wait_mult might have changed.
4012 		 */
4013 		sbi->s_li_request->lr_timeout = 0;
4014 		goto out;
4015 	}
4016 
4017 	if (ext4_emergency_state(sb) || sb_rdonly(sb) ||
4018 	    (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS) &&
4019 	     (first_not_zeroed == ngroups || !test_opt(sb, INIT_INODE_TABLE))))
4020 		goto out;
4021 
4022 	elr = ext4_li_request_new(sb, first_not_zeroed);
4023 	if (!elr) {
4024 		ret = -ENOMEM;
4025 		goto out;
4026 	}
4027 
4028 	if (NULL == ext4_li_info) {
4029 		ret = ext4_li_info_new();
4030 		if (ret)
4031 			goto out;
4032 	}
4033 
4034 	mutex_lock(&ext4_li_info->li_list_mtx);
4035 	list_add(&elr->lr_request, &ext4_li_info->li_request_list);
4036 	mutex_unlock(&ext4_li_info->li_list_mtx);
4037 
4038 	sbi->s_li_request = elr;
4039 	/*
4040 	 * Set elr to NULL here since it has been inserted into
4041 	 * the request list; its removal and freeing are handled
4042 	 * by ext4_clear_request_list from now on.
4043 	 */
4044 	elr = NULL;
4045 
4046 	if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
4047 		ret = ext4_run_lazyinit_thread();
4048 		if (ret)
4049 			goto out;
4050 	}
4051 out:
4052 	mutex_unlock(&ext4_li_mtx);
4053 	if (ret)
4054 		kfree(elr);
4055 	return ret;
4056 }
4057 
4058 /*
4059  * We do not need to lock anything since this is called on
4060  * module unload.
4061  */
4062 static void ext4_destroy_lazyinit_thread(void)
4063 {
4064 	/*
4065 	 * If the thread exited earlier,
4066 	 * there's nothing to be done.
4067 	 */
4068 	if (!ext4_li_info || !ext4_lazyinit_task)
4069 		return;
4070 
4071 	kthread_stop(ext4_lazyinit_task);
4072 }
4073 
4074 static int set_journal_csum_feature_set(struct super_block *sb)
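/*
 * Pick the journal checksum format to match the filesystem: metadata_csum
 * implies JBD2 checksum v3, otherwise fall back to the older v1 compat
 * checksum.  journal_async_commit additionally requires the async-commit
 * incompat flag, while plain journal_checksum explicitly clears it.
 */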
4075 {
4076 	int ret = 1;
4077 	int compat, incompat;
4078 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4079 
4080 	if (ext4_has_feature_metadata_csum(sb)) {
4081 		/* journal checksum v3 */
4082 		compat = 0;
4083 		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
4084 	} else {
4085 		/* journal checksum v1 */
4086 		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
4087 		incompat = 0;
4088 	}
4089 
4090 	jbd2_journal_clear_features(sbi->s_journal,
4091 			JBD2_FEATURE_COMPAT_CHECKSUM, 0,
4092 			JBD2_FEATURE_INCOMPAT_CSUM_V3 |
4093 			JBD2_FEATURE_INCOMPAT_CSUM_V2);
4094 	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
4095 		ret = jbd2_journal_set_features(sbi->s_journal,
4096 				compat, 0,
4097 				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
4098 				incompat);
4099 	} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
4100 		ret = jbd2_journal_set_features(sbi->s_journal,
4101 				compat, 0,
4102 				incompat);
4103 		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
4104 				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
4105 	} else {
4106 		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
4107 				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
4108 	}
4109 
4110 	return ret;
4111 }
4112 
4113 /*
4114  * Note: calculating the overhead so we can be compatible with
4115  * historical BSD practice is quite difficult in the face of
4116  * clusters/bigalloc.  This is because multiple metadata blocks from
4117  * different block groups can end up in the same allocation cluster.
4118  * Calculating the exact overhead in the face of clustered allocation
4119  * requires either O(all block bitmaps) in memory or O(number of block
4120  * groups**2) in time.  We will still calculate the overhead for
4121  * older file systems --- and if we come across a bigalloc file
4122  * system with zero in s_overhead_clusters the estimate will be close to
4123  * correct especially for very large cluster sizes --- but for newer
4124  * file systems, it's better to calculate this figure once at mkfs
4125  * time, and store it in the superblock.  If the superblock value is
4126  * present (even for non-bigalloc file systems), we will use it.
4127  */
4128 static int count_overhead(struct super_block *sb, ext4_group_t grp,
4129 			  char *buf)
4130 {
4131 	struct ext4_sb_info	*sbi = EXT4_SB(sb);
4132 	struct ext4_group_desc	*gdp;
4133 	ext4_fsblk_t		first_block, last_block, b;
4134 	ext4_group_t		i, ngroups = ext4_get_groups_count(sb);
4135 	int			s, j, count = 0;
4136 	int			has_super = ext4_bg_has_super(sb, grp);
4137 
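	/*
	 * Without bigalloc, a group's overhead is simply the backup
	 * superblock (if any), the GDT blocks plus reserved GDT blocks
	 * (only where a backup superblock exists), the inode table, and
	 * the block and inode bitmaps (the "+ 2" below).
	 */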
4138 	if (!ext4_has_feature_bigalloc(sb))
4139 		return (has_super + ext4_bg_num_gdb(sb, grp) +
4140 			(has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
4141 			sbi->s_itb_per_group + 2);
4142 
4143 	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
4144 		(grp * EXT4_BLOCKS_PER_GROUP(sb));
4145 	last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
4146 	for (i = 0; i < ngroups; i++) {
4147 		gdp = ext4_get_group_desc(sb, i, NULL);
4148 		b = ext4_block_bitmap(sb, gdp);
4149 		if (b >= first_block && b <= last_block) {
4150 			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
4151 			count++;
4152 		}
4153 		b = ext4_inode_bitmap(sb, gdp);
4154 		if (b >= first_block && b <= last_block) {
4155 			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
4156 			count++;
4157 		}
4158 		b = ext4_inode_table(sb, gdp);
4159 		if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
4160 			for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
4161 				int c = EXT4_B2C(sbi, b - first_block);
4162 				ext4_set_bit(c, buf);
4163 				count++;
4164 			}
4165 		if (i != grp)
4166 			continue;
4167 		s = 0;
4168 		if (ext4_bg_has_super(sb, grp)) {
4169 			ext4_set_bit(s++, buf);
4170 			count++;
4171 		}
4172 		j = ext4_bg_num_gdb(sb, grp);
4173 		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
4174 			ext4_error(sb, "Invalid number of block group "
4175 				   "descriptor blocks: %d", j);
4176 			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
4177 		}
4178 		count += j;
4179 		for (; j > 0; j--)
4180 			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
4181 	}
4182 	if (!count)
4183 		return 0;
4184 	return EXT4_CLUSTERS_PER_GROUP(sb) -
4185 		ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
4186 }
4187 
4188 /*
4189  * Compute the overhead and stash it in sbi->s_overhead
4190  */
4191 int ext4_calculate_overhead(struct super_block *sb)
4192 {
4193 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4194 	struct ext4_super_block *es = sbi->s_es;
4195 	struct inode *j_inode;
4196 	unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
4197 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
4198 	ext4_fsblk_t overhead = 0;
4199 	char *buf = kvmalloc(sb->s_blocksize, GFP_NOFS | __GFP_ZERO);
4200 
4201 	if (!buf)
4202 		return -ENOMEM;
4203 
4204 	/*
4205 	 * Compute the overhead (FS structures).  This is constant
4206 	 * for a given filesystem unless the number of block groups
4207 	 * changes, so we cache the previous value until it does.
4208 	 */
4209 
4210 	/*
4211 	 * All of the blocks before first_data_block are overhead
4212 	 */
4213 	overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
4214 
4215 	/*
4216 	 * Add the overhead found in each block group
4217 	 */
4218 	for (i = 0; i < ngroups; i++) {
4219 		int blks;
4220 
4221 		blks = count_overhead(sb, i, buf);
4222 		overhead += blks;
4223 		if (blks)
4224 			memset(buf, 0, sb->s_blocksize);
4225 		cond_resched();
4226 	}
4227 
4228 	/*
4229 	 * Add the internal journal blocks whether the journal has been
4230 	 * loaded or not
4231 	 */
4232 	if (sbi->s_journal && !sbi->s_journal_bdev_file)
4233 		overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_total_len);
4234 	else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
4235 		/* j_inum for internal journal is non-zero */
4236 		j_inode = ext4_get_journal_inode(sb, j_inum);
4237 		if (!IS_ERR(j_inode)) {
4238 			j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
4239 			overhead += EXT4_NUM_B2C(sbi, j_blocks);
4240 			iput(j_inode);
4241 		} else {
4242 			ext4_msg(sb, KERN_ERR, "can't get journal size");
4243 		}
4244 	}
4245 	sbi->s_overhead = overhead;
4246 	smp_wmb();
4247 	kvfree(buf);
4248 	return 0;
4249 }
4250 
4251 static void ext4_set_resv_clusters(struct super_block *sb)
4252 {
4253 	ext4_fsblk_t resv_clusters;
4254 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4255 
4256 	/*
4257 	 * There's no need to reserve anything when we aren't using extents.
4258 	 * The space estimates are exact, there are no unwritten extents,
4259 	 * hole punching doesn't need new metadata... This is needed especially
4260 	 * to keep ext2/3 backward compatibility.
4261 	 */
4262 	if (!ext4_has_feature_extents(sb))
4263 		return;
4264 	/*
4265 	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
4266 	 * This should cover the situations where we cannot afford to run
4267 	 * out of space, such as punching a hole or converting unwritten
4268 	 * extents in the delalloc path. In most cases such an allocation
4269 	 * requires only 1 or 2 blocks; higher numbers are very rare.
4271 	 */
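	/*
	 * For example, a 1 TiB filesystem with 4 KiB clusters has about 268
	 * million clusters; 2% of that is ~5.4 million, so the 4096-cluster
	 * (16 MiB) cap applies instead.
	 */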
4272 	resv_clusters = (ext4_blocks_count(sbi->s_es) >>
4273 			 sbi->s_cluster_bits);
4274 
4275 	do_div(resv_clusters, 50);
4276 	resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
4277 
4278 	atomic64_set(&sbi->s_resv_clusters, resv_clusters);
4279 }
4280 
4281 static const char *ext4_quota_mode(struct super_block *sb)
4282 {
4283 #ifdef CONFIG_QUOTA
4284 	if (!ext4_quota_capable(sb))
4285 		return "none";
4286 
4287 	if (EXT4_SB(sb)->s_journal && ext4_is_quota_journalled(sb))
4288 		return "journalled";
4289 	else
4290 		return "writeback";
4291 #else
4292 	return "disabled";
4293 #endif
4294 }
4295 
4296 static void ext4_setup_csum_trigger(struct super_block *sb,
4297 				    enum ext4_journal_trigger_type type,
4298 				    void (*trigger)(
4299 					struct jbd2_buffer_trigger_type *type,
4300 					struct buffer_head *bh,
4301 					void *mapped_data,
4302 					size_t size))
4303 {
4304 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4305 
4306 	sbi->s_journal_triggers[type].sb = sb;
4307 	sbi->s_journal_triggers[type].tr_triggers.t_frozen = trigger;
4308 }
4309 
4310 static void ext4_free_sbi(struct ext4_sb_info *sbi)
4311 {
4312 	if (!sbi)
4313 		return;
4314 
4315 	kfree(sbi->s_blockgroup_lock);
4316 	fs_put_dax(sbi->s_daxdev, NULL);
4317 	kfree(sbi);
4318 }
4319 
4320 static struct ext4_sb_info *ext4_alloc_sbi(struct super_block *sb)
4321 {
4322 	struct ext4_sb_info *sbi;
4323 
4324 	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
4325 	if (!sbi)
4326 		return NULL;
4327 
4328 	sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off,
4329 					   NULL, NULL);
4330 
4331 	sbi->s_blockgroup_lock =
4332 		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
4333 
4334 	if (!sbi->s_blockgroup_lock)
4335 		goto err_out;
4336 
4337 	sb->s_fs_info = sbi;
4338 	sbi->s_sb = sb;
4339 	return sbi;
4340 err_out:
4341 	fs_put_dax(sbi->s_daxdev, NULL);
4342 	kfree(sbi);
4343 	return NULL;
4344 }
4345 
4346 static void ext4_set_def_opts(struct super_block *sb,
4347 			      struct ext4_super_block *es)
4348 {
4349 	unsigned long def_mount_opts;
4350 
4351 	/* Set defaults before we parse the mount options */
4352 	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
4353 	set_opt(sb, INIT_INODE_TABLE);
4354 	if (def_mount_opts & EXT4_DEFM_DEBUG)
4355 		set_opt(sb, DEBUG);
4356 	if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
4357 		set_opt(sb, GRPID);
4358 	if (def_mount_opts & EXT4_DEFM_UID16)
4359 		set_opt(sb, NO_UID32);
4360 	/* xattr user namespace & acls are now defaulted on */
4361 	set_opt(sb, XATTR_USER);
4362 #ifdef CONFIG_EXT4_FS_POSIX_ACL
4363 	set_opt(sb, POSIX_ACL);
4364 #endif
4365 	if (ext4_has_feature_fast_commit(sb))
4366 		set_opt2(sb, JOURNAL_FAST_COMMIT);
4367 	/* don't forget to enable journal_csum when metadata_csum is enabled. */
4368 	if (ext4_has_feature_metadata_csum(sb))
4369 		set_opt(sb, JOURNAL_CHECKSUM);
4370 
4371 	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
4372 		set_opt(sb, JOURNAL_DATA);
4373 	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
4374 		set_opt(sb, ORDERED_DATA);
4375 	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
4376 		set_opt(sb, WRITEBACK_DATA);
4377 
4378 	if (le16_to_cpu(es->s_errors) == EXT4_ERRORS_PANIC)
4379 		set_opt(sb, ERRORS_PANIC);
4380 	else if (le16_to_cpu(es->s_errors) == EXT4_ERRORS_CONTINUE)
4381 		set_opt(sb, ERRORS_CONT);
4382 	else
4383 		set_opt(sb, ERRORS_RO);
4384 	/* block_validity enabled by default; disable with noblock_validity */
4385 	set_opt(sb, BLOCK_VALIDITY);
4386 	if (def_mount_opts & EXT4_DEFM_DISCARD)
4387 		set_opt(sb, DISCARD);
4388 
4389 	if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
4390 		set_opt(sb, BARRIER);
4391 
4392 	/*
4393 	 * enable delayed allocation by default
4394 	 * Use -o nodelalloc to turn it off
4395 	 */
4396 	if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
4397 	    ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
4398 		set_opt(sb, DELALLOC);
4399 
4400 	set_opt(sb, DIOREAD_NOLOCK);
4401 }
4402 
4403 static int ext4_handle_clustersize(struct super_block *sb)
4404 {
4405 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4406 	struct ext4_super_block *es = sbi->s_es;
4407 	int clustersize;
4408 
4409 	/* Handle clustersize */
4410 	clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
4411 	if (ext4_has_feature_bigalloc(sb)) {
4412 		if (clustersize < sb->s_blocksize) {
4413 			ext4_msg(sb, KERN_ERR,
4414 				 "cluster size (%d) smaller than "
4415 				 "block size (%lu)", clustersize, sb->s_blocksize);
4416 			return -EINVAL;
4417 		}
4418 		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
4419 			le32_to_cpu(es->s_log_block_size);
4420 	} else {
4421 		if (clustersize != sb->s_blocksize) {
4422 			ext4_msg(sb, KERN_ERR,
4423 				 "fragment/cluster size (%d) != "
4424 				 "block size (%lu)", clustersize, sb->s_blocksize);
4425 			return -EINVAL;
4426 		}
4427 		if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
4428 			ext4_msg(sb, KERN_ERR,
4429 				 "#blocks per group too big: %lu",
4430 				 sbi->s_blocks_per_group);
4431 			return -EINVAL;
4432 		}
4433 		sbi->s_cluster_bits = 0;
4434 	}
4435 	sbi->s_clusters_per_group = le32_to_cpu(es->s_clusters_per_group);
4436 	if (sbi->s_clusters_per_group > sb->s_blocksize * 8) {
4437 		ext4_msg(sb, KERN_ERR, "#clusters per group too big: %lu",
4438 			 sbi->s_clusters_per_group);
4439 		return -EINVAL;
4440 	}
4441 	if (sbi->s_blocks_per_group !=
4442 	    (sbi->s_clusters_per_group * (clustersize / sb->s_blocksize))) {
4443 		ext4_msg(sb, KERN_ERR,
4444 			 "blocks per group (%lu) and clusters per group (%lu) inconsistent",
4445 			 sbi->s_blocks_per_group, sbi->s_clusters_per_group);
4446 		return -EINVAL;
4447 	}
4448 	sbi->s_cluster_ratio = clustersize / sb->s_blocksize;
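	/*
	 * For example, bigalloc with 4 KiB blocks and 64 KiB clusters gives
	 * s_cluster_bits = 4 and s_cluster_ratio = 16.
	 */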
4449 
4450 	/* Do we have standard group size of clustersize * 8 blocks ? */
4451 	if (sbi->s_blocks_per_group == clustersize << 3)
4452 		set_opt2(sb, STD_GROUP_SIZE);
4453 
4454 	return 0;
4455 }
4456 
4457 /*
4458  * ext4_atomic_write_init: Initializes filesystem min & max atomic write units.
4459  * On a non-bigalloc filesystem the awu will be based upon the filesystem
4460  * block size & bdev awu units.
4461  * With bigalloc it will be based upon the bigalloc cluster size & bdev awu units.
4462  * @sb: super block
4463  */
4464 static void ext4_atomic_write_init(struct super_block *sb)
4465 {
4466 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4467 	struct block_device *bdev = sb->s_bdev;
4468 	unsigned int clustersize = EXT4_CLUSTER_SIZE(sb);
4469 
4470 	if (!bdev_can_atomic_write(bdev))
4471 		return;
4472 
4473 	if (!ext4_has_feature_extents(sb))
4474 		return;
4475 
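	/*
	 * For example, assuming a device advertising 4 KiB..64 KiB atomic
	 * writes: with 4 KiB blocks and no bigalloc, awu_min = awu_max = 4 KiB;
	 * with 64 KiB bigalloc clusters, awu_max grows to 64 KiB.
	 */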
4476 	sbi->s_awu_min = max(sb->s_blocksize,
4477 			      bdev_atomic_write_unit_min_bytes(bdev));
4478 	sbi->s_awu_max = min(clustersize,
4479 			      bdev_atomic_write_unit_max_bytes(bdev));
4480 	if (sbi->s_awu_min && sbi->s_awu_max &&
4481 	    sbi->s_awu_min <= sbi->s_awu_max) {
4482 		ext4_msg(sb, KERN_NOTICE, "Supports (experimental) DIO atomic writes awu_min: %u, awu_max: %u",
4483 			 sbi->s_awu_min, sbi->s_awu_max);
4484 	} else {
4485 		sbi->s_awu_min = 0;
4486 		sbi->s_awu_max = 0;
4487 	}
4488 }
4489 
4490 static void ext4_fast_commit_init(struct super_block *sb)
4491 {
4492 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4493 
4494 	/* Initialize fast commit stuff */
4495 	atomic_set(&sbi->s_fc_subtid, 0);
4496 	INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_MAIN]);
4497 	INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_STAGING]);
4498 	INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_MAIN]);
4499 	INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]);
4500 	sbi->s_fc_bytes = 0;
4501 	ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
4502 	sbi->s_fc_ineligible_tid = 0;
4503 	mutex_init(&sbi->s_fc_lock);
4504 	memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats));
4505 	sbi->s_fc_replay_state.fc_regions = NULL;
4506 	sbi->s_fc_replay_state.fc_regions_size = 0;
4507 	sbi->s_fc_replay_state.fc_regions_used = 0;
4508 	sbi->s_fc_replay_state.fc_regions_valid = 0;
4509 	sbi->s_fc_replay_state.fc_modified_inodes = NULL;
4510 	sbi->s_fc_replay_state.fc_modified_inodes_size = 0;
4511 	sbi->s_fc_replay_state.fc_modified_inodes_used = 0;
4512 }
4513 
4514 static int ext4_inode_info_init(struct super_block *sb,
4515 				struct ext4_super_block *es)
4516 {
4517 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4518 
4519 	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
4520 		sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
4521 		sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
4522 	} else {
4523 		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
4524 		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
4525 		if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
4526 			ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
4527 				 sbi->s_first_ino);
4528 			return -EINVAL;
4529 		}
4530 		if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
4531 		    (!is_power_of_2(sbi->s_inode_size)) ||
4532 		    (sbi->s_inode_size > sb->s_blocksize)) {
4533 			ext4_msg(sb, KERN_ERR,
4534 			       "unsupported inode size: %d",
4535 			       sbi->s_inode_size);
4536 			ext4_msg(sb, KERN_ERR, "blocksize: %lu", sb->s_blocksize);
4537 			return -EINVAL;
4538 		}
4539 		/*
4540 		 * i_atime_extra is the last extra field available for
4541 		 * [acm]times in struct ext4_inode. Checking for that
4542 		 * field should suffice to ensure we have extra space
4543 		 * for all three.
4544 		 */
4545 		if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) +
4546 			sizeof(((struct ext4_inode *)0)->i_atime_extra)) {
4547 			sb->s_time_gran = 1;
4548 			sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX;
4549 		} else {
4550 			sb->s_time_gran = NSEC_PER_SEC;
4551 			sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX;
4552 		}
4553 		sb->s_time_min = EXT4_TIMESTAMP_MIN;
4554 	}
4555 
4556 	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
4557 		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
4558 			EXT4_GOOD_OLD_INODE_SIZE;
4559 		if (ext4_has_feature_extra_isize(sb)) {
4560 			unsigned v, max = (sbi->s_inode_size -
4561 					   EXT4_GOOD_OLD_INODE_SIZE);
4562 
4563 			v = le16_to_cpu(es->s_want_extra_isize);
4564 			if (v > max) {
4565 				ext4_msg(sb, KERN_ERR,
4566 					 "bad s_want_extra_isize: %d", v);
4567 				return -EINVAL;
4568 			}
4569 			if (sbi->s_want_extra_isize < v)
4570 				sbi->s_want_extra_isize = v;
4571 
4572 			v = le16_to_cpu(es->s_min_extra_isize);
4573 			if (v > max) {
4574 				ext4_msg(sb, KERN_ERR,
4575 					 "bad s_min_extra_isize: %d", v);
4576 				return -EINVAL;
4577 			}
4578 			if (sbi->s_want_extra_isize < v)
4579 				sbi->s_want_extra_isize = v;
4580 		}
4581 	}
4582 
4583 	return 0;
4584 }
4585 
4586 #if IS_ENABLED(CONFIG_UNICODE)
4587 static int ext4_encoding_init(struct super_block *sb, struct ext4_super_block *es)
4588 {
4589 	const struct ext4_sb_encodings *encoding_info;
4590 	struct unicode_map *encoding;
4591 	__u16 encoding_flags = le16_to_cpu(es->s_encoding_flags);
4592 
4593 	if (!ext4_has_feature_casefold(sb) || sb->s_encoding)
4594 		return 0;
4595 
4596 	encoding_info = ext4_sb_read_encoding(es);
4597 	if (!encoding_info) {
4598 		ext4_msg(sb, KERN_ERR,
4599 			"Encoding requested by superblock is unknown");
4600 		return -EINVAL;
4601 	}
4602 
4603 	encoding = utf8_load(encoding_info->version);
4604 	if (IS_ERR(encoding)) {
4605 		ext4_msg(sb, KERN_ERR,
4606 			"can't mount with superblock charset: %s-%u.%u.%u "
4607 			"not supported by the kernel. flags: 0x%x.",
4608 			encoding_info->name,
4609 			unicode_major(encoding_info->version),
4610 			unicode_minor(encoding_info->version),
4611 			unicode_rev(encoding_info->version),
4612 			encoding_flags);
4613 		return -EINVAL;
4614 	}
4615 	ext4_msg(sb, KERN_INFO, "Using encoding defined by superblock: "
4616 		"%s-%u.%u.%u with flags 0x%hx", encoding_info->name,
4617 		unicode_major(encoding_info->version),
4618 		unicode_minor(encoding_info->version),
4619 		unicode_rev(encoding_info->version),
4620 		encoding_flags);
4621 
4622 	sb->s_encoding = encoding;
4623 	sb->s_encoding_flags = encoding_flags;
4624 
4625 	return 0;
4626 }
4627 #else
4628 static inline int ext4_encoding_init(struct super_block *sb, struct ext4_super_block *es)
4629 {
4630 	return 0;
4631 }
4632 #endif
4633 
4634 static int ext4_init_metadata_csum(struct super_block *sb, struct ext4_super_block *es)
4635 {
4636 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4637 
4638 	/* Warn if metadata_csum and gdt_csum are both set. */
4639 	if (ext4_has_feature_metadata_csum(sb) &&
4640 	    ext4_has_feature_gdt_csum(sb))
4641 		ext4_warning(sb, "metadata_csum and uninit_bg are "
4642 			     "redundant flags; please run fsck.");
4643 
4644 	/* Check for a known checksum algorithm */
4645 	if (!ext4_verify_csum_type(sb, es)) {
4646 		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
4647 			 "unknown checksum algorithm.");
4648 		return -EINVAL;
4649 	}
4650 	ext4_setup_csum_trigger(sb, EXT4_JTR_ORPHAN_FILE,
4651 				ext4_orphan_file_block_trigger);
4652 
4653 	/* Check superblock checksum */
4654 	if (!ext4_superblock_csum_verify(sb, es)) {
4655 		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
4656 			 "invalid superblock checksum.  Run e2fsck?");
4657 		return -EFSBADCRC;
4658 	}
4659 
4660 	/* Precompute checksum seed for all metadata */
4661 	if (ext4_has_feature_csum_seed(sb))
4662 		sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
4663 	else if (ext4_has_feature_metadata_csum(sb) ||
4664 		 ext4_has_feature_ea_inode(sb))
4665 		sbi->s_csum_seed = ext4_chksum(~0, es->s_uuid,
4666 					       sizeof(es->s_uuid));
4667 	return 0;
4668 }
4669 
4670 static int ext4_check_feature_compatibility(struct super_block *sb,
4671 					    struct ext4_super_block *es,
4672 					    int silent)
4673 {
4674 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4675 
4676 	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
4677 	    (ext4_has_compat_features(sb) ||
4678 	     ext4_has_ro_compat_features(sb) ||
4679 	     ext4_has_incompat_features(sb)))
4680 		ext4_msg(sb, KERN_WARNING,
4681 		       "feature flags set on rev 0 fs, "
4682 		       "running e2fsck is recommended");
4683 
4684 	if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
4685 		set_opt2(sb, HURD_COMPAT);
4686 		if (ext4_has_feature_64bit(sb)) {
4687 			ext4_msg(sb, KERN_ERR,
4688 				 "The Hurd can't support 64-bit file systems");
4689 			return -EINVAL;
4690 		}
4691 
4692 		/*
4693 		 * ea_inode feature uses l_i_version field which is not
4694 		 * available in HURD_COMPAT mode.
4695 		 */
4696 		if (ext4_has_feature_ea_inode(sb)) {
4697 			ext4_msg(sb, KERN_ERR,
4698 				 "ea_inode feature is not supported for Hurd");
4699 			return -EINVAL;
4700 		}
4701 	}
4702 
4703 	if (IS_EXT2_SB(sb)) {
4704 		if (ext2_feature_set_ok(sb))
4705 			ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
4706 				 "using the ext4 subsystem");
4707 		else {
4708 			/*
4709 			 * If we're probing, be silent if this looks like
4710 			 * it's actually an ext[34] filesystem.
4711 			 */
4712 			if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
4713 				return -EINVAL;
4714 			ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
4715 				 "to feature incompatibilities");
4716 			return -EINVAL;
4717 		}
4718 	}
4719 
4720 	if (IS_EXT3_SB(sb)) {
4721 		if (ext3_feature_set_ok(sb))
4722 			ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
4723 				 "using the ext4 subsystem");
4724 		else {
4725 			/*
4726 			 * If we're probing, be silent if this looks like
4727 			 * it's actually an ext4 filesystem.
4728 			 */
4729 			if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
4730 				return -EINVAL;
4731 			ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
4732 				 "to feature incompatibilities");
4733 			return -EINVAL;
4734 		}
4735 	}
4736 
4737 	/*
4738 	 * Check feature flags regardless of the revision level, since we
4739 	 * previously didn't change the revision level when setting the flags,
4740 	 * so there is a chance incompat flags are set on a rev 0 filesystem.
4741 	 */
4742 	if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
4743 		return -EINVAL;
4744 
4745 	if (sbi->s_daxdev) {
4746 		if (sb->s_blocksize == PAGE_SIZE)
4747 			set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags);
4748 		else
4749 			ext4_msg(sb, KERN_ERR, "unsupported blocksize for DAX");
4750 	}
4751 
4752 	if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) {
4753 		if (ext4_has_feature_inline_data(sb)) {
4754 			ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
4755 					" that may contain inline data");
4756 			return -EINVAL;
4757 		}
4758 		if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) {
4759 			ext4_msg(sb, KERN_ERR,
4760 				"DAX unsupported by block device.");
4761 			return -EINVAL;
4762 		}
4763 	}
4764 
4765 	if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
4766 		ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
4767 			 es->s_encryption_level);
4768 		return -EINVAL;
4769 	}
4770 
4771 	return 0;
4772 }
4773 
4774 static int ext4_check_geometry(struct super_block *sb,
4775 			       struct ext4_super_block *es)
4776 {
4777 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4778 	__u64 blocks_count;
4779 	int err;
4780 
4781 	if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (sb->s_blocksize / 4)) {
4782 		ext4_msg(sb, KERN_ERR,
4783 			 "Number of reserved GDT blocks insanely large: %d",
4784 			 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
4785 		return -EINVAL;
4786 	}
4787 	/*
4788 	 * Test whether we have more sectors than will fit in sector_t,
4789 	 * and whether the max offset is addressable by the page cache.
4790 	 */
4791 	err = generic_check_addressable(sb->s_blocksize_bits,
4792 					ext4_blocks_count(es));
4793 	if (err) {
4794 		ext4_msg(sb, KERN_ERR, "filesystem"
4795 			 " too large to mount safely on this system");
4796 		return err;
4797 	}
4798 
4799 	/* check blocks count against device size */
4800 	blocks_count = sb_bdev_nr_blocks(sb);
4801 	if (blocks_count && ext4_blocks_count(es) > blocks_count) {
4802 		ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
4803 		       "exceeds size of device (%llu blocks)",
4804 		       ext4_blocks_count(es), blocks_count);
4805 		return -EINVAL;
4806 	}
4807 
4808 	/*
4809 	 * It makes no sense for the first data block to be beyond the end
4810 	 * of the filesystem.
4811 	 */
4812 	if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
4813 		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
4814 			 "block %u is beyond end of filesystem (%llu)",
4815 			 le32_to_cpu(es->s_first_data_block),
4816 			 ext4_blocks_count(es));
4817 		return -EINVAL;
4818 	}
4819 	if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
4820 	    (sbi->s_cluster_ratio == 1)) {
4821 		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
4822 			 "block is 0 with a 1k block and cluster size");
4823 		return -EINVAL;
4824 	}
4825 
4826 	blocks_count = (ext4_blocks_count(es) -
4827 			le32_to_cpu(es->s_first_data_block) +
4828 			EXT4_BLOCKS_PER_GROUP(sb) - 1);
4829 	do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
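	/*
	 * For example, with 4 KiB blocks (32768 blocks per group), an 8 TiB
	 * filesystem of 2^31 blocks rounds up to 65536 block groups here.
	 */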
4830 	if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
4831 		ext4_msg(sb, KERN_WARNING, "groups count too large: %llu "
4832 		       "(block count %llu, first data block %u, "
4833 		       "blocks per group %lu)", blocks_count,
4834 		       ext4_blocks_count(es),
4835 		       le32_to_cpu(es->s_first_data_block),
4836 		       EXT4_BLOCKS_PER_GROUP(sb));
4837 		return -EINVAL;
4838 	}
4839 	sbi->s_groups_count = blocks_count;
4840 	sbi->s_blockfile_groups = min(sbi->s_groups_count,
4841 			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
4842 	if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
4843 	    le32_to_cpu(es->s_inodes_count)) {
4844 		ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
4845 			 le32_to_cpu(es->s_inodes_count),
4846 			 ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
4847 		return -EINVAL;
4848 	}
4849 
4850 	return 0;
4851 }
4852 
4853 static int ext4_group_desc_init(struct super_block *sb,
4854 				struct ext4_super_block *es,
4855 				ext4_fsblk_t logical_sb_block,
4856 				ext4_group_t *first_not_zeroed)
4857 {
4858 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4859 	unsigned int db_count;
4860 	ext4_fsblk_t block;
4861 	int i;
4862 
4863 	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
4864 		   EXT4_DESC_PER_BLOCK(sb);
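	/*
	 * For example, 65536 groups with 4 KiB blocks and 64-byte descriptors
	 * (the 64bit feature) need 65536 / 64 = 1024 descriptor blocks.
	 */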
4865 	if (ext4_has_feature_meta_bg(sb)) {
4866 		if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
4867 			ext4_msg(sb, KERN_WARNING,
4868 				 "first meta block group too large: %u "
4869 				 "(group descriptor block count %u)",
4870 				 le32_to_cpu(es->s_first_meta_bg), db_count);
4871 			return -EINVAL;
4872 		}
4873 	}
4874 	rcu_assign_pointer(sbi->s_group_desc,
4875 			   kvmalloc_array(db_count,
4876 					  sizeof(struct buffer_head *),
4877 					  GFP_KERNEL));
4878 	if (sbi->s_group_desc == NULL) {
4879 		ext4_msg(sb, KERN_ERR, "not enough memory");
4880 		return -ENOMEM;
4881 	}
4882 
4883 	bgl_lock_init(sbi->s_blockgroup_lock);
4884 
4885 	/* Pre-read the descriptors into the buffer cache */
4886 	for (i = 0; i < db_count; i++) {
4887 		block = descriptor_loc(sb, logical_sb_block, i);
4888 		ext4_sb_breadahead_unmovable(sb, block);
4889 	}
4890 
4891 	for (i = 0; i < db_count; i++) {
4892 		struct buffer_head *bh;
4893 
4894 		block = descriptor_loc(sb, logical_sb_block, i);
4895 		bh = ext4_sb_bread_unmovable(sb, block);
4896 		if (IS_ERR(bh)) {
4897 			ext4_msg(sb, KERN_ERR,
4898 			       "can't read group descriptor %d", i);
4899 			sbi->s_gdb_count = i;
4900 			return PTR_ERR(bh);
4901 		}
4902 		rcu_read_lock();
4903 		rcu_dereference(sbi->s_group_desc)[i] = bh;
4904 		rcu_read_unlock();
4905 	}
4906 	sbi->s_gdb_count = db_count;
4907 	if (!ext4_check_descriptors(sb, logical_sb_block, first_not_zeroed)) {
4908 		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
4909 		return -EFSCORRUPTED;
4910 	}
4911 
4912 	return 0;
4913 }
4914 
4915 static int ext4_load_and_init_journal(struct super_block *sb,
4916 				      struct ext4_super_block *es,
4917 				      struct ext4_fs_context *ctx)
4918 {
4919 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4920 	int err;
4921 
4922 	err = ext4_load_journal(sb, es, ctx->journal_devnum);
4923 	if (err)
4924 		return err;
4925 
4926 	if (ext4_has_feature_64bit(sb) &&
4927 	    !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
4928 				       JBD2_FEATURE_INCOMPAT_64BIT)) {
4929 		ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
4930 		goto out;
4931 	}
4932 
4933 	if (!set_journal_csum_feature_set(sb)) {
4934 		ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
4935 			 "feature set");
4936 		goto out;
4937 	}
4938 
4939 	if (test_opt2(sb, JOURNAL_FAST_COMMIT) &&
4940 		!jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
4941 					  JBD2_FEATURE_INCOMPAT_FAST_COMMIT)) {
4942 		ext4_msg(sb, KERN_ERR,
4943 			"Failed to set fast commit journal feature");
4944 		goto out;
4945 	}
4946 
4947 	/* We have now updated the journal if required, so we can
4948 	 * validate the data journaling mode. */
4949 	switch (test_opt(sb, DATA_FLAGS)) {
4950 	case 0:
4951 		/* No mode set, assume a default based on the journal
4952 		 * capabilities: ORDERED_DATA if the journal can
4953 		 * cope, else JOURNAL_DATA
4954 		 */
4955 		if (jbd2_journal_check_available_features
4956 		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
4957 			set_opt(sb, ORDERED_DATA);
4958 			sbi->s_def_mount_opt |= EXT4_MOUNT_ORDERED_DATA;
4959 		} else {
4960 			set_opt(sb, JOURNAL_DATA);
4961 			sbi->s_def_mount_opt |= EXT4_MOUNT_JOURNAL_DATA;
4962 		}
4963 		break;
4964 
4965 	case EXT4_MOUNT_ORDERED_DATA:
4966 	case EXT4_MOUNT_WRITEBACK_DATA:
4967 		if (!jbd2_journal_check_available_features
4968 		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
4969 			ext4_msg(sb, KERN_ERR, "Journal does not support "
4970 			       "requested data journaling mode");
4971 			goto out;
4972 		}
4973 		break;
4974 	default:
4975 		break;
4976 	}
4977 
4978 	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
4979 	    test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
4980 		ext4_msg(sb, KERN_ERR, "can't mount with "
4981 			"journal_async_commit in data=ordered mode");
4982 		goto out;
4983 	}
4984 
4985 	set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio);
4986 
4987 	sbi->s_journal->j_submit_inode_data_buffers =
4988 		ext4_journal_submit_inode_data_buffers;
4989 	sbi->s_journal->j_finish_inode_data_buffers =
4990 		ext4_journal_finish_inode_data_buffers;
4991 
4992 	return 0;
4993 
4994 out:
4995 	ext4_journal_destroy(sbi, sbi->s_journal);
4996 	return -EINVAL;
4997 }
4998 
4999 static int ext4_check_journal_data_mode(struct super_block *sb)
5000 {
5001 	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
5002 		printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with "
5003 			    "data=journal disables delayed allocation, "
5004 			    "dioread_nolock, O_DIRECT and fast_commit support!\n");
5005 		/* can't mount with both data=journal and dioread_nolock. */
5006 		clear_opt(sb, DIOREAD_NOLOCK);
5007 		clear_opt2(sb, JOURNAL_FAST_COMMIT);
5008 		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
5009 			ext4_msg(sb, KERN_ERR, "can't mount with "
5010 				 "both data=journal and delalloc");
5011 			return -EINVAL;
5012 		}
5013 		if (test_opt(sb, DAX_ALWAYS)) {
5014 			ext4_msg(sb, KERN_ERR, "can't mount with "
5015 				 "both data=journal and dax");
5016 			return -EINVAL;
5017 		}
5018 		if (ext4_has_feature_encrypt(sb)) {
5019 			ext4_msg(sb, KERN_WARNING,
5020 				 "encrypted files will use data=ordered "
5021 				 "instead of data journaling mode");
5022 		}
5023 		if (test_opt(sb, DELALLOC))
5024 			clear_opt(sb, DELALLOC);
5025 	} else {
5026 		sb->s_iflags |= SB_I_CGROUPWB;
5027 	}
5028 
5029 	return 0;
5030 }
5031 
5032 static const char *ext4_has_journal_option(struct super_block *sb)
5033 {
5034 	struct ext4_sb_info *sbi = EXT4_SB(sb);
5035 
5036 	if (test_opt(sb, JOURNAL_ASYNC_COMMIT))
5037 		return "journal_async_commit";
5038 	if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM))
5039 		return "journal_checksum";
5040 	if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
5041 		return "commit=";
5042 	if (EXT4_MOUNT_DATA_FLAGS &
5043 	    (sbi->s_mount_opt ^ sbi->s_def_mount_opt))
5044 		return "data=";
5045 	if (test_opt(sb, DATA_ERR_ABORT))
5046 		return "data_err=abort";
5047 	return NULL;
5048 }
5049 
5050 /*
5051  * Limit the maximum folio order to 2048 blocks to prevent overestimation
5052  * of reserved handle credits during folio writeback in environments
5053  * where PAGE_SIZE exceeds 4KB.
5054  */
5055 #define EXT4_MAX_PAGECACHE_ORDER(sb)		\
5056 		umin(MAX_PAGECACHE_ORDER, (11 + (sb)->s_blocksize_bits - PAGE_SHIFT))
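/*
 * In data=journal mode, stay at the minimum folio order; the data
 * journalling writeback path is not expected to cope with large folios.
 * The other journaling modes may use folios up to the limit computed above.
 */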
5057 static void ext4_set_max_mapping_order(struct super_block *sb)
5058 {
5059 	struct ext4_sb_info *sbi = EXT4_SB(sb);
5060 
5061 	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
5062 		sbi->s_max_folio_order = sbi->s_min_folio_order;
5063 	else
5064 		sbi->s_max_folio_order = EXT4_MAX_PAGECACHE_ORDER(sb);
5065 }
5066 
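/*
 * Block sizes larger than the page size require large folio support in the
 * page cache.  Refuse the mount if a feature that is incompatible with
 * large folios (currently encryption) is enabled on such a filesystem;
 * otherwise set up the maximum mapping order.
 */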
5067 static int ext4_check_large_folio(struct super_block *sb)
5068 {
5069 	const char *err_str = NULL;
5070 
5071 	if (ext4_has_feature_encrypt(sb))
5072 		err_str = "encrypt";
5073 
5074 	if (!err_str) {
5075 		ext4_set_max_mapping_order(sb);
5076 	} else if (sb->s_blocksize > PAGE_SIZE) {
5077 		ext4_msg(sb, KERN_ERR, "bs(%lu) > ps(%lu) unsupported for %s",
5078 			 sb->s_blocksize, PAGE_SIZE, err_str);
5079 		return -EINVAL;
5080 	}
5081 
5082 	return 0;
5083 }
5084 
5085 static int ext4_load_super(struct super_block *sb, ext4_fsblk_t *lsb,
5086 			   int silent)
5087 {
5088 	struct ext4_sb_info *sbi = EXT4_SB(sb);
5089 	struct ext4_super_block *es;
5090 	ext4_fsblk_t logical_sb_block;
5091 	unsigned long offset = 0;
5092 	struct buffer_head *bh;
5093 	int ret = -EINVAL;
5094 	int blocksize;
5095 
5096 	blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
5097 	if (!blocksize) {
5098 		ext4_msg(sb, KERN_ERR, "unable to set blocksize");
5099 		return -EINVAL;
5100 	}
5101 
5102 	/*
5103 	 * The ext4 superblock will not be buffer aligned for block sizes other
5104 	 * than 1kB, so we need to calculate the offset from the buffer start.
5105 	 */
5106 	if (blocksize != EXT4_MIN_BLOCK_SIZE) {
5107 		logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE;
5108 		offset = do_div(logical_sb_block, blocksize);
5109 	} else {
5110 		logical_sb_block = sbi->s_sb_block;
5111 	}
5112 
5113 	bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
5114 	if (IS_ERR(bh)) {
5115 		ext4_msg(sb, KERN_ERR, "unable to read superblock");
5116 		return PTR_ERR(bh);
5117 	}
5118 	/*
5119 	 * Note: s_es must be initialized as soon as possible because
5120 	 *       some ext4 macros depend on its value
5121 	 */
5122 	es = (struct ext4_super_block *) (bh->b_data + offset);
5123 	sbi->s_es = es;
5124 	sb->s_magic = le16_to_cpu(es->s_magic);
5125 	if (sb->s_magic != EXT4_SUPER_MAGIC) {
5126 		if (!silent)
5127 			ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
5128 		goto out;
5129 	}
5130 
5131 	if (le32_to_cpu(es->s_log_block_size) >
5132 	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
5133 		ext4_msg(sb, KERN_ERR,
5134 			 "Invalid log block size: %u",
5135 			 le32_to_cpu(es->s_log_block_size));
5136 		goto out;
5137 	}
5138 	if (le32_to_cpu(es->s_log_cluster_size) >
5139 	    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
5140 		ext4_msg(sb, KERN_ERR,
5141 			 "Invalid log cluster size: %u",
5142 			 le32_to_cpu(es->s_log_cluster_size));
5143 		goto out;
5144 	}
5145 
5146 	blocksize = EXT4_MIN_BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
5147 
5148 	/*
5149 	 * If the block size used for the initial read is not the filesystem's
5150 	 * real block size, reread the superblock with the correct block size.
5151 	 */
5152 	if (sb->s_blocksize == blocksize)
5153 		goto success;
5154 
5155 	/*
5156 	 * bh must be released before kill_bdev(); otherwise neither the buffer
5157 	 * head nor its page will be freed. kill_bdev() is called by
5158 	 * sb_set_blocksize().
5159 	 */
5160 	brelse(bh);
5161 	/* Validate the filesystem blocksize */
5162 	if (!sb_set_blocksize(sb, blocksize)) {
5163 		ext4_msg(sb, KERN_ERR, "bad block size %d",
5164 				blocksize);
5165 		bh = NULL;
5166 		goto out;
5167 	}
5168 
5169 	logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE;
5170 	offset = do_div(logical_sb_block, blocksize);
5171 	bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
5172 	if (IS_ERR(bh)) {
5173 		ext4_msg(sb, KERN_ERR, "Can't read superblock on 2nd try");
5174 		ret = PTR_ERR(bh);
5175 		bh = NULL;
5176 		goto out;
5177 	}
5178 	es = (struct ext4_super_block *)(bh->b_data + offset);
5179 	sbi->s_es = es;
5180 	if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
5181 		ext4_msg(sb, KERN_ERR, "Magic mismatch, very weird!");
5182 		goto out;
5183 	}
5184 
5185 success:
5186 	sbi->s_min_folio_order = get_order(blocksize);
5187 	*lsb = logical_sb_block;
5188 	sbi->s_sbh = bh;
5189 	return 0;
5190 out:
5191 	brelse(bh);
5192 	return ret;
5193 }
5194 
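/*
 * Pick up the default htree hash version and hash seed from the superblock,
 * and remember whether the legacy hash must be computed with signed or
 * unsigned chars so that directory lookups stay compatible across
 * architectures with different 'char' signedness.
 */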
5195 static int ext4_hash_info_init(struct super_block *sb)
5196 {
5197 	struct ext4_sb_info *sbi = EXT4_SB(sb);
5198 	struct ext4_super_block *es = sbi->s_es;
5199 	unsigned int i;
5200 
5201 	sbi->s_def_hash_version = es->s_def_hash_version;
5202 
5203 	if (sbi->s_def_hash_version > DX_HASH_LAST) {
5204 		ext4_msg(sb, KERN_ERR,
5205 			 "Invalid default hash set in the superblock");
5206 		return -EINVAL;
5207 	} else if (sbi->s_def_hash_version == DX_HASH_SIPHASH) {
5208 		ext4_msg(sb, KERN_ERR,
5209 			 "SIPHASH is not a valid default hash value");
5210 		return -EINVAL;
5211 	}
5212 
5213 	for (i = 0; i < 4; i++)
5214 		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
5215 
5216 	if (ext4_has_feature_dir_index(sb)) {
5217 		i = le32_to_cpu(es->s_flags);
5218 		if (i & EXT2_FLAGS_UNSIGNED_HASH)
5219 			sbi->s_hash_unsigned = 3;
5220 		else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
5221 #ifdef __CHAR_UNSIGNED__
5222 			if (!sb_rdonly(sb))
5223 				es->s_flags |=
5224 					cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
5225 			sbi->s_hash_unsigned = 3;
5226 #else
5227 			if (!sb_rdonly(sb))
5228 				es->s_flags |=
5229 					cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
5230 #endif
5231 		}
5232 	}
5233 	return 0;
5234 }
5235 
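/*
 * Derive the per-block-group metadata geometry (descriptor size, blocks and
 * inodes per group, inode table blocks per group, ...) from the superblock
 * and sanity check it against the block size.
 */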
5236 static int ext4_block_group_meta_init(struct super_block *sb, int silent)
5237 {
5238 	struct ext4_sb_info *sbi = EXT4_SB(sb);
5239 	struct ext4_super_block *es = sbi->s_es;
5240 	int has_huge_files;
5241 
5242 	has_huge_files = ext4_has_feature_huge_file(sb);
5243 	sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
5244 						      has_huge_files);
5245 	sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
5246 
5247 	sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
5248 	if (ext4_has_feature_64bit(sb)) {
5249 		if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
5250 		    sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
5251 		    !is_power_of_2(sbi->s_desc_size)) {
5252 			ext4_msg(sb, KERN_ERR,
5253 			       "unsupported descriptor size %lu",
5254 			       sbi->s_desc_size);
5255 			return -EINVAL;
5256 		}
5257 	} else
5258 		sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
5259 
5260 	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
5261 	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
5262 
5263 	sbi->s_inodes_per_block = sb->s_blocksize / EXT4_INODE_SIZE(sb);
5264 	if (sbi->s_inodes_per_block == 0 || sbi->s_blocks_per_group == 0) {
5265 		if (!silent)
5266 			ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
5267 		return -EINVAL;
5268 	}
5269 	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
5270 	    sbi->s_inodes_per_group > sb->s_blocksize * 8) {
5271 		ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
5272 			 sbi->s_inodes_per_group);
5273 		return -EINVAL;
5274 	}
5275 	sbi->s_itb_per_group = sbi->s_inodes_per_group /
5276 					sbi->s_inodes_per_block;
5277 	sbi->s_desc_per_block = sb->s_blocksize / EXT4_DESC_SIZE(sb);
5278 	sbi->s_mount_state = le16_to_cpu(es->s_state) & ~EXT4_FC_REPLAY;
5279 	sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
5280 	sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
5281 
5282 	return 0;
5283 }
5284 
5285 /*
5286  * It's hard to get stripe-aligned blocks if the stripe is not aligned with
5287  * the cluster, so just disable the stripe and alert the user; this simplifies
5288  * the code and avoids stripe-aligned allocations that would rarely succeed.
5289  */
5290 static bool ext4_is_stripe_incompatible(struct super_block *sb, unsigned long stripe)
5291 {
5292 	struct ext4_sb_info *sbi = EXT4_SB(sb);
5293 	return (stripe > 0 && sbi->s_cluster_ratio > 1 &&
5294 		stripe % sbi->s_cluster_ratio != 0);
5295 }
5296 
5297 static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
5298 {
5299 	struct ext4_super_block *es = NULL;
5300 	struct ext4_sb_info *sbi = EXT4_SB(sb);
5301 	ext4_fsblk_t logical_sb_block;
5302 	struct inode *root;
5303 	int needs_recovery;
5304 	int err;
5305 	ext4_group_t first_not_zeroed;
5306 	struct ext4_fs_context *ctx = fc->fs_private;
5307 	int silent = fc->sb_flags & SB_SILENT;
5308 
5309 	/* Set defaults for the variables that will be set during parsing */
5310 	if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO))
5311 		ctx->journal_ioprio = EXT4_DEF_JOURNAL_IOPRIO;
5312 
5313 	sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
5314 	sbi->s_sectors_written_start =
5315 		part_stat_read(sb->s_bdev, sectors[STAT_WRITE]);
5316 
5317 	err = ext4_load_super(sb, &logical_sb_block, silent);
5318 	if (err)
5319 		goto out_fail;
5320 
5321 	es = sbi->s_es;
5322 	sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
5323 
5324 	err = ext4_init_metadata_csum(sb, es);
5325 	if (err)
5326 		goto failed_mount;
5327 
5328 	ext4_set_def_opts(sb, es);
5329 
5330 	sbi->s_resuid = make_kuid(&init_user_ns, ext4_get_resuid(es));
5331 	sbi->s_resgid = make_kgid(&init_user_ns, ext4_get_resgid(es));
5332 	sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
5333 	sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
5334 	sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
5335 	sbi->s_sb_update_kb = EXT4_DEF_SB_UPDATE_INTERVAL_KB;
5336 	sbi->s_sb_update_sec = EXT4_DEF_SB_UPDATE_INTERVAL_SEC;
5337 
5338 	/*
5339 	 * set default s_li_wait_mult for lazyinit, for the case there is
5340 	 * no mount option specified.
5341 	 */
5342 	sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
5343 
5344 	err = ext4_inode_info_init(sb, es);
5345 	if (err)
5346 		goto failed_mount;
5347 
5348 	err = parse_apply_sb_mount_options(sb, ctx);
5349 	if (err < 0)
5350 		goto failed_mount;
5351 
5352 	sbi->s_def_mount_opt = sbi->s_mount_opt;
5353 	sbi->s_def_mount_opt2 = sbi->s_mount_opt2;
5354 
5355 	err = ext4_check_opt_consistency(fc, sb);
5356 	if (err < 0)
5357 		goto failed_mount;
5358 
5359 	ext4_apply_options(fc, sb);
5360 
5361 	err = ext4_check_large_folio(sb);
5362 	if (err < 0)
5363 		goto failed_mount;
5364 
5365 	err = ext4_encoding_init(sb, es);
5366 	if (err)
5367 		goto failed_mount;
5368 
5369 	err = ext4_check_journal_data_mode(sb);
5370 	if (err)
5371 		goto failed_mount;
5372 
5373 	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
5374 		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
5375 
5376 	/* HSM events are allowed by default. */
5377 	sb->s_iflags |= SB_I_ALLOW_HSM;
5378 
5379 	err = ext4_check_feature_compatibility(sb, es, silent);
5380 	if (err)
5381 		goto failed_mount;
5382 
5383 	err = ext4_block_group_meta_init(sb, silent);
5384 	if (err)
5385 		goto failed_mount;
5386 
5387 	err = ext4_hash_info_init(sb);
5388 	if (err)
5389 		goto failed_mount;
5390 
5391 	err = ext4_handle_clustersize(sb);
5392 	if (err)
5393 		goto failed_mount;
5394 
5395 	err = ext4_check_geometry(sb, es);
5396 	if (err)
5397 		goto failed_mount;
5398 
5399 	timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
5400 	spin_lock_init(&sbi->s_error_lock);
5401 	INIT_WORK(&sbi->s_sb_upd_work, update_super_work);
5402 
5403 	err = ext4_group_desc_init(sb, es, logical_sb_block, &first_not_zeroed);
5404 	if (err)
5405 		goto failed_mount3;
5406 
5407 	err = ext4_es_register_shrinker(sbi);
5408 	if (err)
5409 		goto failed_mount3;
5410 
5411 	sbi->s_stripe = ext4_get_stripe_size(sbi);
5412 	if (ext4_is_stripe_incompatible(sb, sbi->s_stripe)) {
5413 		ext4_msg(sb, KERN_WARNING,
5414 			 "stripe (%lu) is not aligned with cluster size (%u), "
5415 			 "stripe is disabled",
5416 			 sbi->s_stripe, sbi->s_cluster_ratio);
5417 		sbi->s_stripe = 0;
5418 	}
5419 	sbi->s_extent_max_zeroout_kb = 32;
5420 
5421 	/*
5422 	 * set up enough so that it can read an inode
5423 	 */
5424 	sb->s_op = &ext4_sops;
5425 	sb->s_export_op = &ext4_export_ops;
5426 	sb->s_xattr = ext4_xattr_handlers;
5427 #ifdef CONFIG_FS_ENCRYPTION
5428 	sb->s_cop = &ext4_cryptops;
5429 #endif
5430 #ifdef CONFIG_FS_VERITY
5431 	sb->s_vop = &ext4_verityops;
5432 #endif
5433 #ifdef CONFIG_QUOTA
5434 	sb->dq_op = &ext4_quota_operations;
5435 	if (ext4_has_feature_quota(sb))
5436 		sb->s_qcop = &dquot_quotactl_sysfile_ops;
5437 	else
5438 		sb->s_qcop = &ext4_qctl_operations;
5439 	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
5440 #endif
5441 	super_set_uuid(sb, es->s_uuid, sizeof(es->s_uuid));
5442 	super_set_sysfs_name_bdev(sb);
5443 
5444 	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
5445 	mutex_init(&sbi->s_orphan_lock);
5446 
5447 	spin_lock_init(&sbi->s_bdev_wb_lock);
5448 
5449 	ext4_atomic_write_init(sb);
5450 	ext4_fast_commit_init(sb);
5451 
5452 	sb->s_root = NULL;
5453 
5454 	needs_recovery = (es->s_last_orphan != 0 ||
5455 			  ext4_has_feature_orphan_present(sb) ||
5456 			  ext4_has_feature_journal_needs_recovery(sb));
5457 
5458 	if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb)) {
5459 		err = ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block));
5460 		if (err)
5461 			goto failed_mount3a;
5462 	}
5463 
5464 	err = -EINVAL;
5465 	/*
5466 	 * The first inode we look at is the journal inode.  Don't try
5467 	 * root first: it may be modified in the journal!
5468 	 */
5469 	if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
5470 		err = ext4_load_and_init_journal(sb, es, ctx);
5471 		if (err)
5472 			goto failed_mount3a;
5473 		if (bdev_read_only(sb->s_bdev))
5474 			needs_recovery = 0;
5475 	} else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
5476 		   ext4_has_feature_journal_needs_recovery(sb)) {
5477 		ext4_msg(sb, KERN_ERR, "required journal recovery "
5478 		       "suppressed and not mounted read-only");
5479 		goto failed_mount3a;
5480 	} else {
5481 		const char *journal_option;
5482 
5483 		/* Nojournal mode, all journal mount options are illegal */
5484 		journal_option = ext4_has_journal_option(sb);
5485 		if (journal_option != NULL) {
5486 			ext4_msg(sb, KERN_ERR,
5487 				 "can't mount with %s, fs mounted w/o journal",
5488 				 journal_option);
5489 			goto failed_mount3a;
5490 		}
5491 
5492 		sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
5493 		clear_opt(sb, JOURNAL_CHECKSUM);
5494 		clear_opt(sb, DATA_FLAGS);
5495 		clear_opt2(sb, JOURNAL_FAST_COMMIT);
5496 		sbi->s_journal = NULL;
5497 		needs_recovery = 0;
5498 	}
5499 
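	/*
	 * Set up the extended attribute block cache (and the EA-inode cache
	 * when the ea_inode feature is enabled) unless nombcache was
	 * requested.
	 */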
5500 	if (!test_opt(sb, NO_MBCACHE)) {
5501 		sbi->s_ea_block_cache = ext4_xattr_create_cache();
5502 		if (!sbi->s_ea_block_cache) {
5503 			ext4_msg(sb, KERN_ERR,
5504 				 "Failed to create ea_block_cache");
5505 			err = -EINVAL;
5506 			goto failed_mount_wq;
5507 		}
5508 
5509 		if (ext4_has_feature_ea_inode(sb)) {
5510 			sbi->s_ea_inode_cache = ext4_xattr_create_cache();
5511 			if (!sbi->s_ea_inode_cache) {
5512 				ext4_msg(sb, KERN_ERR,
5513 					 "Failed to create ea_inode_cache");
5514 				err = -EINVAL;
5515 				goto failed_mount_wq;
5516 			}
5517 		}
5518 	}
5519 
5520 	/*
5521 	 * Get the # of file system overhead blocks from the
5522 	 * superblock if present.
5523 	 */
5524 	sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
5525 	/* ignore the precalculated value if it is ridiculous */
5526 	if (sbi->s_overhead > ext4_blocks_count(es))
5527 		sbi->s_overhead = 0;
5528 	/*
5529 	 * If the bigalloc feature is not enabled, recalculating the
5530 	 * overhead doesn't take long, so we might as well just redo
5531 	 * it to make sure we are using the correct value.
5532 	 */
5533 	if (!ext4_has_feature_bigalloc(sb))
5534 		sbi->s_overhead = 0;
5535 	if (sbi->s_overhead == 0) {
5536 		err = ext4_calculate_overhead(sb);
5537 		if (err)
5538 			goto failed_mount_wq;
5539 	}
5540 
5541 	/*
5542 	 * The maximum number of concurrent works can be high and
5543 	 * concurrency isn't really necessary.  Limit it to 1.
5544 	 */
5545 	EXT4_SB(sb)->rsv_conversion_wq =
5546 		alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
5547 	if (!EXT4_SB(sb)->rsv_conversion_wq) {
5548 		printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
5549 		err = -ENOMEM;
5550 		goto failed_mount4;
5551 	}
5552 
5553 	/*
5554 	 * The jbd2_journal_load will have done any necessary log recovery,
5555 	 * so we can safely mount the rest of the filesystem now.
5556 	 */
5557 
5558 	root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
5559 	if (IS_ERR(root)) {
5560 		ext4_msg(sb, KERN_ERR, "get root inode failed");
5561 		err = PTR_ERR(root);
5562 		root = NULL;
5563 		goto failed_mount4;
5564 	}
5565 	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
5566 		ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
5567 		iput(root);
5568 		err = -EFSCORRUPTED;
5569 		goto failed_mount4;
5570 	}
5571 
5572 	generic_set_sb_d_ops(sb);
5573 	sb->s_root = d_make_root(root);
5574 	if (!sb->s_root) {
5575 		ext4_msg(sb, KERN_ERR, "get root dentry failed");
5576 		err = -ENOMEM;
5577 		goto failed_mount4;
5578 	}
5579 
5580 	err = ext4_setup_super(sb, es, sb_rdonly(sb));
5581 	if (err == -EROFS) {
5582 		sb->s_flags |= SB_RDONLY;
5583 	} else if (err)
5584 		goto failed_mount4a;
5585 
5586 	ext4_set_resv_clusters(sb);
5587 
5588 	if (test_opt(sb, BLOCK_VALIDITY)) {
5589 		err = ext4_setup_system_zone(sb);
5590 		if (err) {
5591 			ext4_msg(sb, KERN_ERR, "failed to initialize system "
5592 				 "zone (%d)", err);
5593 			goto failed_mount4a;
5594 		}
5595 	}
5596 	ext4_fc_replay_cleanup(sb);
5597 
5598 	ext4_ext_init(sb);
5599 
5600 	/*
5601 	 * Enable optimize_scan if number of groups is > threshold. This can be
5602 	 * turned off by passing "mb_optimize_scan=0". This can also be
5603 	 * turned on forcefully by passing "mb_optimize_scan=1".
5604 	 */
5605 	if (!(ctx->spec & EXT4_SPEC_mb_optimize_scan)) {
5606 		if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD)
5607 			set_opt2(sb, MB_OPTIMIZE_SCAN);
5608 		else
5609 			clear_opt2(sb, MB_OPTIMIZE_SCAN);
5610 	}
5611 
5612 	err = ext4_mb_init(sb);
5613 	if (err) {
5614 		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
5615 			 err);
5616 		goto failed_mount5;
5617 	}
5618 
5619 	/*
5620 	 * We can only set up the journal commit callback once
5621 	 * mballoc is initialized
5622 	 */
5623 	if (sbi->s_journal)
5624 		sbi->s_journal->j_commit_callback =
5625 			ext4_journal_commit_callback;
5626 
5627 	err = ext4_percpu_param_init(sbi);
5628 	if (err)
5629 		goto failed_mount6;
5630 
5631 	if (ext4_has_feature_flex_bg(sb))
5632 		if (!ext4_fill_flex_info(sb)) {
5633 			ext4_msg(sb, KERN_ERR,
5634 			       "unable to initialize "
5635 			       "flex_bg meta info!");
5636 			err = -ENOMEM;
5637 			goto failed_mount6;
5638 		}
5639 
5640 	err = ext4_register_li_request(sb, first_not_zeroed);
5641 	if (err)
5642 		goto failed_mount6;
5643 
5644 	err = ext4_init_orphan_info(sb);
5645 	if (err)
5646 		goto failed_mount7;
5647 #ifdef CONFIG_QUOTA
5648 	/* Enable quota usage during mount. */
5649 	if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
5650 		err = ext4_enable_quotas(sb);
5651 		if (err)
5652 			goto failed_mount8;
5653 	}
5654 #endif  /* CONFIG_QUOTA */
5655 
5656 	/*
5657 	 * Save the original bdev mapping's wb_err value which could be
5658 	 * used to detect the metadata async write error.
5659 	 */
5660 	errseq_check_and_advance(&sb->s_bdev->bd_mapping->wb_err,
5661 				 &sbi->s_bdev_wb_err);
5662 	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
5663 	ext4_orphan_cleanup(sb, es);
5664 	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
5665 	/*
5666 	 * Update the checksum after updating free space/inode counters and
5667 	 * ext4_orphan_cleanup. Otherwise the superblock can have an incorrect
5668 	 * checksum in the buffer cache until it is written out and
5669 	 * e2fsprogs programs trying to open a file system immediately
5670 	 * after it is mounted can fail.
5671 	 */
5672 	ext4_superblock_csum_set(sb);
5673 	if (needs_recovery) {
5674 		ext4_msg(sb, KERN_INFO, "recovery complete");
5675 		err = ext4_mark_recovery_complete(sb, es);
5676 		if (err)
5677 			goto failed_mount9;
5678 	}
5679 
5680 	if (test_opt(sb, DISCARD) && !bdev_max_discard_sectors(sb->s_bdev)) {
5681 		ext4_msg(sb, KERN_WARNING,
5682 			 "mounting with \"discard\" option, but the device does not support discard");
5683 		clear_opt(sb, DISCARD);
5684 	}
5685 
5686 	if (es->s_error_count)
5687 		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
5688 
5689 	/* Enable message ratelimiting. Default is 10 messages per 5 secs. */
5690 	ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
5691 	ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
5692 	ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
5693 	atomic_set(&sbi->s_warning_count, 0);
5694 	atomic_set(&sbi->s_msg_count, 0);
5695 
5696 	/* Register sysfs after all initializations are complete. */
5697 	err = ext4_register_sysfs(sb);
5698 	if (err)
5699 		goto failed_mount9;
5700 
5701 	return 0;
5702 
5703 failed_mount9:
5704 	ext4_quotas_off(sb, EXT4_MAXQUOTAS);
5705 failed_mount8: __maybe_unused
5706 	ext4_release_orphan_info(sb);
5707 failed_mount7:
5708 	ext4_unregister_li_request(sb);
5709 failed_mount6:
5710 	ext4_mb_release(sb);
5711 	ext4_flex_groups_free(sbi);
5712 	ext4_percpu_param_destroy(sbi);
5713 failed_mount5:
5714 	ext4_ext_release(sb);
5715 	ext4_release_system_zone(sb);
5716 failed_mount4a:
5717 	dput(sb->s_root);
5718 	sb->s_root = NULL;
5719 failed_mount4:
5720 	ext4_msg(sb, KERN_ERR, "mount failed");
5721 	if (EXT4_SB(sb)->rsv_conversion_wq)
5722 		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
5723 failed_mount_wq:
5724 	ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
5725 	sbi->s_ea_inode_cache = NULL;
5726 
5727 	ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
5728 	sbi->s_ea_block_cache = NULL;
5729 
5730 	if (sbi->s_journal) {
5731 		ext4_journal_destroy(sbi, sbi->s_journal);
5732 	}
5733 failed_mount3a:
5734 	ext4_es_unregister_shrinker(sbi);
5735 failed_mount3:
5736 	/* flush s_sb_upd_work before sbi destroy */
5737 	flush_work(&sbi->s_sb_upd_work);
5738 	ext4_stop_mmpd(sbi);
5739 	timer_delete_sync(&sbi->s_err_report);
5740 	ext4_group_desc_free(sbi);
5741 failed_mount:
5742 #if IS_ENABLED(CONFIG_UNICODE)
5743 	utf8_unload(sb->s_encoding);
5744 #endif
5745 
5746 #ifdef CONFIG_QUOTA
5747 	for (unsigned int i = 0; i < EXT4_MAXQUOTAS; i++)
5748 		kfree(get_qf_name(sb, sbi, i));
5749 #endif
5750 	fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
5751 	brelse(sbi->s_sbh);
5752 	if (sbi->s_journal_bdev_file) {
5753 		invalidate_bdev(file_bdev(sbi->s_journal_bdev_file));
5754 		bdev_fput(sbi->s_journal_bdev_file);
5755 	}
5756 out_fail:
5757 	invalidate_bdev(sb->s_bdev);
5758 	sb->s_fs_info = NULL;
5759 	return err;
5760 }
5761 
5762 static int ext4_fill_super(struct super_block *sb, struct fs_context *fc)
5763 {
5764 	struct ext4_fs_context *ctx = fc->fs_private;
5765 	struct ext4_sb_info *sbi;
5766 	const char *descr;
5767 	int ret;
5768 
5769 	sbi = ext4_alloc_sbi(sb);
5770 	if (!sbi)
5771 		return -ENOMEM;
5772 
5773 	fc->s_fs_info = sbi;
5774 
5775 	/* Cleanup superblock name */
5776 	strreplace(sb->s_id, '/', '!');
5777 
5778 	sbi->s_sb_block = 1;	/* Default super block location */
5779 	if (ctx->spec & EXT4_SPEC_s_sb_block)
5780 		sbi->s_sb_block = ctx->s_sb_block;
5781 
5782 	ret = __ext4_fill_super(fc, sb);
5783 	if (ret < 0)
5784 		goto free_sbi;
5785 
5786 	if (sbi->s_journal) {
5787 		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
5788 			descr = " journalled data mode";
5789 		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
5790 			descr = " ordered data mode";
5791 		else
5792 			descr = " writeback data mode";
5793 	} else
5794 		descr = "out journal";
5795 
5796 	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
5797 		ext4_msg(sb, KERN_INFO, "mounted filesystem %pU %s with%s. "
5798 			 "Quota mode: %s.", &sb->s_uuid,
5799 			 sb_rdonly(sb) ? "ro" : "r/w", descr,
5800 			 ext4_quota_mode(sb));
5801 
5802 	/* Update the s_overhead_clusters if necessary */
5803 	ext4_update_overhead(sb, false);
5804 	return 0;
5805 
5806 free_sbi:
5807 	ext4_free_sbi(sbi);
5808 	fc->s_fs_info = NULL;
5809 	return ret;
5810 }
5811 
5812 static int ext4_get_tree(struct fs_context *fc)
5813 {
5814 	return get_tree_bdev(fc, ext4_fill_super);
5815 }
5816 
5817 /*
5818  * Setup any per-fs journal parameters now.  We'll do this both on
5819  * initial mount, once the journal has been initialised but before we've
5820  * done any recovery; and again on any subsequent remount.
5821  */
5822 static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
5823 {
5824 	struct ext4_sb_info *sbi = EXT4_SB(sb);
5825 
5826 	journal->j_commit_interval = sbi->s_commit_interval;
5827 	journal->j_min_batch_time = sbi->s_min_batch_time;
5828 	journal->j_max_batch_time = sbi->s_max_batch_time;
5829 	ext4_fc_init(sb, journal);
5830 
5831 	write_lock(&journal->j_state_lock);
5832 	if (test_opt(sb, BARRIER))
5833 		journal->j_flags |= JBD2_BARRIER;
5834 	else
5835 		journal->j_flags &= ~JBD2_BARRIER;
5836 	/*
5837 	 * Always enable the journal cycle record option so that the journal
5838 	 * keeps logging transactions continuously across mounts.
5839 	 */
5840 	journal->j_flags |= JBD2_CYCLE_RECORD;
5841 	write_unlock(&journal->j_state_lock);
5842 }
5843 
5844 static struct inode *ext4_get_journal_inode(struct super_block *sb,
5845 					     unsigned int journal_inum)
5846 {
5847 	struct inode *journal_inode;
5848 
5849 	/*
5850 	 * Test for the existence of a valid inode on disk.  Bad things
5851 	 * happen if we iget() an unused inode, as the subsequent iput()
5852 	 * will try to delete it.
5853 	 */
5854 	journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
5855 	if (IS_ERR(journal_inode)) {
5856 		ext4_msg(sb, KERN_ERR, "no journal found");
5857 		return ERR_CAST(journal_inode);
5858 	}
5859 	if (!journal_inode->i_nlink) {
5860 		make_bad_inode(journal_inode);
5861 		iput(journal_inode);
5862 		ext4_msg(sb, KERN_ERR, "journal inode is deleted");
5863 		return ERR_PTR(-EFSCORRUPTED);
5864 	}
5865 	if (!S_ISREG(journal_inode->i_mode) || IS_ENCRYPTED(journal_inode)) {
5866 		ext4_msg(sb, KERN_ERR, "invalid journal inode");
5867 		iput(journal_inode);
5868 		return ERR_PTR(-EFSCORRUPTED);
5869 	}
5870 
5871 	ext4_debug("Journal inode found at %p: %lld bytes\n",
5872 		  journal_inode, journal_inode->i_size);
5873 	return journal_inode;
5874 }
5875 
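/*
 * Map a logical block of the journal inode to its physical block.  Called by
 * jbd2 via journal->j_bmap; a mapping failure here is fatal for the journal,
 * so abort it.
 */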
5876 static int ext4_journal_bmap(journal_t *journal, sector_t *block)
5877 {
5878 	struct ext4_map_blocks map;
5879 	int ret;
5880 
5881 	if (journal->j_inode == NULL)
5882 		return 0;
5883 
5884 	map.m_lblk = *block;
5885 	map.m_len = 1;
5886 	ret = ext4_map_blocks(NULL, journal->j_inode, &map, 0);
5887 	if (ret <= 0) {
5888 		ext4_msg(journal->j_inode->i_sb, KERN_CRIT,
5889 			 "journal bmap failed: block %llu ret %d\n",
5890 			 *block, ret);
5891 		jbd2_journal_abort(journal, ret ? ret : -EFSCORRUPTED);
5892 		return ret;
5893 	}
5894 	*block = map.m_pblk;
5895 	return 0;
5896 }
5897 
5898 static journal_t *ext4_open_inode_journal(struct super_block *sb,
5899 					  unsigned int journal_inum)
5900 {
5901 	struct inode *journal_inode;
5902 	journal_t *journal;
5903 
5904 	journal_inode = ext4_get_journal_inode(sb, journal_inum);
5905 	if (IS_ERR(journal_inode))
5906 		return ERR_CAST(journal_inode);
5907 
5908 	journal = jbd2_journal_init_inode(journal_inode);
5909 	if (IS_ERR(journal)) {
5910 		ext4_msg(sb, KERN_ERR, "Could not load journal inode");
5911 		iput(journal_inode);
5912 		return ERR_CAST(journal);
5913 	}
5914 	journal->j_private = sb;
5915 	journal->j_bmap = ext4_journal_bmap;
5916 	ext4_init_journal_params(sb, journal);
5917 	return journal;
5918 }
5919 
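/*
 * Open the external journal device and validate its ext4 superblock: the
 * magic, the JOURNAL_DEV incompat feature, the superblock checksum (when
 * metadata_csum is set) and the journal UUID recorded in the filesystem's
 * superblock must all check out.  On success return the opened bdev file
 * along with the journal's start block and length.
 */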
5920 static struct file *ext4_get_journal_blkdev(struct super_block *sb,
5921 					dev_t j_dev, ext4_fsblk_t *j_start,
5922 					ext4_fsblk_t *j_len)
5923 {
5924 	struct buffer_head *bh;
5925 	struct block_device *bdev;
5926 	struct file *bdev_file;
5927 	int hblock, blocksize;
5928 	ext4_fsblk_t sb_block;
5929 	unsigned long offset;
5930 	struct ext4_super_block *es;
5931 	int errno;
5932 
5933 	bdev_file = bdev_file_open_by_dev(j_dev,
5934 		BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES,
5935 		sb, &fs_holder_ops);
5936 	if (IS_ERR(bdev_file)) {
5937 		ext4_msg(sb, KERN_ERR,
5938 			 "failed to open journal device unknown-block(%u,%u) %ld",
5939 			 MAJOR(j_dev), MINOR(j_dev), PTR_ERR(bdev_file));
5940 		return bdev_file;
5941 	}
5942 
5943 	bdev = file_bdev(bdev_file);
5944 	blocksize = sb->s_blocksize;
5945 	hblock = bdev_logical_block_size(bdev);
5946 	if (blocksize < hblock) {
5947 		ext4_msg(sb, KERN_ERR,
5948 			"blocksize too small for journal device");
5949 		errno = -EINVAL;
5950 		goto out_bdev;
5951 	}
5952 
5953 	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
5954 	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
5955 	set_blocksize(bdev_file, blocksize);
5956 	bh = __bread(bdev, sb_block, blocksize);
5957 	if (!bh) {
5958 		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
5959 		       "external journal");
5960 		errno = -EINVAL;
5961 		goto out_bdev;
5962 	}
5963 
5964 	es = (struct ext4_super_block *) (bh->b_data + offset);
5965 	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
5966 	    !(le32_to_cpu(es->s_feature_incompat) &
5967 	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
5968 		ext4_msg(sb, KERN_ERR, "external journal has bad superblock");
5969 		errno = -EFSCORRUPTED;
5970 		goto out_bh;
5971 	}
5972 
5973 	if ((le32_to_cpu(es->s_feature_ro_compat) &
5974 	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
5975 	    es->s_checksum != ext4_superblock_csum(es)) {
5976 		ext4_msg(sb, KERN_ERR, "external journal has corrupt superblock");
5977 		errno = -EFSCORRUPTED;
5978 		goto out_bh;
5979 	}
5980 
5981 	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
5982 		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
5983 		errno = -EFSCORRUPTED;
5984 		goto out_bh;
5985 	}
5986 
5987 	*j_start = sb_block + 1;
5988 	*j_len = ext4_blocks_count(es);
5989 	brelse(bh);
5990 	return bdev_file;
5991 
5992 out_bh:
5993 	brelse(bh);
5994 out_bdev:
5995 	bdev_fput(bdev_file);
5996 	return ERR_PTR(errno);
5997 }
5998 
5999 static journal_t *ext4_open_dev_journal(struct super_block *sb,
6000 					dev_t j_dev)
6001 {
6002 	journal_t *journal;
6003 	ext4_fsblk_t j_start;
6004 	ext4_fsblk_t j_len;
6005 	struct file *bdev_file;
6006 	int errno = 0;
6007 
6008 	bdev_file = ext4_get_journal_blkdev(sb, j_dev, &j_start, &j_len);
6009 	if (IS_ERR(bdev_file))
6010 		return ERR_CAST(bdev_file);
6011 
6012 	journal = jbd2_journal_init_dev(file_bdev(bdev_file), sb->s_bdev, j_start,
6013 					j_len, sb->s_blocksize);
6014 	if (IS_ERR(journal)) {
6015 		ext4_msg(sb, KERN_ERR, "failed to create device journal");
6016 		errno = PTR_ERR(journal);
6017 		goto out_bdev;
6018 	}
6019 	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
6020 		ext4_msg(sb, KERN_ERR, "External journal has more than one "
6021 					"user (unsupported) - %d",
6022 			be32_to_cpu(journal->j_superblock->s_nr_users));
6023 		errno = -EINVAL;
6024 		goto out_journal;
6025 	}
6026 	journal->j_private = sb;
6027 	EXT4_SB(sb)->s_journal_bdev_file = bdev_file;
6028 	ext4_init_journal_params(sb, journal);
6029 	return journal;
6030 
6031 out_journal:
6032 	ext4_journal_destroy(EXT4_SB(sb), journal);
6033 out_bdev:
6034 	bdev_fput(bdev_file);
6035 	return ERR_PTR(errno);
6036 }
6037 
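/*
 * Locate and load the journal for this filesystem.  The journal may live in
 * an inode or on an external device (but not both).  Preserve the error
 * information stored in the superblock across jbd2_journal_load(), refuse
 * recovery when write access to the device is unavailable, and write back
 * the journal device/inode numbers if they changed.
 */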
6038 static int ext4_load_journal(struct super_block *sb,
6039 			     struct ext4_super_block *es,
6040 			     unsigned long journal_devnum)
6041 {
6042 	journal_t *journal;
6043 	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
6044 	dev_t journal_dev;
6045 	int err = 0;
6046 	int really_read_only;
6047 	int journal_dev_ro;
6048 
6049 	if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
6050 		return -EFSCORRUPTED;
6051 
6052 	if (journal_devnum &&
6053 	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
6054 		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
6055 			"numbers have changed");
6056 		journal_dev = new_decode_dev(journal_devnum);
6057 	} else
6058 		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
6059 
6060 	if (journal_inum && journal_dev) {
6061 		ext4_msg(sb, KERN_ERR,
6062 			 "filesystem has both journal inode and journal device!");
6063 		return -EINVAL;
6064 	}
6065 
6066 	if (journal_inum) {
6067 		journal = ext4_open_inode_journal(sb, journal_inum);
6068 		if (IS_ERR(journal))
6069 			return PTR_ERR(journal);
6070 	} else {
6071 		journal = ext4_open_dev_journal(sb, journal_dev);
6072 		if (IS_ERR(journal))
6073 			return PTR_ERR(journal);
6074 	}
6075 
6076 	journal_dev_ro = bdev_read_only(journal->j_dev);
6077 	really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro;
6078 
6079 	if (journal_dev_ro && !sb_rdonly(sb)) {
6080 		ext4_msg(sb, KERN_ERR,
6081 			 "journal device read-only, try mounting with '-o ro'");
6082 		err = -EROFS;
6083 		goto err_out;
6084 	}
6085 
6086 	/*
6087 	 * Are we loading a blank journal or performing recovery after a
6088 	 * crash?  For recovery, we need to check in advance whether we
6089 	 * can get read-write access to the device.
6090 	 */
6091 	if (ext4_has_feature_journal_needs_recovery(sb)) {
6092 		if (sb_rdonly(sb)) {
6093 			ext4_msg(sb, KERN_INFO, "INFO: recovery "
6094 					"required on readonly filesystem");
6095 			if (really_read_only) {
6096 				ext4_msg(sb, KERN_ERR, "write access "
6097 					"unavailable, cannot proceed "
6098 					"(try mounting with noload)");
6099 				err = -EROFS;
6100 				goto err_out;
6101 			}
6102 			ext4_msg(sb, KERN_INFO, "write access will "
6103 			       "be enabled during recovery");
6104 		}
6105 	}
6106 
6107 	if (!(journal->j_flags & JBD2_BARRIER))
6108 		ext4_msg(sb, KERN_INFO, "barriers disabled");
6109 
6110 	if (!ext4_has_feature_journal_needs_recovery(sb))
6111 		err = jbd2_journal_wipe(journal, !really_read_only);
6112 	if (!err) {
6113 		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
6114 		__le16 orig_state;
6115 		bool changed = false;
6116 
6117 		if (save)
6118 			memcpy(save, ((char *) es) +
6119 			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
6120 		err = jbd2_journal_load(journal);
6121 		if (save && memcmp(((char *) es) + EXT4_S_ERR_START,
6122 				   save, EXT4_S_ERR_LEN)) {
6123 			memcpy(((char *) es) + EXT4_S_ERR_START,
6124 			       save, EXT4_S_ERR_LEN);
6125 			changed = true;
6126 		}
6127 		kfree(save);
6128 		orig_state = es->s_state;
6129 		es->s_state |= cpu_to_le16(EXT4_SB(sb)->s_mount_state &
6130 					   EXT4_ERROR_FS);
6131 		if (orig_state != es->s_state)
6132 			changed = true;
6133 		/* Write out restored error information to the superblock */
6134 		if (changed && !really_read_only) {
6135 			int err2;
6136 			err2 = ext4_commit_super(sb);
6137 			err = err ? : err2;
6138 		}
6139 	}
6140 
6141 	if (err) {
6142 		ext4_msg(sb, KERN_ERR, "error loading journal");
6143 		goto err_out;
6144 	}
6145 
6146 	EXT4_SB(sb)->s_journal = journal;
6147 	err = ext4_clear_journal_err(sb, es);
6148 	if (err) {
6149 		ext4_journal_destroy(EXT4_SB(sb), journal);
6150 		return err;
6151 	}
6152 
6153 	if (!really_read_only && journal_devnum &&
6154 	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
6155 		es->s_journal_dev = cpu_to_le32(journal_devnum);
6156 		ext4_commit_super(sb);
6157 	}
6158 	if (!really_read_only && journal_inum &&
6159 	    journal_inum != le32_to_cpu(es->s_journal_inum)) {
6160 		es->s_journal_inum = cpu_to_le32(journal_inum);
6161 		ext4_commit_super(sb);
6162 	}
6163 
6164 	return 0;
6165 
6166 err_out:
6167 	ext4_journal_destroy(EXT4_SB(sb), journal);
6168 	return err;
6169 }
6170 
6171 /* Copy state of EXT4_SB(sb) into buffer for on-disk superblock */
6172 static void ext4_update_super(struct super_block *sb)
6173 {
6174 	struct ext4_sb_info *sbi = EXT4_SB(sb);
6175 	struct ext4_super_block *es = sbi->s_es;
6176 	struct buffer_head *sbh = sbi->s_sbh;
6177 
6178 	lock_buffer(sbh);
6179 	/*
6180 	 * If the file system is mounted read-only, don't update the
6181 	 * superblock write time.  This avoids updating the superblock
6182 	 * write time when we are mounting the root file system
6183 	 * read/only but we need to replay the journal; at that point,
6184 	 * for people who are east of GMT and who make their clock
6185 	 * tick in localtime for Windows bug-for-bug compatibility,
6186 	 * the clock is set in the future, and this will cause e2fsck
6187 	 * to complain and force a full file system check.
6188 	 */
6189 	if (!sb_rdonly(sb))
6190 		ext4_update_tstamp(es, s_wtime);
6191 	es->s_kbytes_written =
6192 		cpu_to_le64(sbi->s_kbytes_written +
6193 		    ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
6194 		      sbi->s_sectors_written_start) >> 1));
6195 	if (percpu_counter_initialized(&sbi->s_freeclusters_counter))
6196 		ext4_free_blocks_count_set(es,
6197 			EXT4_C2B(sbi, percpu_counter_sum_positive(
6198 				&sbi->s_freeclusters_counter)));
6199 	if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
6200 		es->s_free_inodes_count =
6201 			cpu_to_le32(percpu_counter_sum_positive(
6202 				&sbi->s_freeinodes_counter));
6203 	/* Copy error information to the on-disk superblock */
6204 	spin_lock(&sbi->s_error_lock);
6205 	if (sbi->s_add_error_count > 0) {
6206 		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
6207 		if (!es->s_first_error_time && !es->s_first_error_time_hi) {
6208 			__ext4_update_tstamp(&es->s_first_error_time,
6209 					     &es->s_first_error_time_hi,
6210 					     sbi->s_first_error_time);
6211 			strtomem_pad(es->s_first_error_func,
6212 				     sbi->s_first_error_func, 0);
6213 			es->s_first_error_line =
6214 				cpu_to_le32(sbi->s_first_error_line);
6215 			es->s_first_error_ino =
6216 				cpu_to_le32(sbi->s_first_error_ino);
6217 			es->s_first_error_block =
6218 				cpu_to_le64(sbi->s_first_error_block);
6219 			es->s_first_error_errcode =
6220 				ext4_errno_to_code(sbi->s_first_error_code);
6221 		}
6222 		__ext4_update_tstamp(&es->s_last_error_time,
6223 				     &es->s_last_error_time_hi,
6224 				     sbi->s_last_error_time);
6225 		strtomem_pad(es->s_last_error_func, sbi->s_last_error_func, 0);
6226 		es->s_last_error_line = cpu_to_le32(sbi->s_last_error_line);
6227 		es->s_last_error_ino = cpu_to_le32(sbi->s_last_error_ino);
6228 		es->s_last_error_block = cpu_to_le64(sbi->s_last_error_block);
6229 		es->s_last_error_errcode =
6230 				ext4_errno_to_code(sbi->s_last_error_code);
6231 		/*
6232 		 * Start the daily error reporting function if it hasn't been
6233 		 * started already
6234 		 */
6235 		if (!es->s_error_count)
6236 			mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);
6237 		le32_add_cpu(&es->s_error_count, sbi->s_add_error_count);
6238 		sbi->s_add_error_count = 0;
6239 	}
6240 	spin_unlock(&sbi->s_error_lock);
6241 
6242 	ext4_superblock_csum_set(sb);
6243 	unlock_buffer(sbh);
6244 }
6245 
6246 static int ext4_commit_super(struct super_block *sb)
6247 {
6248 	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
6249 
6250 	if (!sbh)
6251 		return -EINVAL;
6252 
6253 	ext4_update_super(sb);
6254 
6255 	lock_buffer(sbh);
6256 	/* Buffer got discarded which means block device got invalidated */
6257 	if (!buffer_mapped(sbh)) {
6258 		unlock_buffer(sbh);
6259 		return -EIO;
6260 	}
6261 
6262 	if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
6263 		/*
6264 		 * Oh, dear.  A previous attempt to write the
6265 		 * superblock failed.  This could happen because the
6266 		 * USB device was yanked out.  Or it could happen to
6267 		 * be a transient write error and maybe the block will
6268 		 * be remapped.  Nothing we can do but to retry the
6269 		 * write and hope for the best.
6270 		 */
6271 		ext4_msg(sb, KERN_ERR, "previous I/O error to "
6272 		       "superblock detected");
6273 		clear_buffer_write_io_error(sbh);
6274 		set_buffer_uptodate(sbh);
6275 	}
6276 	get_bh(sbh);
6277 	/* Clear potential dirty bit if it was journalled update */
6278 	clear_buffer_dirty(sbh);
6279 	sbh->b_end_io = end_buffer_write_sync;
6280 	submit_bh(REQ_OP_WRITE | REQ_SYNC |
6281 		  (test_opt(sb, BARRIER) ? REQ_FUA : 0), sbh);
6282 	wait_on_buffer(sbh);
6283 	if (buffer_write_io_error(sbh)) {
6284 		ext4_msg(sb, KERN_ERR, "I/O error while writing "
6285 		       "superblock");
6286 		clear_buffer_write_io_error(sbh);
6287 		set_buffer_uptodate(sbh);
6288 		return -EIO;
6289 	}
6290 	return 0;
6291 }
6292 
6293 /*
6294  * Have we just finished recovery?  If so, and if we are mounting (or
6295  * remounting) the filesystem readonly, then we will end up with a
6296  * consistent fs on disk.  Record that fact.
6297  */
6298 static int ext4_mark_recovery_complete(struct super_block *sb,
6299 				       struct ext4_super_block *es)
6300 {
6301 	int err;
6302 	journal_t *journal = EXT4_SB(sb)->s_journal;
6303 
6304 	if (!ext4_has_feature_journal(sb)) {
6305 		if (journal != NULL) {
6306 			ext4_error(sb, "Journal got removed while the fs was "
6307 				   "mounted!");
6308 			return -EFSCORRUPTED;
6309 		}
6310 		return 0;
6311 	}
6312 	jbd2_journal_lock_updates(journal);
6313 	err = jbd2_journal_flush(journal, 0);
6314 	if (err < 0)
6315 		goto out;
6316 
6317 	if (sb_rdonly(sb) && (ext4_has_feature_journal_needs_recovery(sb) ||
6318 	    ext4_has_feature_orphan_present(sb))) {
6319 		if (!ext4_orphan_file_empty(sb)) {
6320 			ext4_error(sb, "Orphan file not empty on read-only fs.");
6321 			err = -EFSCORRUPTED;
6322 			goto out;
6323 		}
6324 		ext4_clear_feature_journal_needs_recovery(sb);
6325 		ext4_clear_feature_orphan_present(sb);
6326 		ext4_commit_super(sb);
6327 	}
6328 out:
6329 	jbd2_journal_unlock_updates(journal);
6330 	return err;
6331 }
6332 
6333 /*
6334  * If we are mounting (or read-write remounting) a filesystem whose journal
6335  * has recorded an error from a previous lifetime, move that error to the
6336  * main filesystem now.
6337  */
6338 static int ext4_clear_journal_err(struct super_block *sb,
6339 				   struct ext4_super_block *es)
6340 {
6341 	journal_t *journal;
6342 	int j_errno;
6343 	const char *errstr;
6344 
6345 	if (!ext4_has_feature_journal(sb)) {
6346 		ext4_error(sb, "Journal got removed while the fs was mounted!");
6347 		return -EFSCORRUPTED;
6348 	}
6349 
6350 	journal = EXT4_SB(sb)->s_journal;
6351 
6352 	/*
6353 	 * Now check for any error status which may have been recorded in the
6354 	 * journal by a prior ext4_error() or ext4_abort()
6355 	 */
6356 
6357 	j_errno = jbd2_journal_errno(journal);
6358 	if (j_errno) {
6359 		char nbuf[16];
6360 
6361 		errstr = ext4_decode_error(sb, j_errno, nbuf);
6362 		ext4_warning(sb, "Filesystem error recorded "
6363 			     "from previous mount: %s", errstr);
6364 
6365 		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
6366 		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
6367 		j_errno = ext4_commit_super(sb);
6368 		if (j_errno)
6369 			return j_errno;
6370 		ext4_warning(sb, "Marked fs in need of filesystem check.");
6371 
6372 		jbd2_journal_clear_err(journal);
6373 		jbd2_journal_update_sb_errno(journal);
6374 	}
6375 	return 0;
6376 }
6377 
6378 /*
6379  * Force the running and committing transactions to commit,
6380  * and wait on the commit.
6381  */
6382 int ext4_force_commit(struct super_block *sb)
6383 {
6384 	return ext4_journal_force_commit(EXT4_SB(sb)->s_journal);
6385 }
6386 
6387 static int ext4_sync_fs(struct super_block *sb, int wait)
6388 {
6389 	int ret = 0;
6390 	tid_t target;
6391 	bool needs_barrier = false;
6392 	struct ext4_sb_info *sbi = EXT4_SB(sb);
6393 
6394 	ret = ext4_emergency_state(sb);
6395 	if (unlikely(ret))
6396 		return ret;
6397 
6398 	trace_ext4_sync_fs(sb, wait);
6399 	flush_workqueue(sbi->rsv_conversion_wq);
6400 	/*
6401 	 * Writeback quota in non-journalled quota case - journalled quota has
6402 	 * no dirty dquots
6403 	 */
6404 	dquot_writeback_dquots(sb, -1);
6405 	/*
6406 	 * Data writeback is possible w/o journal transaction, so the barrier
6407 	 * must be sent at the end of the function. But we can skip it if
6408 	 * transaction_commit will do it for us.
6409 	 */
6410 	if (sbi->s_journal) {
6411 		target = jbd2_get_latest_transaction(sbi->s_journal);
6412 		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
6413 		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
6414 			needs_barrier = true;
6415 
6416 		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
6417 			if (wait)
6418 				ret = jbd2_log_wait_commit(sbi->s_journal,
6419 							   target);
6420 		}
6421 	} else if (wait && test_opt(sb, BARRIER))
6422 		needs_barrier = true;
6423 	if (needs_barrier) {
6424 		int err;
6425 		err = blkdev_issue_flush(sb->s_bdev);
6426 		if (!ret)
6427 			ret = err;
6428 	}
6429 
6430 	return ret;
6431 }
6432 
6433 /*
6434  * LVM calls this function before a (read-only) snapshot is created.  This
6435  * gives us a chance to flush the journal completely and mark the fs clean.
6436  *
6437  * Note that this function alone cannot bring the filesystem to a clean
6438  * state; it relies on the upper layer to stop all data & metadata
6439  * modifications.
6440  */
6441 static int ext4_freeze(struct super_block *sb)
6442 {
6443 	int error = 0;
6444 	journal_t *journal = EXT4_SB(sb)->s_journal;
6445 
6446 	if (journal) {
6447 		/* Now we set up the journal barrier. */
6448 		jbd2_journal_lock_updates(journal);
6449 
6450 		/*
6451 		 * Don't clear the needs_recovery flag if we failed to
6452 		 * flush the journal.
6453 		 */
6454 		error = jbd2_journal_flush(journal, 0);
6455 		if (error < 0)
6456 			goto out;
6457 
6458 		/* Journal blocked and flushed, clear needs_recovery flag. */
6459 		ext4_clear_feature_journal_needs_recovery(sb);
6460 		if (ext4_orphan_file_empty(sb))
6461 			ext4_clear_feature_orphan_present(sb);
6462 	}
6463 
6464 	error = ext4_commit_super(sb);
6465 out:
6466 	if (journal)
6467 		/* we rely on upper layer to stop further updates */
6468 		jbd2_journal_unlock_updates(journal);
6469 	return error;
6470 }
6471 
6472 /*
6473  * Called by LVM after the snapshot is done.  We need to reset the RECOVER
6474  * flag here, even though the filesystem is not technically dirty yet.
6475  */
6476 static int ext4_unfreeze(struct super_block *sb)
6477 {
6478 	if (ext4_emergency_state(sb))
6479 		return 0;
6480 
6481 	if (EXT4_SB(sb)->s_journal) {
6482 		/* Reset the needs_recovery flag before the fs is unlocked. */
6483 		ext4_set_feature_journal_needs_recovery(sb);
6484 		if (ext4_has_feature_orphan_file(sb))
6485 			ext4_set_feature_orphan_present(sb);
6486 	}
6487 
6488 	ext4_commit_super(sb);
6489 	return 0;
6490 }
6491 
6492 /*
6493  * Structure to save mount options for ext4_remount's benefit
6494  */
6495 struct ext4_mount_options {
6496 	unsigned long s_mount_opt;
6497 	unsigned long s_mount_opt2;
6498 	kuid_t s_resuid;
6499 	kgid_t s_resgid;
6500 	unsigned long s_commit_interval;
6501 	u32 s_min_batch_time, s_max_batch_time;
6502 #ifdef CONFIG_QUOTA
6503 	int s_jquota_fmt;
6504 	char *s_qf_names[EXT4_MAXQUOTAS];
6505 #endif
6506 };
6507 
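/*
 * Core of the remount path: save the current options, apply the new ones
 * under s_writepages_rwsem, validate the resulting combination, and handle
 * ro<->rw transitions (journal recovery state, quotas, MMP, system zone and
 * the lazy itable init thread).  On failure the old options are restored.
 */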
6508 static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
6509 {
6510 	struct ext4_fs_context *ctx = fc->fs_private;
6511 	struct ext4_super_block *es;
6512 	struct ext4_sb_info *sbi = EXT4_SB(sb);
6513 	unsigned long old_sb_flags;
6514 	struct ext4_mount_options old_opts;
6515 	ext4_group_t g;
6516 	int err = 0;
6517 	int alloc_ctx;
6518 #ifdef CONFIG_QUOTA
6519 	int enable_quota = 0;
6520 	int i, j;
6521 	char *to_free[EXT4_MAXQUOTAS];
6522 #endif
6523 
6524 
6525 	/* Store the original options */
6526 	old_sb_flags = sb->s_flags;
6527 	old_opts.s_mount_opt = sbi->s_mount_opt;
6528 	old_opts.s_mount_opt2 = sbi->s_mount_opt2;
6529 	old_opts.s_resuid = sbi->s_resuid;
6530 	old_opts.s_resgid = sbi->s_resgid;
6531 	old_opts.s_commit_interval = sbi->s_commit_interval;
6532 	old_opts.s_min_batch_time = sbi->s_min_batch_time;
6533 	old_opts.s_max_batch_time = sbi->s_max_batch_time;
6534 #ifdef CONFIG_QUOTA
6535 	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
6536 	for (i = 0; i < EXT4_MAXQUOTAS; i++)
6537 		if (sbi->s_qf_names[i]) {
6538 			char *qf_name = get_qf_name(sb, sbi, i);
6539 
6540 			old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL);
6541 			if (!old_opts.s_qf_names[i]) {
6542 				for (j = 0; j < i; j++)
6543 					kfree(old_opts.s_qf_names[j]);
6544 				return -ENOMEM;
6545 			}
6546 		} else
6547 			old_opts.s_qf_names[i] = NULL;
6548 #endif
6549 	if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)) {
6550 		if (sbi->s_journal && sbi->s_journal->j_task->io_context)
6551 			ctx->journal_ioprio =
6552 				sbi->s_journal->j_task->io_context->ioprio;
6553 		else
6554 			ctx->journal_ioprio = EXT4_DEF_JOURNAL_IOPRIO;
6555 
6556 	}
6557 
6558 	if ((ctx->spec & EXT4_SPEC_s_stripe) &&
6559 	    ext4_is_stripe_incompatible(sb, ctx->s_stripe)) {
6560 		ext4_msg(sb, KERN_WARNING,
6561 			 "stripe (%lu) is not aligned with cluster size (%u), "
6562 			 "stripe is disabled",
6563 			 ctx->s_stripe, sbi->s_cluster_ratio);
6564 		ctx->s_stripe = 0;
6565 	}
6566 
6567 	/*
6568 	 * Changing the DIOREAD_NOLOCK or DELALLOC mount options may cause
6569 	 * two calls to ext4_should_dioread_nolock() to return inconsistent
6570 	 * values, triggering a WARN_ON in ext4_add_complete_io().  Grab
6571 	 * s_writepages_rwsem here to avoid a race between writepages ops and
6572 	 * remount.
6573 	 */
6574 	alloc_ctx = ext4_writepages_down_write(sb);
6575 	ext4_apply_options(fc, sb);
6576 	ext4_writepages_up_write(sb, alloc_ctx);
6577 
6578 	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
6579 	    test_opt(sb, JOURNAL_CHECKSUM)) {
6580 		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
6581 			 "during remount not supported; ignoring");
6582 		sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
6583 	}
6584 
6585 	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
6586 		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
6587 			ext4_msg(sb, KERN_ERR, "can't mount with "
6588 				 "both data=journal and delalloc");
6589 			err = -EINVAL;
6590 			goto restore_opts;
6591 		}
6592 		if (test_opt(sb, DIOREAD_NOLOCK)) {
6593 			ext4_msg(sb, KERN_ERR, "can't mount with "
6594 				 "both data=journal and dioread_nolock");
6595 			err = -EINVAL;
6596 			goto restore_opts;
6597 		}
6598 	} else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
6599 		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
6600 			ext4_msg(sb, KERN_ERR, "can't mount with "
6601 				"journal_async_commit in data=ordered mode");
6602 			err = -EINVAL;
6603 			goto restore_opts;
6604 		}
6605 	}
6606 
6607 	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
6608 		ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
6609 		err = -EINVAL;
6610 		goto restore_opts;
6611 	}
6612 
6613 	if ((old_opts.s_mount_opt & EXT4_MOUNT_DELALLOC) &&
6614 	    !test_opt(sb, DELALLOC)) {
6615 		ext4_msg(sb, KERN_ERR, "can't disable delalloc during remount");
6616 		err = -EINVAL;
6617 		goto restore_opts;
6618 	}
6619 
6620 	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
6621 		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
6622 
6623 	es = sbi->s_es;
6624 
6625 	if (sbi->s_journal) {
6626 		ext4_init_journal_params(sb, sbi->s_journal);
6627 		set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio);
6628 	}
6629 
6630 	/* Flush outstanding errors before changing fs state */
6631 	flush_work(&sbi->s_sb_upd_work);
6632 
6633 	if ((bool)(fc->sb_flags & SB_RDONLY) != sb_rdonly(sb)) {
6634 		if (ext4_emergency_state(sb)) {
6635 			err = -EROFS;
6636 			goto restore_opts;
6637 		}
6638 
6639 		if (fc->sb_flags & SB_RDONLY) {
6640 			err = sync_filesystem(sb);
6641 			if (err < 0)
6642 				goto restore_opts;
6643 			err = dquot_suspend(sb, -1);
6644 			if (err < 0)
6645 				goto restore_opts;
6646 
6647 			/*
6648 			 * First of all, the unconditional stuff we have to do
6649 			 * to disable replay of the journal when we next remount
6650 			 */
6651 			sb->s_flags |= SB_RDONLY;
6652 
6653 			/*
6654 			 * OK, test if we are remounting a valid rw partition
6655 			 * readonly, and if so set the rdonly flag and then
6656 			 * mark the partition as valid again.
6657 			 */
6658 			if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
6659 			    (sbi->s_mount_state & EXT4_VALID_FS))
6660 				es->s_state = cpu_to_le16(sbi->s_mount_state);
6661 
6662 			if (sbi->s_journal) {
6663 				/*
6664 				 * We let remount-ro finish even if marking fs
6665 				 * as clean failed...
6666 				 */
6667 				ext4_mark_recovery_complete(sb, es);
6668 			}
6669 		} else {
6670 			/* Make sure we can mount this feature set readwrite */
6671 			if (ext4_has_feature_readonly(sb) ||
6672 			    !ext4_feature_set_ok(sb, 0)) {
6673 				err = -EROFS;
6674 				goto restore_opts;
6675 			}
6676 			/*
6677 			 * Make sure the group descriptor checksums
6678 			 * are sane.  If they aren't, refuse to remount r/w.
6679 			 */
6680 			for (g = 0; g < sbi->s_groups_count; g++) {
6681 				struct ext4_group_desc *gdp =
6682 					ext4_get_group_desc(sb, g, NULL);
6683 
6684 				if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
6685 					ext4_msg(sb, KERN_ERR,
6686 	       "ext4_remount: Checksum for group %u failed (%u!=%u)",
6687 		g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
6688 					       le16_to_cpu(gdp->bg_checksum));
6689 					err = -EFSBADCRC;
6690 					goto restore_opts;
6691 				}
6692 			}
6693 
6694 			/*
6695 			 * If we have an unprocessed orphan list hanging
6696 			 * around from a previously readonly bdev mount,
6697 			 * require a full umount/remount for now.
6698 			 */
6699 			if (es->s_last_orphan || !ext4_orphan_file_empty(sb)) {
6700 				ext4_msg(sb, KERN_WARNING, "Couldn't "
6701 				       "remount RDWR because of unprocessed "
6702 				       "orphan inode list.  Please "
6703 				       "umount/remount instead");
6704 				err = -EINVAL;
6705 				goto restore_opts;
6706 			}
6707 
6708 			/*
6709 			 * Mounting a RDONLY partition read-write, so reread
6710 			 * and store the current valid flag.  (It may have
6711 			 * been changed by e2fsck since we originally mounted
6712 			 * the partition.)
6713 			 */
6714 			if (sbi->s_journal) {
6715 				err = ext4_clear_journal_err(sb, es);
6716 				if (err)
6717 					goto restore_opts;
6718 			}
6719 			sbi->s_mount_state = (le16_to_cpu(es->s_state) &
6720 					      ~EXT4_FC_REPLAY);
6721 
6722 			err = ext4_setup_super(sb, es, 0);
6723 			if (err)
6724 				goto restore_opts;
6725 
6726 			sb->s_flags &= ~SB_RDONLY;
6727 			if (ext4_has_feature_mmp(sb)) {
6728 				err = ext4_multi_mount_protect(sb,
6729 						le64_to_cpu(es->s_mmp_block));
6730 				if (err)
6731 					goto restore_opts;
6732 			}
6733 #ifdef CONFIG_QUOTA
6734 			enable_quota = 1;
6735 #endif
6736 		}
6737 	}
6738 
6739 	/*
6740 	 * Handle creation of system zone data early because it can fail.
6741 	 * Releasing of existing data is done when we are sure remount will
6742 	 * succeed.
6743 	 */
6744 	if (test_opt(sb, BLOCK_VALIDITY) && !sbi->s_system_blks) {
6745 		err = ext4_setup_system_zone(sb);
6746 		if (err)
6747 			goto restore_opts;
6748 	}
6749 
6750 	if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
6751 		err = ext4_commit_super(sb);
6752 		if (err)
6753 			goto restore_opts;
6754 	}
6755 
6756 #ifdef CONFIG_QUOTA
6757 	if (enable_quota) {
6758 		if (sb_any_quota_suspended(sb))
6759 			dquot_resume(sb, -1);
6760 		else if (ext4_has_feature_quota(sb)) {
6761 			err = ext4_enable_quotas(sb);
6762 			if (err)
6763 				goto restore_opts;
6764 		}
6765 	}
6766 	/* Release old quota file names */
6767 	for (i = 0; i < EXT4_MAXQUOTAS; i++)
6768 		kfree(old_opts.s_qf_names[i]);
6769 #endif
6770 	if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
6771 		ext4_release_system_zone(sb);
6772 
6773 	/*
6774 	 * Reinitialize lazy itable initialization thread based on
6775 	 * current settings
6776 	 */
6777 	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
6778 		ext4_unregister_li_request(sb);
6779 	else {
6780 		ext4_group_t first_not_zeroed;
6781 		first_not_zeroed = ext4_has_uninit_itable(sb);
6782 		ext4_register_li_request(sb, first_not_zeroed);
6783 	}
6784 
6785 	if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
6786 		ext4_stop_mmpd(sbi);
6787 
6788 	/*
6789 	 * Handle aborting the filesystem as the last thing during remount to
6790 	 * avoid obscure errors during remount when some option changes fail to
6791 	 * apply because the filesystem has been shut down.
6792 	 */
6793 	if (test_opt2(sb, ABORT))
6794 		ext4_abort(sb, ESHUTDOWN, "Abort forced by user");
6795 
6796 	return 0;
6797 
6798 restore_opts:
6799 	/*
6800 	 * If an r/w to ro transition failed, we may need to re-enable
6801 	 * quota.
6802 	 */
6803 	if (sb_rdonly(sb) && !(old_sb_flags & SB_RDONLY) &&
6804 	    sb_any_quota_suspended(sb))
6805 		dquot_resume(sb, -1);
6806 
6807 	alloc_ctx = ext4_writepages_down_write(sb);
6808 	sb->s_flags = old_sb_flags;
6809 	sbi->s_mount_opt = old_opts.s_mount_opt;
6810 	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
6811 	sbi->s_resuid = old_opts.s_resuid;
6812 	sbi->s_resgid = old_opts.s_resgid;
6813 	sbi->s_commit_interval = old_opts.s_commit_interval;
6814 	sbi->s_min_batch_time = old_opts.s_min_batch_time;
6815 	sbi->s_max_batch_time = old_opts.s_max_batch_time;
6816 	ext4_writepages_up_write(sb, alloc_ctx);
6817 
6818 	if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
6819 		ext4_release_system_zone(sb);
6820 #ifdef CONFIG_QUOTA
6821 	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
6822 	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
6823 		to_free[i] = get_qf_name(sb, sbi, i);
6824 		rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]);
6825 	}
6826 	synchronize_rcu();
6827 	for (i = 0; i < EXT4_MAXQUOTAS; i++)
6828 		kfree(to_free[i]);
6829 #endif
6830 	if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
6831 		ext4_stop_mmpd(sbi);
6832 	return err;
6833 }
6834 
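/*
 * Remount entry point for the new mount API: verify that the new options
 * are consistent with the existing super_block, apply them through
 * __ext4_remount() and log the resulting mount state.
 */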
6835 static int ext4_reconfigure(struct fs_context *fc)
6836 {
6837 	struct super_block *sb = fc->root->d_sb;
6838 	int ret;
6839 	bool old_ro = sb_rdonly(sb);
6840 
6841 	fc->s_fs_info = EXT4_SB(sb);
6842 
6843 	ret = ext4_check_opt_consistency(fc, sb);
6844 	if (ret < 0)
6845 		return ret;
6846 
6847 	ret = __ext4_remount(fc, sb);
6848 	if (ret < 0)
6849 		return ret;
6850 
6851 	ext4_msg(sb, KERN_INFO, "re-mounted %pU%s.",
6852 		 &sb->s_uuid,
6853 		 (old_ro != sb_rdonly(sb)) ? (sb_rdonly(sb) ? " ro" : " r/w") : "");
6854 
6855 	return 0;
6856 }
6857 
6858 #ifdef CONFIG_QUOTA
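/*
 * Clamp the statfs(2) results to the project quota limits, so that df
 * inside a directory tree with EXT4_INODE_PROJINHERIT reflects the
 * project's block and inode limits rather than the whole filesystem.
 */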
6859 static int ext4_statfs_project(struct super_block *sb,
6860 			       kprojid_t projid, struct kstatfs *buf)
6861 {
6862 	struct kqid qid;
6863 	struct dquot *dquot;
6864 	u64 limit;
6865 	u64 curblock;
6866 
6867 	qid = make_kqid_projid(projid);
6868 	dquot = dqget(sb, qid);
6869 	if (IS_ERR(dquot))
6870 		return PTR_ERR(dquot);
6871 	spin_lock(&dquot->dq_dqb_lock);
6872 
6873 	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
6874 			     dquot->dq_dqb.dqb_bhardlimit);
6875 	limit >>= sb->s_blocksize_bits;
6876 
6877 	if (limit) {
6878 		uint64_t	remaining = 0;
6879 
6880 		curblock = (dquot->dq_dqb.dqb_curspace +
6881 			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
6882 		if (limit > curblock)
6883 			remaining = limit - curblock;
6884 
6885 		buf->f_blocks = min(buf->f_blocks, limit);
6886 		buf->f_bfree = min(buf->f_bfree, remaining);
6887 		buf->f_bavail = min(buf->f_bavail, remaining);
6888 	}
6889 
6890 	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
6891 			     dquot->dq_dqb.dqb_ihardlimit);
6892 	if (limit) {
6893 		uint64_t	remaining = 0;
6894 
6895 		if (limit > dquot->dq_dqb.dqb_curinodes)
6896 			remaining = limit - dquot->dq_dqb.dqb_curinodes;
6897 
6898 		buf->f_files = min(buf->f_files, limit);
6899 		buf->f_ffree = min(buf->f_ffree, remaining);
6900 	}
6901 
6902 	spin_unlock(&dquot->dq_dqb_lock);
6903 	dqput(dquot);
6904 	return 0;
6905 }
6906 #endif
6907 
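/*
 * Report filesystem statistics: unless the MINIX_DF (minixdf) option is
 * set, metadata overhead is subtracted from the block count, and reserved
 * blocks/clusters are excluded from the space reported as available.
 */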
6908 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
6909 {
6910 	struct super_block *sb = dentry->d_sb;
6911 	struct ext4_sb_info *sbi = EXT4_SB(sb);
6912 	struct ext4_super_block *es = sbi->s_es;
6913 	ext4_fsblk_t overhead = 0, resv_blocks;
6914 	s64 bfree;
6915 	resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));
6916 
6917 	if (!test_opt(sb, MINIX_DF))
6918 		overhead = sbi->s_overhead;
6919 
6920 	buf->f_type = EXT4_SUPER_MAGIC;
6921 	buf->f_bsize = sb->s_blocksize;
6922 	buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
6923 	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
6924 		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
6925 	/* prevent underflow in case little free space is available */
6926 	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
6927 	buf->f_bavail = buf->f_bfree -
6928 			(ext4_r_blocks_count(es) + resv_blocks);
6929 	if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
6930 		buf->f_bavail = 0;
6931 	buf->f_files = le32_to_cpu(es->s_inodes_count);
6932 	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
6933 	buf->f_namelen = EXT4_NAME_LEN;
6934 	buf->f_fsid = uuid_to_fsid(es->s_uuid);
6935 
6936 #ifdef CONFIG_QUOTA
6937 	if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
6938 	    sb_has_quota_limits_enabled(sb, PRJQUOTA))
6939 		ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
6940 #endif
6941 	return 0;
6942 }
6943 
6944 
6945 #ifdef CONFIG_QUOTA
6946 
6947 /*
6948  * Helper functions so that the transaction is started before we acquire
6949  * dqio_sem, keeping the correct lock ordering of transaction > dqio_sem.
6950  */
6951 static inline struct inode *dquot_to_inode(struct dquot *dquot)
6952 {
6953 	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
6954 }
6955 
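/* Commit a dquot's in-memory state to the quota file within a fresh handle. */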
6956 static int ext4_write_dquot(struct dquot *dquot)
6957 {
6958 	int ret, err;
6959 	handle_t *handle;
6960 	struct inode *inode;
6961 
6962 	inode = dquot_to_inode(dquot);
6963 	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
6964 				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
6965 	if (IS_ERR(handle))
6966 		return PTR_ERR(handle);
6967 	ret = dquot_commit(dquot);
6968 	if (ret < 0)
6969 		ext4_error_err(dquot->dq_sb, -ret,
6970 			       "Failed to commit dquot type %d",
6971 			       dquot->dq_id.type);
6972 	err = ext4_journal_stop(handle);
6973 	if (!ret)
6974 		ret = err;
6975 	return ret;
6976 }
6977 
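/* Read in and initialize a dquot, within a transaction sized for quota init. */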
6978 static int ext4_acquire_dquot(struct dquot *dquot)
6979 {
6980 	int ret, err;
6981 	handle_t *handle;
6982 
6983 	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
6984 				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
6985 	if (IS_ERR(handle))
6986 		return PTR_ERR(handle);
6987 	ret = dquot_acquire(dquot);
6988 	if (ret < 0)
6989 		ext4_error_err(dquot->dq_sb, -ret,
6990 			      "Failed to acquire dquot type %d",
6991 			      dquot->dq_id.type);
6992 	err = ext4_journal_stop(handle);
6993 	if (!ret)
6994 		ret = err;
6995 	return ret;
6996 }
6997 
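/* Release a dquot back to the quota file when its last reference is dropped. */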
6998 static int ext4_release_dquot(struct dquot *dquot)
6999 {
7000 	int ret, err;
7001 	handle_t *handle;
7002 	bool freeze_protected = false;
7003 
7004 	/*
7005 	 * Trying to sb_start_intwrite() in a running transaction
7006 	 * can result in a deadlock. Further, running transactions
7007 	 * are already protected from freezing.
7008 	 */
7009 	if (!ext4_journal_current_handle()) {
7010 		sb_start_intwrite(dquot->dq_sb);
7011 		freeze_protected = true;
7012 	}
7013 
7014 	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
7015 				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
7016 	if (IS_ERR(handle)) {
7017 		/* Release dquot anyway to avoid endless cycle in dqput() */
7018 		dquot_release(dquot);
7019 		if (freeze_protected)
7020 			sb_end_intwrite(dquot->dq_sb);
7021 		return PTR_ERR(handle);
7022 	}
7023 	ret = dquot_release(dquot);
7024 	if (ret < 0)
7025 		ext4_error_err(dquot->dq_sb, -ret,
7026 			       "Failed to release dquot type %d",
7027 			       dquot->dq_id.type);
7028 	err = ext4_journal_stop(handle);
7029 	if (!ret)
7030 		ret = err;
7031 
7032 	if (freeze_protected)
7033 		sb_end_intwrite(dquot->dq_sb);
7034 
7035 	return ret;
7036 }
7037 
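/*
 * With journalled quota, write the dquot immediately so that the update
 * becomes part of the running transaction; otherwise just mark it dirty
 * and let it be written back later.
 */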
7038 static int ext4_mark_dquot_dirty(struct dquot *dquot)
7039 {
7040 	struct super_block *sb = dquot->dq_sb;
7041 
7042 	if (ext4_is_quota_journalled(sb)) {
7043 		dquot_mark_dquot_dirty(dquot);
7044 		return ext4_write_dquot(dquot);
7045 	} else {
7046 		return dquot_mark_dquot_dirty(dquot);
7047 	}
7048 }
7049 
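/* Write the quota file information (header) block within a small transaction. */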
7050 static int ext4_write_info(struct super_block *sb, int type)
7051 {
7052 	int ret, err;
7053 	handle_t *handle;
7054 
7055 	/* Data block + inode block */
7056 	handle = ext4_journal_start_sb(sb, EXT4_HT_QUOTA, 2);
7057 	if (IS_ERR(handle))
7058 		return PTR_ERR(handle);
7059 	ret = dquot_commit_info(sb, type);
7060 	err = ext4_journal_stop(handle);
7061 	if (!ret)
7062 		ret = err;
7063 	return ret;
7064 }
7065 
7066 static void lockdep_set_quota_inode(struct inode *inode, int subclass)
7067 {
7068 	struct ext4_inode_info *ei = EXT4_I(inode);
7069 
7070 	/* The first argument of lockdep_set_subclass has to be
7071 	 * *exactly* the same as the argument to init_rwsem() --- in
7072 	 * this case, in init_once() --- or lockdep gets unhappy
7073 	 * because the name of the lock is set using the
7074 	 * stringification of the argument to init_rwsem().
7075 	 */
7076 	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
7077 	lockdep_set_subclass(&ei->i_data_sem, subclass);
7078 }
7079 
7080 /*
7081  * Standard function to be called on quota_on
7082  */
7083 static int ext4_quota_on(struct super_block *sb, int type, int format_id,
7084 			 const struct path *path)
7085 {
7086 	int err;
7087 
7088 	if (!test_opt(sb, QUOTA))
7089 		return -EINVAL;
7090 
7091 	/* Quotafile not on the same filesystem? */
7092 	if (path->dentry->d_sb != sb)
7093 		return -EXDEV;
7094 
7095 	/* Quota already enabled for this file? */
7096 	if (IS_NOQUOTA(d_inode(path->dentry)))
7097 		return -EBUSY;
7098 
7099 	/* Journaling quota? */
7100 	if (EXT4_SB(sb)->s_qf_names[type]) {
7101 		/* Quotafile not in fs root? */
7102 		if (path->dentry->d_parent != sb->s_root)
7103 			ext4_msg(sb, KERN_WARNING,
7104 				"Quota file not on filesystem root. "
7105 				"Journaled quota will not work");
7106 		sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
7107 	} else {
7108 		/*
7109 		 * Clear the flag just in case mount options changed since
7110 		 * last time.
7111 		 */
7112 		sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
7113 	}
7114 
7115 	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
7116 	err = dquot_quota_on(sb, type, format_id, path);
7117 	if (!err) {
7118 		struct inode *inode = d_inode(path->dentry);
7119 		handle_t *handle;
7120 
7121 		/*
7122 		 * Set inode flags to prevent userspace from messing with quota
7123 		 * files. If this fails, we return success anyway since quotas
7124 		 * are already enabled and this is not a hard failure.
7125 		 */
7126 		inode_lock(inode);
7127 		handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
7128 		if (IS_ERR(handle))
7129 			goto unlock_inode;
7130 		EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
7131 		inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
7132 				S_NOATIME | S_IMMUTABLE);
7133 		err = ext4_mark_inode_dirty(handle, inode);
7134 		ext4_journal_stop(handle);
7135 	unlock_inode:
7136 		inode_unlock(inode);
7137 		if (err)
7138 			dquot_quota_off(sb, type);
7139 	}
7140 	if (err)
7141 		lockdep_set_quota_inode(path->dentry->d_inode,
7142 					     I_DATA_SEM_NORMAL);
7143 	return err;
7144 }
7145 
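/*
 * Sanity-check the quota inode number read from the superblock: user and
 * group quota must live in their reserved inodes, project quota in a
 * regular (non-reserved) inode.
 */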
7146 static inline bool ext4_check_quota_inum(int type, unsigned long qf_inum)
7147 {
7148 	switch (type) {
7149 	case USRQUOTA:
7150 		return qf_inum == EXT4_USR_QUOTA_INO;
7151 	case GRPQUOTA:
7152 		return qf_inum == EXT4_GRP_QUOTA_INO;
7153 	case PRJQUOTA:
7154 		return qf_inum >= EXT4_GOOD_OLD_FIRST_INO;
7155 	default:
7156 		BUG();
7157 	}
7158 }
7159 
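/*
 * Enable quotas stored in the hidden quota inodes referenced from the
 * superblock (the QUOTA feature), as opposed to visible quota files
 * enabled through ext4_quota_on().
 */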
7160 static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
7161 			     unsigned int flags)
7162 {
7163 	int err;
7164 	struct inode *qf_inode;
7165 	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
7166 		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
7167 		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
7168 		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
7169 	};
7170 
7171 	BUG_ON(!ext4_has_feature_quota(sb));
7172 
7173 	if (!qf_inums[type])
7174 		return -EPERM;
7175 
7176 	if (!ext4_check_quota_inum(type, qf_inums[type])) {
7177 		ext4_error(sb, "Bad quota inum: %lu, type: %d",
7178 				qf_inums[type], type);
7179 		return -EUCLEAN;
7180 	}
7181 
7182 	qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
7183 	if (IS_ERR(qf_inode)) {
7184 		ext4_error(sb, "Bad quota inode: %lu, type: %d",
7185 				qf_inums[type], type);
7186 		return PTR_ERR(qf_inode);
7187 	}
7188 
7189 	/* Don't account quota for quota files to avoid recursion */
7190 	qf_inode->i_flags |= S_NOQUOTA;
7191 	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
7192 	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
7193 	if (err)
7194 		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
7195 	iput(qf_inode);
7196 
7197 	return err;
7198 }
7199 
7200 /* Enable usage tracking for all quota types. */
7201 int ext4_enable_quotas(struct super_block *sb)
7202 {
7203 	int type, err = 0;
7204 	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
7205 		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
7206 		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
7207 		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
7208 	};
7209 	bool quota_mopt[EXT4_MAXQUOTAS] = {
7210 		test_opt(sb, USRQUOTA),
7211 		test_opt(sb, GRPQUOTA),
7212 		test_opt(sb, PRJQUOTA),
7213 	};
7214 
7215 	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
7216 	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
7217 		if (qf_inums[type]) {
7218 			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
7219 				DQUOT_USAGE_ENABLED |
7220 				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
7221 			if (err) {
7222 				ext4_warning(sb,
7223 					"Failed to enable quota tracking "
7224 					"(type=%d, err=%d, ino=%lu). "
7225 					"Please run e2fsck to fix.", type,
7226 					err, qf_inums[type]);
7227 
7228 				ext4_quotas_off(sb, type);
7229 				return err;
7230 			}
7231 		}
7232 	}
7233 	return 0;
7234 }
7235 
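/*
 * Turn quotas off for one quota type. For old-style (non-feature) quota
 * files, also restore the inode flags and timestamps that ext4_quota_on()
 * changed, so the file looks normal to userspace again.
 */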
7236 static int ext4_quota_off(struct super_block *sb, int type)
7237 {
7238 	struct inode *inode = sb_dqopt(sb)->files[type];
7239 	handle_t *handle;
7240 	int err;
7241 
7242 	/* Force all delayed allocation blocks to be allocated.
7243 	 * Caller already holds s_umount sem */
7244 	if (test_opt(sb, DELALLOC))
7245 		sync_filesystem(sb);
7246 
7247 	if (!inode || !igrab(inode))
7248 		goto out;
7249 
7250 	err = dquot_quota_off(sb, type);
7251 	if (err || ext4_has_feature_quota(sb))
7252 		goto out_put;
7253 	/*
7254 	 * When the filesystem was remounted read-only first, we cannot clean
7255 	 * up inode flags here. Bad luck, but people should be using the QUOTA
7256 	 * feature these days anyway.
7257 	 */
7258 	if (sb_rdonly(sb))
7259 		goto out_put;
7260 
7261 	inode_lock(inode);
7262 	/*
7263 	 * Update modification times of quota files when userspace can
7264 	 * start looking at them. If we fail, we return success anyway since
7265 	 * this is not a hard failure and quotas are already disabled.
7266 	 */
7267 	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
7268 	if (IS_ERR(handle)) {
7269 		err = PTR_ERR(handle);
7270 		goto out_unlock;
7271 	}
7272 	EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
7273 	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
7274 	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
7275 	err = ext4_mark_inode_dirty(handle, inode);
7276 	ext4_journal_stop(handle);
7277 out_unlock:
7278 	inode_unlock(inode);
7279 out_put:
7280 	lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
7281 	iput(inode);
7282 	return err;
7283 out:
7284 	return dquot_quota_off(sb, type);
7285 }
7286 
7287 /* Read data from the quota file - avoid the page cache and such because we
7288  * cannot afford acquiring the locks. As quota files are never truncated and
7289  * the quota code itself serializes the operations (and no one else should
7290  * touch the files), we don't have to be afraid of races. */
7291 static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
7292 			       size_t len, loff_t off)
7293 {
7294 	struct inode *inode = sb_dqopt(sb)->files[type];
7295 	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
7296 	int offset = off & (sb->s_blocksize - 1);
7297 	int tocopy;
7298 	size_t toread;
7299 	struct buffer_head *bh;
7300 	loff_t i_size = i_size_read(inode);
7301 
7302 	if (off > i_size)
7303 		return 0;
7304 	if (off+len > i_size)
7305 		len = i_size-off;
7306 	toread = len;
7307 	while (toread > 0) {
7308 		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
7309 		bh = ext4_bread(NULL, inode, blk, 0);
7310 		if (IS_ERR(bh))
7311 			return PTR_ERR(bh);
7312 		if (!bh)	/* A hole? */
7313 			memset(data, 0, tocopy);
7314 		else
7315 			memcpy(data, bh->b_data+offset, tocopy);
7316 		brelse(bh);
7317 		offset = 0;
7318 		toread -= tocopy;
7319 		data += tocopy;
7320 		blk++;
7321 	}
7322 	return len;
7323 }
7324 
7325 /* Write to quotafile (we know the transaction is already started and has
7326  * enough credits) */
7327 static ssize_t ext4_quota_write(struct super_block *sb, int type,
7328 				const char *data, size_t len, loff_t off)
7329 {
7330 	struct inode *inode = sb_dqopt(sb)->files[type];
7331 	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
7332 	int err = 0, err2 = 0, offset = off & (sb->s_blocksize - 1);
7333 	int retries = 0;
7334 	struct buffer_head *bh;
7335 	handle_t *handle = journal_current_handle();
7336 
7337 	if (!handle) {
7338 		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
7339 			" cancelled because transaction is not started",
7340 			(unsigned long long)off, (unsigned long long)len);
7341 		return -EIO;
7342 	}
7343 	/*
7344 	 * Since we account for only one data block in the transaction credits,
7345 	 * the write must not cross a block boundary.
7346 	 */
7347 	if (sb->s_blocksize - offset < len) {
7348 		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
7349 			" cancelled because not block aligned",
7350 			(unsigned long long)off, (unsigned long long)len);
7351 		return -EIO;
7352 	}
7353 
7354 	do {
7355 		bh = ext4_bread(handle, inode, blk,
7356 				EXT4_GET_BLOCKS_CREATE |
7357 				EXT4_GET_BLOCKS_METADATA_NOFAIL);
7358 	} while (PTR_ERR(bh) == -ENOSPC &&
7359 		 ext4_should_retry_alloc(inode->i_sb, &retries));
7360 	if (IS_ERR(bh))
7361 		return PTR_ERR(bh);
7362 	if (!bh)
7363 		goto out;
7364 	BUFFER_TRACE(bh, "get write access");
7365 	err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE);
7366 	if (err) {
7367 		brelse(bh);
7368 		return err;
7369 	}
7370 	lock_buffer(bh);
7371 	memcpy(bh->b_data+offset, data, len);
7372 	flush_dcache_folio(bh->b_folio);
7373 	unlock_buffer(bh);
7374 	err = ext4_handle_dirty_metadata(handle, NULL, bh);
7375 	brelse(bh);
7376 out:
7377 	if (inode->i_size < off + len) {
7378 		i_size_write(inode, off + len);
7379 		EXT4_I(inode)->i_disksize = inode->i_size;
7380 		err2 = ext4_mark_inode_dirty(handle, inode);
7381 		if (unlikely(err2 && !err))
7382 			err = err2;
7383 	}
7384 	return err ? err : len;
7385 }
7386 #endif
7387 
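/*
 * When ext4 is built to also service ext2/ext3 mounts, register those extra
 * filesystem types; the *_feature_set_ok() helpers ensure a filesystem is
 * only mounted that way if its feature set fits the older on-disk formats.
 */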
7388 #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
7389 static inline void register_as_ext2(void)
7390 {
7391 	int err = register_filesystem(&ext2_fs_type);
7392 	if (err)
7393 		printk(KERN_WARNING
7394 		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
7395 }
7396 
7397 static inline void unregister_as_ext2(void)
7398 {
7399 	unregister_filesystem(&ext2_fs_type);
7400 }
7401 
7402 static inline int ext2_feature_set_ok(struct super_block *sb)
7403 {
7404 	if (ext4_has_unknown_ext2_incompat_features(sb))
7405 		return 0;
7406 	if (sb_rdonly(sb))
7407 		return 1;
7408 	if (ext4_has_unknown_ext2_ro_compat_features(sb))
7409 		return 0;
7410 	return 1;
7411 }
7412 #else
7413 static inline void register_as_ext2(void) { }
7414 static inline void unregister_as_ext2(void) { }
7415 static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
7416 #endif
7417 
7418 static inline void register_as_ext3(void)
7419 {
7420 	int err = register_filesystem(&ext3_fs_type);
7421 	if (err)
7422 		printk(KERN_WARNING
7423 		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
7424 }
7425 
7426 static inline void unregister_as_ext3(void)
7427 {
7428 	unregister_filesystem(&ext3_fs_type);
7429 }
7430 
7431 static inline int ext3_feature_set_ok(struct super_block *sb)
7432 {
7433 	if (ext4_has_unknown_ext3_incompat_features(sb))
7434 		return 0;
7435 	if (!ext4_has_feature_journal(sb))
7436 		return 0;
7437 	if (sb_rdonly(sb))
7438 		return 1;
7439 	if (ext4_has_unknown_ext3_ro_compat_features(sb))
7440 		return 0;
7441 	return 1;
7442 }
7443 
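/*
 * Tear down the superblock and, once generic cleanup is done, drop the
 * reference to the external journal device if one was in use.
 */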
7444 static void ext4_kill_sb(struct super_block *sb)
7445 {
7446 	struct ext4_sb_info *sbi = EXT4_SB(sb);
7447 	struct file *bdev_file = sbi ? sbi->s_journal_bdev_file : NULL;
7448 
7449 	kill_block_super(sb);
7450 
7451 	if (bdev_file)
7452 		bdev_fput(bdev_file);
7453 }
7454 
7455 static struct file_system_type ext4_fs_type = {
7456 	.owner			= THIS_MODULE,
7457 	.name			= "ext4",
7458 	.init_fs_context	= ext4_init_fs_context,
7459 	.parameters		= ext4_param_specs,
7460 	.kill_sb		= ext4_kill_sb,
7461 	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME |
7462 				  FS_LBS,
7463 };
7464 MODULE_ALIAS_FS("ext4");
7465 
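/*
 * Module init: set up ext4's internal caches and subsystems in order, then
 * register the filesystem types; on failure, unwind in reverse order.
 */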
7466 static int __init ext4_init_fs(void)
7467 {
7468 	int err;
7469 
7470 	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
7471 	ext4_li_info = NULL;
7472 
7473 	/* Build-time check for flags consistency */
7474 	ext4_check_flag_values();
7475 
7476 	err = ext4_init_es();
7477 	if (err)
7478 		return err;
7479 
7480 	err = ext4_init_pending();
7481 	if (err)
7482 		goto out7;
7483 
7484 	err = ext4_init_post_read_processing();
7485 	if (err)
7486 		goto out6;
7487 
7488 	err = ext4_init_pageio();
7489 	if (err)
7490 		goto out5;
7491 
7492 	err = ext4_init_system_zone();
7493 	if (err)
7494 		goto out4;
7495 
7496 	err = ext4_init_sysfs();
7497 	if (err)
7498 		goto out3;
7499 
7500 	err = ext4_init_mballoc();
7501 	if (err)
7502 		goto out2;
7503 	err = init_inodecache();
7504 	if (err)
7505 		goto out1;
7506 
7507 	err = ext4_fc_init_dentry_cache();
7508 	if (err)
7509 		goto out05;
7510 
7511 	register_as_ext3();
7512 	register_as_ext2();
7513 	err = register_filesystem(&ext4_fs_type);
7514 	if (err)
7515 		goto out;
7516 
7517 	return 0;
7518 out:
7519 	unregister_as_ext2();
7520 	unregister_as_ext3();
7521 	ext4_fc_destroy_dentry_cache();
7522 out05:
7523 	destroy_inodecache();
7524 out1:
7525 	ext4_exit_mballoc();
7526 out2:
7527 	ext4_exit_sysfs();
7528 out3:
7529 	ext4_exit_system_zone();
7530 out4:
7531 	ext4_exit_pageio();
7532 out5:
7533 	ext4_exit_post_read_processing();
7534 out6:
7535 	ext4_exit_pending();
7536 out7:
7537 	ext4_exit_es();
7538 
7539 	return err;
7540 }
7541 
7542 static void __exit ext4_exit_fs(void)
7543 {
7544 	ext4_destroy_lazyinit_thread();
7545 	unregister_as_ext2();
7546 	unregister_as_ext3();
7547 	unregister_filesystem(&ext4_fs_type);
7548 	ext4_fc_destroy_dentry_cache();
7549 	destroy_inodecache();
7550 	ext4_exit_mballoc();
7551 	ext4_exit_sysfs();
7552 	ext4_exit_system_zone();
7553 	ext4_exit_pageio();
7554 	ext4_exit_post_read_processing();
7555 	ext4_exit_es();
7556 	ext4_exit_pending();
7557 }
7558 
7559 MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
7560 MODULE_DESCRIPTION("Fourth Extended Filesystem");
7561 MODULE_LICENSE("GPL");
7562 module_init(ext4_init_fs)
7563 module_exit(ext4_exit_fs)
7564