// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fsnotify.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/fserror.h>

#include "ext4.h"
#include "ext4_extents.h"	/* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"
#include "fsmap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

static struct ext4_lazy_init *ext4_li_info;
static DEFINE_MUTEX(ext4_li_mtx);
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
			     unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static void ext4_update_super(struct super_block *sb);
static int ext4_commit_super(struct super_block *sb);
static int ext4_mark_recovery_complete(struct super_block *sb,
				       struct ext4_super_block *es);
static int ext4_clear_journal_err(struct super_block *sb,
				  struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum);
static int ext4_validate_options(struct fs_context *fc);
static int ext4_check_opt_consistency(struct fs_context *fc,
				      struct super_block *sb);
static void ext4_apply_options(struct fs_context *fc, struct super_block *sb);
static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param);
static int ext4_get_tree(struct fs_context *fc);
static int ext4_reconfigure(struct fs_context *fc);
static void ext4_fc_free(struct fs_context *fc);
static int ext4_init_fs_context(struct fs_context *fc);
static void ext4_kill_sb(struct super_block *sb);
static const struct fs_parameter_spec ext4_param_specs[];

/*
 * Lock ordering
 *
 * page fault path:
 * mmap_lock -> sb_start_pagefault -> invalidate_lock (r) -> transaction start
 *   -> page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_lock
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> invalidate_lock (w) -> i_mmap_rwsem (w) ->
 *   page lock
 * sb_start_write -> i_mutex -> invalidate_lock (w) -> transaction start ->
 *   i_data_sem (rw)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> mmap_lock
 * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */

static const struct fs_context_operations ext4_context_ops = {
	.parse_param	= ext4_parse_param,
	.get_tree	= ext4_get_tree,
	.reconfigure	= ext4_reconfigure,
	.free		= ext4_fc_free,
};


#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "ext2",
	.init_fs_context	= ext4_init_fs_context,
	.parameters		= ext4_param_specs,
	.kill_sb		= ext4_kill_sb,
	.fs_flags		= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_type == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif


static struct file_system_type ext3_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "ext3",
	.init_fs_context	= ext4_init_fs_context,
	.parameters		= ext4_param_specs,
	.kill_sb		= ext4_kill_sb,
	.fs_flags		= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_type == &ext3_fs_type)

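/*
 * Buffer head read helpers. All of them expect the buffer to be locked
 * on entry and consume the lock: either the early-return paths below or
 * the I/O completion handler unlock it. The simu_fail argument is a
 * test hook used by ext4's fault-injection machinery to simulate a
 * failed read (the buffer is left !uptodate) without touching the disk.
 */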
static inline void __ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
				  bh_end_io_t *end_io, bool simu_fail)
{
	if (simu_fail) {
		clear_buffer_uptodate(bh);
		unlock_buffer(bh);
		return;
	}

	/*
	 * buffer's verified bit is no longer valid after reading from
	 * disk again due to write out error, clear it to make sure we
	 * recheck the buffer contents.
	 */
	clear_buffer_verified(bh);

	bh->b_end_io = end_io ? end_io : end_buffer_read_sync;
	get_bh(bh);
	submit_bh(REQ_OP_READ | op_flags, bh);
}

void ext4_read_bh_nowait(struct buffer_head *bh, blk_opf_t op_flags,
			 bh_end_io_t *end_io, bool simu_fail)
{
	BUG_ON(!buffer_locked(bh));

	if (ext4_buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return;
	}
	__ext4_read_bh(bh, op_flags, end_io, simu_fail);
}

int ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
		 bh_end_io_t *end_io, bool simu_fail)
{
	BUG_ON(!buffer_locked(bh));

	if (ext4_buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	__ext4_read_bh(bh, op_flags, end_io, simu_fail);

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
	return -EIO;
}

int ext4_read_bh_lock(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
{
	lock_buffer(bh);
	if (!wait) {
		ext4_read_bh_nowait(bh, op_flags, NULL, false);
		return 0;
	}
	return ext4_read_bh(bh, op_flags, NULL, false);
}

/*
 * This works like __bread_gfp() except it uses ERR_PTR for error
 * returns. Currently with sb_bread it's impossible to distinguish
 * between ENOMEM and EIO situations (since both result in a NULL
 * return).
 */
static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb,
					       sector_t block,
					       blk_opf_t op_flags, gfp_t gfp)
{
	struct buffer_head *bh;
	int ret;

	bh = sb_getblk_gfp(sb, block, gfp);
	if (bh == NULL)
		return ERR_PTR(-ENOMEM);
	if (ext4_buffer_uptodate(bh))
		return bh;

	ret = ext4_read_bh_lock(bh, REQ_META | op_flags, true);
	if (ret) {
		put_bh(bh);
		return ERR_PTR(ret);
	}
	return bh;
}

struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block,
				  blk_opf_t op_flags)
{
	gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
			~__GFP_FS) | __GFP_MOVABLE;

	return __ext4_sb_bread_gfp(sb, block, op_flags, gfp);
}
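/*
 * A minimal caller sketch: ext4_sb_bread() never returns NULL, so
 * callers test the result with IS_ERR() and drop the reference with
 * brelse() when done:
 *
 *	bh = ext4_sb_bread(sb, block, 0);
 *	if (IS_ERR(bh))
 *		return PTR_ERR(bh);
 *	...use bh->b_data...
 *	brelse(bh);
 */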

struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
					    sector_t block)
{
	gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
			~__GFP_FS);

	return __ext4_sb_bread_gfp(sb, block, 0, gfp);
}

struct buffer_head *ext4_sb_bread_nofail(struct super_block *sb,
					 sector_t block)
{
	gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
			~__GFP_FS) | __GFP_MOVABLE | __GFP_NOFAIL;

	return __ext4_sb_bread_gfp(sb, block, 0, gfp);
}

void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = bdev_getblk(sb->s_bdev, block,
			sb->s_blocksize, GFP_NOWAIT);

	if (likely(bh)) {
		if (trylock_buffer(bh))
			ext4_read_bh_nowait(bh, REQ_RAHEAD, NULL, false);
		brelse(bh);
	}
}

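/*
 * Superblock checksumming. With the metadata_csum feature the
 * s_checksum field holds a crc32c over every superblock byte that
 * precedes the field itself, which is why the helpers below compute
 * the checksum over offsetof(struct ext4_super_block, s_checksum)
 * bytes.
 */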
static int ext4_verify_csum_type(struct super_block *sb,
				 struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}

__le32 ext4_superblock_csum(struct ext4_super_block *es)
{
	int offset = offsetof(struct ext4_super_block, s_checksum);
	__u32 csum;

	csum = ext4_chksum(~0, (char *)es, offset);

	return cpu_to_le32(csum);
}

static int ext4_superblock_csum_verify(struct super_block *sb,
				       struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum == ext4_superblock_csum(es);
}

void ext4_superblock_csum_set(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (!ext4_has_feature_metadata_csum(sb))
		return;

	es->s_checksum = ext4_superblock_csum(es);
}

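/*
 * Block group descriptor accessors. On a filesystem with 64-bit-capable
 * descriptors (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT), each
 * on-disk quantity is split into _lo/_hi halves; e.g. a bitmap block
 * number b is stored as lo = b & 0xffffffff and hi = b >> 32. On
 * smaller descriptors only the _lo half exists, so the _hi part is
 * read back as zero.
 */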
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_block_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_table_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}

__u32 ext4_free_group_clusters(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
			     struct ext4_group_desc *bg)
{
	return le16_to_cpu(READ_ONCE(bg->bg_free_inodes_count_lo)) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(READ_ONCE(bg->bg_free_inodes_count_hi)) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
			   struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_itable_unused_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}

void ext4_block_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
				  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	WRITE_ONCE(bg->bg_free_inodes_count_lo, cpu_to_le16((__u16)count));
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		WRITE_ONCE(bg->bg_free_inodes_count_hi, cpu_to_le16(count >> 16));
}

void ext4_used_dirs_set(struct super_block *sb,
			struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
			    struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}

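/*
 * Superblock timestamps are 40 bits wide: 32 bits in the classic
 * __le32 field plus 8 extra high bits in a companion __u8 field,
 * extending the representable range well past 2038 (up to 2^40 - 1
 * seconds since the epoch). E.g. now = 0x100000000 is stored as
 * lo = 0, hi = 1.
 */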
static void __ext4_update_tstamp(__le32 *lo, __u8 *hi, time64_t now)
{
	now = clamp_val(now, 0, (1ull << 40) - 1);

	*lo = cpu_to_le32(lower_32_bits(now));
	*hi = upper_32_bits(now);
}

static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
{
	return ((time64_t)(*hi) << 32) + le32_to_cpu(*lo);
}

#define ext4_update_tstamp(es, tstamp) \
	__ext4_update_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi, \
			     ktime_get_real_seconds())
#define ext4_get_tstamp(es, tstamp) \
	__ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
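/*
 * The macros token-paste the _hi suffix, so callers name only the base
 * field, e.g.:
 *
 *	ext4_update_tstamp(es, s_mtime);
 *	time64_t wtime = ext4_get_tstamp(es, s_wtime);
 */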

/*
 * The ext4_maybe_update_superblock() function checks and updates the
 * superblock if needed.
 *
 * This function is designed to update the on-disk superblock only under
 * certain conditions to prevent excessive disk writes and unnecessary
 * waking of the disk from sleep. The superblock will be updated if:
 * 1. More than sbi->s_sb_update_sec (def: 1 hour) has passed since the last
 *    superblock update
 * 2. More than sbi->s_sb_update_kb kilobytes (def: 16MB) have been written
 *    since the last superblock update.
 *
 * @sb: The superblock
 */
static void ext4_maybe_update_superblock(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	journal_t *journal = sbi->s_journal;
	time64_t now;
	__u64 last_update;
	__u64 lifetime_write_kbytes;
	__u64 diff_size;

	if (ext4_emergency_state(sb) || sb_rdonly(sb) ||
	    !(sb->s_flags & SB_ACTIVE) || !journal ||
	    journal->j_flags & JBD2_UNMOUNT)
		return;

	now = ktime_get_real_seconds();
	last_update = ext4_get_tstamp(es, s_wtime);

	if (likely(now - last_update < sbi->s_sb_update_sec))
		return;

	lifetime_write_kbytes = sbi->s_kbytes_written +
		((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
		  sbi->s_sectors_written_start) >> 1);

	/*
	 * Get the number of kilobytes not yet recorded in the superblock
	 * and compare it with a multiple of 16 MB. This is used to
	 * determine when the next superblock commit should occur (i.e. not
	 * more often than once per 16MB if there was less written in an
	 * hour).
	 */
	diff_size = lifetime_write_kbytes - le64_to_cpu(es->s_kbytes_written);

	if (diff_size > sbi->s_sb_update_kb)
		schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
}

static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct super_block *sb = journal->j_private;

	BUG_ON(txn->t_state == T_FINISHED);

	ext4_process_freed_data(sb, txn->t_tid);
	ext4_maybe_update_superblock(sb);
}

static bool ext4_journalled_writepage_needs_redirty(struct jbd2_inode *jinode,
						    struct folio *folio)
{
	struct buffer_head *bh, *head;
	struct journal_head *jh;

	bh = head = folio_buffers(folio);
	do {
		/*
		 * We have to redirty a page in these cases:
		 * 1) If buffer is dirty, it means the page was dirty because it
		 * contains a buffer that needs checkpointing. So the dirty bit
		 * needs to be preserved so that checkpointing writes the buffer
		 * properly.
		 * 2) If buffer is not part of the committing transaction
		 * (we may have just accidentally come across this buffer because
		 * inode range tracking is not exact) or if the currently running
		 * transaction already contains this buffer as well, dirty bit
		 * needs to be preserved so that the buffer gets writeprotected
		 * properly on running transaction's commit.
		 */
		jh = bh2jh(bh);
		if (buffer_dirty(bh) ||
		    (jh && (jh->b_transaction != jinode->i_transaction ||
			    jh->b_next_transaction)))
			return true;
	} while ((bh = bh->b_this_page) != head);

	return false;
}

static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = jinode->i_dirty_start,
		.range_end = jinode->i_dirty_end,
	};
	struct folio *folio = NULL;
	int error;

	/*
	 * writeback_iter() already checks for dirty pages and calls
	 * folio_clear_dirty_for_io(), which is what we want: it
	 * write-protects the folios.
	 *
	 * However, we may have to redirty a folio sometimes.
	 */
	while ((folio = writeback_iter(mapping, &wbc, folio, &error))) {
		if (ext4_journalled_writepage_needs_redirty(jinode, folio))
			folio_redirty_for_writepage(&wbc, folio);
		folio_unlock(folio);
	}

	return error;
}

static int ext4_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	int ret;

	if (ext4_should_journal_data(jinode->i_vfs_inode))
		ret = ext4_journalled_submit_inode_data_buffers(jinode);
	else
		ret = ext4_normal_submit_inode_data_buffers(jinode);
	return ret;
}

static int ext4_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
{
	int ret = 0;

	if (!ext4_should_journal_data(jinode->i_vfs_inode))
		ret = jbd2_journal_finish_inode_data_buffers(jinode);

	return ret;
}

static bool system_going_down(void)
{
	return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
		|| system_state == SYSTEM_RESTART;
}

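/*
 * Map Linux errno values to the compact EXT4_ERR_* codes that are
 * stored in the superblock's error fields (s_first_error_errcode /
 * s_last_error_errcode), so the cause of the first and most recent
 * error survives a reboot and is visible to e2fsck.
 */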
struct ext4_err_translation {
	int code;
	int errno;
};

#define EXT4_ERR_TRANSLATE(err) { .code = EXT4_ERR_##err, .errno = err }

static struct ext4_err_translation err_translation[] = {
	EXT4_ERR_TRANSLATE(EIO),
	EXT4_ERR_TRANSLATE(ENOMEM),
	EXT4_ERR_TRANSLATE(EFSBADCRC),
	EXT4_ERR_TRANSLATE(EFSCORRUPTED),
	EXT4_ERR_TRANSLATE(ENOSPC),
	EXT4_ERR_TRANSLATE(ENOKEY),
	EXT4_ERR_TRANSLATE(EROFS),
	EXT4_ERR_TRANSLATE(EFBIG),
	EXT4_ERR_TRANSLATE(EEXIST),
	EXT4_ERR_TRANSLATE(ERANGE),
	EXT4_ERR_TRANSLATE(EOVERFLOW),
	EXT4_ERR_TRANSLATE(EBUSY),
	EXT4_ERR_TRANSLATE(ENOTDIR),
	EXT4_ERR_TRANSLATE(ENOTEMPTY),
	EXT4_ERR_TRANSLATE(ESHUTDOWN),
	EXT4_ERR_TRANSLATE(EFAULT),
};

static int ext4_errno_to_code(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(err_translation); i++)
		if (err_translation[i].errno == errno)
			return err_translation[i].code;
	return EXT4_ERR_UNKNOWN;
}

static void save_error_info(struct super_block *sb, int error,
			    __u32 ino, __u64 block,
			    const char *func, unsigned int line)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* We default to EFSCORRUPTED error... */
	if (error == 0)
		error = EFSCORRUPTED;

	spin_lock(&sbi->s_error_lock);
	sbi->s_add_error_count++;
	sbi->s_last_error_code = error;
	sbi->s_last_error_line = line;
	sbi->s_last_error_ino = ino;
	sbi->s_last_error_block = block;
	sbi->s_last_error_func = func;
	sbi->s_last_error_time = ktime_get_real_seconds();
	if (!sbi->s_first_error_time) {
		sbi->s_first_error_code = error;
		sbi->s_first_error_line = line;
		sbi->s_first_error_ino = ino;
		sbi->s_first_error_block = block;
		sbi->s_first_error_func = func;
		sbi->s_first_error_time = sbi->s_last_error_time;
	}
	spin_unlock(&sbi->s_error_lock);
}

/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 *
 * If force_ro is set, we unconditionally force the filesystem into an
 * ABORT|READONLY state, unless the error response on the fs has been set to
 * panic in which case we take the easy way out and panic immediately. This is
 * used to deal with unrecoverable failures such as journal IO errors or ENOMEM
 * at a critical moment in log management.
 */
static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
			      __u32 ino, __u64 block,
			      const char *func, unsigned int line)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;
	bool continue_fs = !force_ro && test_opt(sb, ERRORS_CONT);

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	if (test_opt(sb, WARN_ON_ERROR))
		WARN_ON_ONCE(1);

	if (!continue_fs && !ext4_emergency_ro(sb) && journal)
		jbd2_journal_abort(journal, -error);

	if (!bdev_read_only(sb->s_bdev)) {
		save_error_info(sb, error, ino, block, func, line);
		/*
		 * In case the fs should keep running, we need to writeout
		 * superblock through the journal. Due to lock ordering
		 * constraints, it may not be safe to do it right here so we
		 * defer superblock flushing to a workqueue. We just need to be
		 * careful when the journal is already shutting down. If we get
		 * here in that case, just update the sb directly as the last
		 * transaction won't commit anyway.
		 */
		if (continue_fs && journal &&
		    !ext4_test_mount_flag(sb, EXT4_MF_JOURNAL_DESTROY))
			schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
		else
			ext4_commit_super(sb);
	}

	/*
	 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
	 * could panic during 'reboot -f' as the underlying device got already
	 * disabled.
	 */
	if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) {
		panic("EXT4-fs (device %s): panic forced after error\n",
			sb->s_id);
	}

	if (ext4_emergency_ro(sb) || continue_fs)
		return;

	ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
	/*
	 * We don't set SB_RDONLY because that requires the sb->s_umount
	 * semaphore, and setting it without the proper remount procedure
	 * confuses code such as freeze_super(), leading to deadlocks and
	 * other problems.
	 */
	set_bit(EXT4_FLAGS_EMERGENCY_RO, &EXT4_SB(sb)->s_ext4_flags);
}

static void update_super_work(struct work_struct *work)
{
	struct ext4_sb_info *sbi = container_of(work, struct ext4_sb_info,
						s_sb_upd_work);
	journal_t *journal = sbi->s_journal;
	handle_t *handle;

	/*
	 * If the journal is still running, we have to write out superblock
	 * through the journal to avoid collisions of other journalled sb
	 * updates.
	 *
	 * We use directly jbd2 functions here to avoid recursing back into
	 * ext4 error handling code during handling of previous errors.
	 */
	if (!ext4_emergency_state(sbi->s_sb) &&
	    !sb_rdonly(sbi->s_sb) && journal) {
		struct buffer_head *sbh = sbi->s_sbh;
		bool call_notify_err = false;

		handle = jbd2_journal_start(journal, 1);
		if (IS_ERR(handle))
			goto write_directly;
		if (jbd2_journal_get_write_access(handle, sbh)) {
			jbd2_journal_stop(handle);
			goto write_directly;
		}

		if (sbi->s_add_error_count > 0)
			call_notify_err = true;

		ext4_update_super(sbi->s_sb);
		if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
			ext4_msg(sbi->s_sb, KERN_ERR, "previous I/O error to "
				 "superblock detected");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}

		if (jbd2_journal_dirty_metadata(handle, sbh)) {
			jbd2_journal_stop(handle);
			goto write_directly;
		}
		jbd2_journal_stop(handle);

		if (call_notify_err)
			ext4_notify_error_sysfs(sbi);

		return;
	}
write_directly:
	/*
	 * Write through journal failed. Write sb directly to get error info
	 * out and hope for the best.
	 */
	ext4_commit_super(sbi->s_sb);
	ext4_notify_error_sysfs(sbi);
}

#define ext4_error_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
			     "EXT4-fs error")

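/*
 * Backing implementation for the ext4_error*() reporting macros in
 * ext4.h: log the (ratelimited) message, report the error, and let
 * ext4_handle_error() record it and apply the configured errors=
 * policy.
 */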
void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, bool force_ro, int error, __u64 block,
		  const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_emergency_state(sb)))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT
		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
		       sb->s_id, function, line, current->comm, &vaf);
		va_end(args);
	}
	fserror_report_metadata(sb, error ? -abs(error) : -EFSCORRUPTED,
				GFP_ATOMIC);

	ext4_handle_error(sb, force_ro, error, 0, block, function, line);
}

void __ext4_error_inode(struct inode *inode, const char *function,
			unsigned int line, ext4_fsblk_t block, int error,
			const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;

	if (unlikely(ext4_emergency_state(inode->i_sb)))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	if (ext4_error_ratelimit(inode->i_sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
	fserror_report_file_metadata(inode,
				     error ? -abs(error) : -EFSCORRUPTED,
				     GFP_ATOMIC);

	ext4_handle_error(inode->i_sb, false, error, inode->i_ino, block,
			  function, line);
}

void __ext4_error_file(struct file *file, const char *function,
		       unsigned int line, ext4_fsblk_t block,
		       const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct inode *inode = file_inode(file);
	char pathname[80], *path;

	if (unlikely(ext4_emergency_state(inode->i_sb)))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	if (ext4_error_ratelimit(inode->i_sb)) {
		path = file_path(file, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "block %llu: comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, path, &vaf);
		else
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, path, &vaf);
		va_end(args);
	}
	fserror_report_file_metadata(inode, -EFSCORRUPTED, GFP_ATOMIC);

	ext4_handle_error(inode->i_sb, false, EFSCORRUPTED, inode->i_ino, block,
			  function, line);
}

const char *ext4_decode_error(struct super_block *sb, int errno,
			      char nbuf[16])
{
	char *errstr = NULL;

	switch (errno) {
	case -EFSCORRUPTED:
		errstr = "Corrupt filesystem";
		break;
	case -EFSBADCRC:
		errstr = "Filesystem failed CRC";
		break;
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
		if (!sb || (EXT4_SB(sb)->s_journal &&
			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
			errstr = "Journal has aborted";
		else
			errstr = "Readonly filesystem";
		break;
	default:
		/* If the caller passed in an extra buffer for unknown
		 * errors, textualise them now.  Else we just return
		 * NULL. */
		if (nbuf) {
			/* Check for truncated error codes... */
			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
				errstr = nbuf;
		}
		break;
	}

	return errstr;
}

/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response. */

void __ext4_std_error(struct super_block *sb, const char *function,
		      unsigned int line, int errno)
{
	char nbuf[16];
	const char *errstr;

	if (unlikely(ext4_emergency_state(sb)))
		return;

	/* Special case: if the error is EROFS, and we're not already
	 * inside a transaction, then there's really no point in logging
	 * an error. */
	if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
		return;

	if (ext4_error_ratelimit(sb)) {
		errstr = ext4_decode_error(sb, errno, nbuf);
		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
		       sb->s_id, function, line, errstr);
	}
	fserror_report_metadata(sb, errno ? -abs(errno) : -EFSCORRUPTED,
				GFP_ATOMIC);

	ext4_handle_error(sb, false, -errno, 0, 0, function, line);
}

void __ext4_msg(struct super_block *sb,
		const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (sb) {
		atomic_inc(&EXT4_SB(sb)->s_msg_count);
		if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state),
				  "EXT4-fs"))
			return;
	}

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (sb)
		printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
	else
		printk("%sEXT4-fs: %pV\n", prefix, &vaf);
	va_end(args);
}

static int ext4_warning_ratelimit(struct super_block *sb)
{
	atomic_inc(&EXT4_SB(sb)->s_warning_count);
	return ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),
			    "EXT4-fs warning");
}

void __ext4_warning(struct super_block *sb, const char *function,
		    unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);
}

void __ext4_warning_inode(const struct inode *inode, const char *function,
			  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(inode->i_sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
	       "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
	       function, line, inode->i_ino, current->comm, &vaf);
	va_end(args);
}

void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_emergency_state(sb)))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
		       sb->s_id, function, line, grp);
		if (ino)
			printk(KERN_CONT "inode %lu: ", ino);
		if (block)
			printk(KERN_CONT "block %llu:",
			       (unsigned long long) block);
		printk(KERN_CONT "%pV\n", &vaf);
		va_end(args);
	}

	if (test_opt(sb, ERRORS_CONT)) {
		if (test_opt(sb, WARN_ON_ERROR))
			WARN_ON_ONCE(1);
		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		if (!bdev_read_only(sb->s_bdev)) {
			save_error_info(sb, EFSCORRUPTED, ino, block, function,
					line);
			schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
		}
		return;
	}
	ext4_unlock_group(sb, grp);
	ext4_handle_error(sb, false, EFSCORRUPTED, ino, block, function, line);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might want to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}

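/*
 * Mark a group's block or inode bitmap as corrupt and subtract its free
 * counts from the global percpu counters so the allocators stop using
 * the group. The *_CORRUPT_BIT test-and-set makes this a one-shot
 * operation per group and bitmap type.
 */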
void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
				      ext4_group_t group,
				      unsigned int flags)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
	int ret;

	if (!grp || !gdp)
		return;
	if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
					    &grp->bb_state);
		if (!ret)
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
	}

	if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) {
		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
					    &grp->bb_state);
		if (!ret && gdp) {
			int count;

			count = ext4_free_inodes_count(sb, gdp);
			percpu_counter_sub(&sbi->s_freeinodes_counter,
					   count);
		}
	}
}

void ext4_update_dynamic_rev(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
		return;

	ext4_warning(sb,
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
		     EXT4_DYNAMIC_REV);

	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}

static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
	struct list_head *l;

	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
		 le32_to_cpu(sbi->s_es->s_last_orphan));

	printk(KERN_ERR "sb_info orphan list:\n");
	list_for_each(l, &sbi->s_orphan) {
		struct inode *inode = orphan_list_entry(l);
		printk(KERN_ERR "  "
		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
		       inode->i_sb->s_id, inode->i_ino, inode,
		       inode->i_mode, inode->i_nlink,
		       NEXT_ORPHAN(inode));
	}
}

#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);

static inline void ext4_quotas_off(struct super_block *sb, int type)
{
	BUG_ON(type > EXT4_MAXQUOTAS);

	/* Use our quota_off function to clear inode flags etc. */
	for (type--; type >= 0; type--)
		ext4_quota_off(sb, type);
}

/*
 * This is a helper function which is used in the mount/remount
 * codepaths (which holds s_umount) to fetch the quota file name.
 */
static inline char *get_qf_name(struct super_block *sb,
				struct ext4_sb_info *sbi,
				int type)
{
	return rcu_dereference_protected(sbi->s_qf_names[type],
					 lockdep_is_held(&sb->s_umount));
}
#else
static inline void ext4_quotas_off(struct super_block *sb, int type)
{
}
#endif

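/*
 * Set up the in-memory percpu counters from the on-disk counts. These
 * counters are what statfs() and the allocators consult at run time;
 * the superblock copies are only brought back in sync when the
 * superblock itself is written out.
 */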
static int ext4_percpu_param_init(struct ext4_sb_info *sbi)
{
	ext4_fsblk_t block;
	int err;

	block = ext4_count_free_clusters(sbi->s_sb);
	ext4_free_blocks_count_set(sbi->s_es, EXT4_C2B(sbi, block));
	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (!err) {
		unsigned long freei = ext4_count_free_inodes(sbi->s_sb);
		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
					  GFP_KERNEL);
	}
	if (!err)
		err = percpu_counter_init(&sbi->s_dirs_counter,
					  ext4_count_dirs(sbi->s_sb), GFP_KERNEL);
	if (!err)
		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
					  GFP_KERNEL);
	if (!err)
		err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0,
					  GFP_KERNEL);
	if (!err)
		err = percpu_init_rwsem(&sbi->s_writepages_rwsem);

	if (err)
		ext4_msg(sbi->s_sb, KERN_ERR, "insufficient memory");

	return err;
}

static void ext4_percpu_param_destroy(struct ext4_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
	percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
	percpu_free_rwsem(&sbi->s_writepages_rwsem);
}

static void ext4_group_desc_free(struct ext4_sb_info *sbi)
{
	struct buffer_head **group_desc;
	int i;

	rcu_read_lock();
	group_desc = rcu_dereference(sbi->s_group_desc);
	for (i = 0; i < sbi->s_gdb_count; i++)
		brelse(group_desc[i]);
	kvfree(group_desc);
	rcu_read_unlock();
}

static void ext4_flex_groups_free(struct ext4_sb_info *sbi)
{
	struct flex_groups **flex_groups;
	int i;

	rcu_read_lock();
	flex_groups = rcu_dereference(sbi->s_flex_groups);
	if (flex_groups) {
		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
			kvfree(flex_groups[i]);
		kvfree(flex_groups);
	}
	rcu_read_unlock();
}

static void ext4_put_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int aborted = 0;
	int err;

	/*
	 * Unregister sysfs before destroying the jbd2 journal:
	 * we could otherwise still access the attr_journal_task attribute
	 * via a sysfs path after sbi->s_journal->j_task has been set to
	 * NULL. Also unregister sysfs before flushing sbi->s_sb_upd_work:
	 * a user may read /proc/fs/ext4/xx/mb_groups during umount; if
	 * metadata read there fails verification, error work gets queued
	 * and update_super_work() would call start_this_handle(), which
	 * can trigger a BUG_ON.
	 */
	ext4_unregister_sysfs(sb);

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs unmount"))
		ext4_msg(sb, KERN_INFO, "unmounting filesystem %pU.",
			 &sb->s_uuid);

	ext4_unregister_li_request(sb);
	ext4_quotas_off(sb, EXT4_MAXQUOTAS);

	destroy_workqueue(sbi->rsv_conversion_wq);
	ext4_release_orphan_info(sb);

	if (sbi->s_journal) {
		aborted = is_journal_aborted(sbi->s_journal);
		err = ext4_journal_destroy(sbi, sbi->s_journal);
		if ((err < 0) && !aborted) {
			ext4_abort(sb, -err, "Couldn't clean up the journal");
		}
	} else
		flush_work(&sbi->s_sb_upd_work);

	ext4_es_unregister_shrinker(sbi);
	timer_shutdown_sync(&sbi->s_err_report);
	ext4_release_system_zone(sb);
	ext4_mb_release(sb);
	ext4_ext_release(sb);

	if (!ext4_emergency_state(sb) && !sb_rdonly(sb)) {
		if (!aborted) {
			ext4_clear_feature_journal_needs_recovery(sb);
			ext4_clear_feature_orphan_present(sb);
			es->s_state = cpu_to_le16(sbi->s_mount_state);
		}
		ext4_commit_super(sb);
	}

	ext4_group_desc_free(sbi);
	ext4_flex_groups_free(sbi);

	WARN_ON_ONCE(!(sbi->s_mount_state & EXT4_ERROR_FS) &&
		     percpu_counter_sum(&sbi->s_dirtyclusters_counter));
	ext4_percpu_param_destroy(sbi);
#ifdef CONFIG_QUOTA
	for (int i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(get_qf_name(sb, sbi, i));
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	ASSERT(list_empty(&sbi->s_orphan));

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);
	if (sbi->s_journal_bdev_file) {
		/*
		 * Invalidate the journal device's buffers.  We don't want them
		 * floating about in memory - the physical journal device may
		 * have been hotswapped, and it breaks the `ro-after' testing code.
		 */
		sync_blockdev(file_bdev(sbi->s_journal_bdev_file));
		invalidate_bdev(file_bdev(sbi->s_journal_bdev_file));
	}

	ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
	sbi->s_ea_inode_cache = NULL;

	ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
	sbi->s_ea_block_cache = NULL;

	ext4_stop_mmpd(sbi);

	brelse(sbi->s_sbh);
	sb->s_fs_info = NULL;
	/*
	 * Now that we are completely done shutting down the
	 * superblock, we need to actually destroy the kobject.
	 */
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
	kfree(sbi->s_blockgroup_lock);
	fs_put_dax(sbi->s_daxdev, NULL);
	fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
#if IS_ENABLED(CONFIG_UNICODE)
	utf8_unload(sb->s_encoding);
#endif
	kfree(sbi);
}

static struct kmem_cache *ext4_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = alloc_inode_sb(sb, ext4_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	inode_set_iversion(&ei->vfs_inode, 1);
	ei->i_flags = 0;
	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
	spin_lock_init(&ei->i_raw_lock);
	ei->i_prealloc_node = RB_ROOT;
	atomic_set(&ei->i_prealloc_active, 0);
	rwlock_init(&ei->i_prealloc_lock);
	ext4_es_init_tree(&ei->i_es_tree);
	rwlock_init(&ei->i_es_lock);
	INIT_LIST_HEAD(&ei->i_es_list);
	ei->i_es_all_nr = 0;
	ei->i_es_shk_nr = 0;
	ei->i_es_shrink_lblk = 0;
	ei->i_es_seq = 0;
	ei->i_reserved_data_blocks = 0;
	spin_lock_init(&(ei->i_block_reservation_lock));
	ext4_init_pending_tree(&ei->i_pending_tree);
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
	ei->jinode = NULL;
	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
	spin_lock_init(&ei->i_completed_io_lock);
	ei->i_sync_tid = 0;
	ei->i_datasync_tid = 0;
	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
	ext4_fc_init_inode(&ei->vfs_inode);
	spin_lock_init(&ei->i_fc_lock);
	return &ei->vfs_inode;
}

static int ext4_drop_inode(struct inode *inode)
{
	int drop = inode_generic_drop(inode);

	if (!drop)
		drop = fscrypt_drop_inode(inode);

	trace_ext4_drop_inode(inode, drop);
	return drop;
}

static void ext4_free_in_core_inode(struct inode *inode)
{
	fscrypt_free_inode(inode);
	if (!list_empty(&(EXT4_I(inode)->i_fc_list))) {
		pr_warn("%s: inode %ld still in fc list",
			__func__, inode->i_ino);
	}
	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}

static void ext4_destroy_inode(struct inode *inode)
{
	if (ext4_inode_orphan_tracked(inode)) {
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): inode tracked as orphan!",
			 inode->i_ino, EXT4_I(inode));
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
			       EXT4_I(inode), sizeof(struct ext4_inode_info),
			       true);
		dump_stack();
	}

	if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ERROR_FS) &&
	    WARN_ON_ONCE(EXT4_I(inode)->i_reserved_data_blocks))
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!",
			 inode->i_ino, EXT4_I(inode),
			 EXT4_I(inode)->i_reserved_data_blocks);
}

static void ext4_shutdown(struct super_block *sb)
{
	ext4_force_shutdown(sb, EXT4_GOING_FLAGS_NOLOGFLUSH);
}

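/*
 * Slab constructor: runs once when a slab object is first set up, not
 * on every inode allocation, so only fields that must stay valid
 * across kmem_cache_free()/alloc cycles belong here.
 */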
static void init_once(void *foo)
{
	struct ext4_inode_info *ei = foo;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	inode_init_once(&ei->vfs_inode);
	ext4_fc_init_inode(&ei->vfs_inode);
#ifdef CONFIG_FS_ENCRYPTION
	ei->i_crypt_info = NULL;
#endif
}

static int __init init_inodecache(void)
{
	struct kmem_cache_args args = {
		.useroffset = offsetof(struct ext4_inode_info, i_data),
		.usersize = sizeof_field(struct ext4_inode_info, i_data),
		.use_freeptr_offset = true,
		.freeptr_offset = offsetof(struct ext4_inode_info, i_flags),
		.ctor = init_once,
	};

	ext4_inode_cachep = kmem_cache_create("ext4_inode_cache",
					      sizeof(struct ext4_inode_info),
					      &args,
					      SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT);

	if (ext4_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_inode_cachep);
}

void ext4_clear_inode(struct inode *inode)
{
	ext4_fc_del(inode);
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	ext4_discard_preallocations(inode);
	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
	dquot_drop(inode);
	if (EXT4_I(inode)->jinode) {
		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
					       EXT4_I(inode)->jinode);
		jbd2_free_inode(EXT4_I(inode)->jinode);
		EXT4_I(inode)->jinode = NULL;
	}
	fscrypt_put_encryption_info(inode);
}

static struct inode *ext4_nfs_get_inode(struct super_block *sb,
					u64 ino, u32 generation)
{
	struct inode *inode;

	/*
	 * Currently we don't know the generation for parent directory, so
	 * a generation of 0 means "accept any"
	 */
	inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}

static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

static int ext4_nfs_commit_metadata(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL
	};

	trace_ext4_nfs_commit_metadata(inode);
	return ext4_write_inode(inode, &wbc);
}

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])

static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags);

static struct dquot __rcu **ext4_get_dquots(struct inode *inode)
{
	return EXT4_I(inode)->i_dquot;
}

static const struct dquot_operations ext4_quota_operations = {
	.get_reserved_space	= ext4_get_reserved_space,
	.write_dquot		= ext4_write_dquot,
	.acquire_dquot		= ext4_acquire_dquot,
	.release_dquot		= ext4_release_dquot,
	.mark_dirty		= ext4_mark_dquot_dirty,
	.write_info		= ext4_write_info,
	.alloc_dquot		= dquot_alloc,
	.destroy_dquot		= dquot_destroy,
	.get_projid		= ext4_get_projid,
	.get_inode_usage	= ext4_get_inode_usage,
	.get_next_id		= dquot_get_next_id,
};

static const struct quotactl_ops ext4_qctl_operations = {
	.quota_on	= ext4_quota_on,
	.quota_off	= ext4_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#endif

static const struct super_operations ext4_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.free_inode	= ext4_free_in_core_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
	.put_super	= ext4_put_super,
	.sync_fs	= ext4_sync_fs,
	.freeze_fs	= ext4_freeze,
	.unfreeze_fs	= ext4_unfreeze,
	.statfs		= ext4_statfs,
	.show_options	= ext4_show_options,
	.shutdown	= ext4_shutdown,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
	.get_dquots	= ext4_get_dquots,
#endif
};

static const struct export_operations ext4_export_ops = {
	.encode_fh = generic_encode_ino32_fh,
	.fh_to_dentry = ext4_fh_to_dentry,
	.fh_to_parent = ext4_fh_to_parent,
	.get_parent = ext4_get_parent,
	.commit_metadata = ext4_nfs_commit_metadata,
};

1664 enum {
1665 Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
1666 Opt_resgid, Opt_resuid, Opt_sb,
1667 Opt_nouid32, Opt_debug, Opt_removed,
1668 Opt_user_xattr, Opt_acl,
1669 Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
1670 Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
1671 Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
1672 Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
1673 Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
1674 Opt_inlinecrypt,
1675 Opt_usrjquota, Opt_grpjquota, Opt_quota,
1676 Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
1677 Opt_usrquota, Opt_grpquota, Opt_prjquota,
1678 Opt_dax, Opt_dax_always, Opt_dax_inode, Opt_dax_never,
1679 Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
1680 Opt_nowarn_on_error, Opt_mblk_io_submit, Opt_debug_want_extra_isize,
1681 Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
1682 Opt_inode_readahead_blks, Opt_journal_ioprio,
1683 Opt_dioread_nolock, Opt_dioread_lock,
1684 Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
1685 Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
1686 Opt_no_prefetch_block_bitmaps, Opt_mb_optimize_scan,
1687 Opt_errors, Opt_data, Opt_data_err, Opt_jqfmt, Opt_dax_type,
1688 #ifdef CONFIG_EXT4_DEBUG
1689 Opt_fc_debug_max_replay, Opt_fc_debug_force
1690 #endif
1691 };
1692
1693 static const struct constant_table ext4_param_errors[] = {
1694 {"continue", EXT4_MOUNT_ERRORS_CONT},
1695 {"panic", EXT4_MOUNT_ERRORS_PANIC},
1696 {"remount-ro", EXT4_MOUNT_ERRORS_RO},
1697 {}
1698 };
1699
1700 static const struct constant_table ext4_param_data[] = {
1701 {"journal", EXT4_MOUNT_JOURNAL_DATA},
1702 {"ordered", EXT4_MOUNT_ORDERED_DATA},
1703 {"writeback", EXT4_MOUNT_WRITEBACK_DATA},
1704 {}
1705 };
1706
1707 static const struct constant_table ext4_param_data_err[] = {
1708 {"abort", Opt_data_err_abort},
1709 {"ignore", Opt_data_err_ignore},
1710 {}
1711 };
1712
1713 static const struct constant_table ext4_param_jqfmt[] = {
1714 {"vfsold", QFMT_VFS_OLD},
1715 {"vfsv0", QFMT_VFS_V0},
1716 {"vfsv1", QFMT_VFS_V1},
1717 {}
1718 };
1719
1720 static const struct constant_table ext4_param_dax[] = {
1721 {"always", Opt_dax_always},
1722 {"inode", Opt_dax_inode},
1723 {"never", Opt_dax_never},
1724 {}
1725 };
1726
1727 /*
1728 * Mount option specification
1729 * We don't use fsparam_flag_no because of the way we set the
1730 * options and the way we show them in _ext4_show_options(). To
1731 * keep the changes to a minimum, let's keep the negative options
1732 * separate for now.
1733 */
1734 static const struct fs_parameter_spec ext4_param_specs[] = {
1735 fsparam_flag ("bsddf", Opt_bsd_df),
1736 fsparam_flag ("minixdf", Opt_minix_df),
1737 fsparam_flag ("grpid", Opt_grpid),
1738 fsparam_flag ("bsdgroups", Opt_grpid),
1739 fsparam_flag ("nogrpid", Opt_nogrpid),
1740 fsparam_flag ("sysvgroups", Opt_nogrpid),
1741 fsparam_gid ("resgid", Opt_resgid),
1742 fsparam_uid ("resuid", Opt_resuid),
1743 fsparam_u32 ("sb", Opt_sb),
1744 fsparam_enum ("errors", Opt_errors, ext4_param_errors),
1745 fsparam_flag ("nouid32", Opt_nouid32),
1746 fsparam_flag ("debug", Opt_debug),
1747 fsparam_flag ("oldalloc", Opt_removed),
1748 fsparam_flag ("orlov", Opt_removed),
1749 fsparam_flag ("user_xattr", Opt_user_xattr),
1750 fsparam_flag ("acl", Opt_acl),
1751 fsparam_flag ("norecovery", Opt_noload),
1752 fsparam_flag ("noload", Opt_noload),
1753 fsparam_flag ("bh", Opt_removed),
1754 fsparam_flag ("nobh", Opt_removed),
1755 fsparam_u32 ("commit", Opt_commit),
1756 fsparam_u32 ("min_batch_time", Opt_min_batch_time),
1757 fsparam_u32 ("max_batch_time", Opt_max_batch_time),
1758 fsparam_u32 ("journal_dev", Opt_journal_dev),
1759 fsparam_bdev ("journal_path", Opt_journal_path),
1760 fsparam_flag ("journal_checksum", Opt_journal_checksum),
1761 fsparam_flag ("nojournal_checksum", Opt_nojournal_checksum),
1762 fsparam_flag ("journal_async_commit",Opt_journal_async_commit),
1763 fsparam_flag ("abort", Opt_abort),
1764 fsparam_enum ("data", Opt_data, ext4_param_data),
1765 fsparam_enum ("data_err", Opt_data_err,
1766 ext4_param_data_err),
1767 fsparam_string_empty
1768 ("usrjquota", Opt_usrjquota),
1769 fsparam_string_empty
1770 ("grpjquota", Opt_grpjquota),
1771 fsparam_enum ("jqfmt", Opt_jqfmt, ext4_param_jqfmt),
1772 fsparam_flag ("grpquota", Opt_grpquota),
1773 fsparam_flag ("quota", Opt_quota),
1774 fsparam_flag ("noquota", Opt_noquota),
1775 fsparam_flag ("usrquota", Opt_usrquota),
1776 fsparam_flag ("prjquota", Opt_prjquota),
1777 fsparam_flag ("barrier", Opt_barrier),
1778 fsparam_u32 ("barrier", Opt_barrier),
1779 fsparam_flag ("nobarrier", Opt_nobarrier),
1780 fsparam_flag ("i_version", Opt_removed),
1781 fsparam_flag ("dax", Opt_dax),
1782 fsparam_enum ("dax", Opt_dax_type, ext4_param_dax),
1783 fsparam_u32 ("stripe", Opt_stripe),
1784 fsparam_flag ("delalloc", Opt_delalloc),
1785 fsparam_flag ("nodelalloc", Opt_nodelalloc),
1786 fsparam_flag ("warn_on_error", Opt_warn_on_error),
1787 fsparam_flag ("nowarn_on_error", Opt_nowarn_on_error),
1788 fsparam_u32 ("debug_want_extra_isize",
1789 Opt_debug_want_extra_isize),
1790 fsparam_flag ("mblk_io_submit", Opt_removed),
1791 fsparam_flag ("nomblk_io_submit", Opt_removed),
1792 fsparam_flag ("block_validity", Opt_block_validity),
1793 fsparam_flag ("noblock_validity", Opt_noblock_validity),
1794 fsparam_u32 ("inode_readahead_blks",
1795 Opt_inode_readahead_blks),
1796 fsparam_u32 ("journal_ioprio", Opt_journal_ioprio),
1797 fsparam_u32 ("auto_da_alloc", Opt_auto_da_alloc),
1798 fsparam_flag ("auto_da_alloc", Opt_auto_da_alloc),
1799 fsparam_flag ("noauto_da_alloc", Opt_noauto_da_alloc),
1800 fsparam_flag ("dioread_nolock", Opt_dioread_nolock),
1801 fsparam_flag ("nodioread_nolock", Opt_dioread_lock),
1802 fsparam_flag ("dioread_lock", Opt_dioread_lock),
1803 fsparam_flag ("discard", Opt_discard),
1804 fsparam_flag ("nodiscard", Opt_nodiscard),
1805 fsparam_u32 ("init_itable", Opt_init_itable),
1806 fsparam_flag ("init_itable", Opt_init_itable),
1807 fsparam_flag ("noinit_itable", Opt_noinit_itable),
1808 #ifdef CONFIG_EXT4_DEBUG
1809 fsparam_flag ("fc_debug_force", Opt_fc_debug_force),
1810 fsparam_u32 ("fc_debug_max_replay", Opt_fc_debug_max_replay),
1811 #endif
1812 fsparam_u32 ("max_dir_size_kb", Opt_max_dir_size_kb),
1813 fsparam_flag ("test_dummy_encryption",
1814 Opt_test_dummy_encryption),
1815 fsparam_string ("test_dummy_encryption",
1816 Opt_test_dummy_encryption),
1817 fsparam_flag ("inlinecrypt", Opt_inlinecrypt),
1818 fsparam_flag ("nombcache", Opt_nombcache),
1819 fsparam_flag ("no_mbcache", Opt_nombcache), /* for backward compatibility */
1820 fsparam_flag ("prefetch_block_bitmaps",
1821 Opt_removed),
1822 fsparam_flag ("no_prefetch_block_bitmaps",
1823 Opt_no_prefetch_block_bitmaps),
1824 fsparam_s32 ("mb_optimize_scan", Opt_mb_optimize_scan),
1825 fsparam_string ("check", Opt_removed), /* mount option from ext2/3 */
1826 fsparam_flag ("nocheck", Opt_removed), /* mount option from ext2/3 */
1827 fsparam_flag ("reservation", Opt_removed), /* mount option from ext2/3 */
1828 fsparam_flag ("noreservation", Opt_removed), /* mount option from ext2/3 */
1829 fsparam_u32 ("journal", Opt_removed), /* mount option from ext2/3 */
1830 {}
1831 };
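
/*
 * For example, each option in a mount string such as
 * "errors=remount-ro,commit=30" (split on commas by userspace, or by
 * parse_options() below for superblock-stored options) is matched
 * against the table above by fs_parse(): "errors=remount-ro" yields
 * token Opt_errors with result.uint_32 == EXT4_MOUNT_ERRORS_RO (via
 * ext4_param_errors), and "commit=30" yields Opt_commit with
 * result.uint_32 == 30.
 */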
1832
1833
1834 #define MOPT_SET 0x0001
1835 #define MOPT_CLEAR 0x0002
1836 #define MOPT_NOSUPPORT 0x0004
1837 #define MOPT_EXPLICIT 0x0008
1838 #ifdef CONFIG_QUOTA
1839 #define MOPT_Q 0
1840 #define MOPT_QFMT 0x0010
1841 #else
1842 #define MOPT_Q MOPT_NOSUPPORT
1843 #define MOPT_QFMT MOPT_NOSUPPORT
1844 #endif
1845 #define MOPT_NO_EXT2 0x0020
1846 #define MOPT_NO_EXT3 0x0040
1847 #define MOPT_EXT4_ONLY (MOPT_NO_EXT2 | MOPT_NO_EXT3)
1848 #define MOPT_SKIP 0x0080
1849 #define MOPT_2 0x0100
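
/*
 * MOPT_SET/MOPT_CLEAR say whether the option sets or clears the bits in
 * ->mount_opt; MOPT_2 puts those bits in s_mount_opt2 rather than
 * s_mount_opt. MOPT_NOSUPPORT makes ext4_parse_param() warn and ignore
 * the option, MOPT_EXPLICIT records an EXT4_MOUNT2_EXPLICIT_* marker,
 * MOPT_NO_EXT2/MOPT_NO_EXT3 reject the option on ext2/ext3 mounts (see
 * ext4_check_opt_consistency()), and MOPT_SKIP hides the option from
 * _ext4_show_options(). MOPT_Q and MOPT_QFMT degrade to MOPT_NOSUPPORT
 * when CONFIG_QUOTA is off.
 */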
1850
1851 static const struct mount_opts {
1852 int token;
1853 int mount_opt;
1854 int flags;
1855 } ext4_mount_opts[] = {
1856 {Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
1857 {Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
1858 {Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
1859 {Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
1860 {Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
1861 {Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
1862 {Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
1863 MOPT_EXT4_ONLY | MOPT_SET},
1864 {Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
1865 MOPT_EXT4_ONLY | MOPT_CLEAR},
1866 {Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
1867 {Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
1868 {Opt_delalloc, EXT4_MOUNT_DELALLOC,
1869 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
1870 {Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
1871 MOPT_EXT4_ONLY | MOPT_CLEAR},
1872 {Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
1873 {Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
1874 {Opt_commit, 0, MOPT_NO_EXT2},
1875 {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
1876 MOPT_EXT4_ONLY | MOPT_CLEAR},
1877 {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
1878 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
1879 {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
1880 EXT4_MOUNT_JOURNAL_CHECKSUM),
1881 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
1882 {Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
1883 {Opt_data_err, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_NO_EXT2},
1884 {Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
1885 {Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
1886 {Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
1887 {Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
1888 {Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
1889 {Opt_dax_type, 0, MOPT_EXT4_ONLY},
1890 {Opt_journal_dev, 0, MOPT_NO_EXT2},
1891 {Opt_journal_path, 0, MOPT_NO_EXT2},
1892 {Opt_journal_ioprio, 0, MOPT_NO_EXT2},
1893 {Opt_data, 0, MOPT_NO_EXT2},
1894 {Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
1895 #ifdef CONFIG_EXT4_FS_POSIX_ACL
1896 {Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
1897 #else
1898 {Opt_acl, 0, MOPT_NOSUPPORT},
1899 #endif
1900 {Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
1901 {Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
1902 {Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
1903 {Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
1904 MOPT_SET | MOPT_Q},
1905 {Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
1906 MOPT_SET | MOPT_Q},
1907 {Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
1908 MOPT_SET | MOPT_Q},
1909 {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
1910 EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
1911 MOPT_CLEAR | MOPT_Q},
1912 {Opt_usrjquota, 0, MOPT_Q},
1913 {Opt_grpjquota, 0, MOPT_Q},
1914 {Opt_jqfmt, 0, MOPT_QFMT},
1915 {Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
1916 {Opt_no_prefetch_block_bitmaps, EXT4_MOUNT_NO_PREFETCH_BLOCK_BITMAPS,
1917 MOPT_SET},
1918 #ifdef CONFIG_EXT4_DEBUG
1919 {Opt_fc_debug_force, EXT4_MOUNT2_JOURNAL_FAST_COMMIT,
1920 MOPT_SET | MOPT_2 | MOPT_EXT4_ONLY},
1921 #endif
1922 {Opt_abort, EXT4_MOUNT2_ABORT, MOPT_SET | MOPT_2},
1923 {Opt_err, 0, 0}
1924 };
1925
1926 #if IS_ENABLED(CONFIG_UNICODE)
1927 static const struct ext4_sb_encodings {
1928 __u16 magic;
1929 char *name;
1930 unsigned int version;
1931 } ext4_sb_encoding_map[] = {
1932 {EXT4_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)},
1933 };
1934
1935 static const struct ext4_sb_encodings *
1936 ext4_sb_read_encoding(const struct ext4_super_block *es)
1937 {
1938 __u16 magic = le16_to_cpu(es->s_encoding);
1939 int i;
1940
1941 for (i = 0; i < ARRAY_SIZE(ext4_sb_encoding_map); i++)
1942 if (magic == ext4_sb_encoding_map[i].magic)
1943 return &ext4_sb_encoding_map[i];
1944
1945 return NULL;
1946 }
1947 #endif
1948
1949 #define EXT4_SPEC_JQUOTA (1 << 0)
1950 #define EXT4_SPEC_JQFMT (1 << 1)
1951 #define EXT4_SPEC_DATAJ (1 << 2)
1952 #define EXT4_SPEC_SB_BLOCK (1 << 3)
1953 #define EXT4_SPEC_JOURNAL_DEV (1 << 4)
1954 #define EXT4_SPEC_JOURNAL_IOPRIO (1 << 5)
1955 #define EXT4_SPEC_s_want_extra_isize (1 << 7)
1956 #define EXT4_SPEC_s_max_batch_time (1 << 8)
1957 #define EXT4_SPEC_s_min_batch_time (1 << 9)
1958 #define EXT4_SPEC_s_inode_readahead_blks (1 << 10)
1959 #define EXT4_SPEC_s_li_wait_mult (1 << 11)
1960 #define EXT4_SPEC_s_max_dir_size_kb (1 << 12)
1961 #define EXT4_SPEC_s_stripe (1 << 13)
1962 #define EXT4_SPEC_s_resuid (1 << 14)
1963 #define EXT4_SPEC_s_resgid (1 << 15)
1964 #define EXT4_SPEC_s_commit_interval (1 << 16)
1965 #define EXT4_SPEC_s_fc_debug_max_replay (1 << 17)
1966 #define EXT4_SPEC_s_sb_block (1 << 18)
1967 #define EXT4_SPEC_mb_optimize_scan (1 << 19)
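
/*
 * Each EXT4_SPEC_* bit records that the matching field of struct
 * ext4_fs_context below was explicitly set while parsing, so that
 * ext4_apply_options() copies only those fields into the superblock
 * (see the APPLY() macro there).
 */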
1968
1969 struct ext4_fs_context {
1970 char *s_qf_names[EXT4_MAXQUOTAS];
1971 struct fscrypt_dummy_policy dummy_enc_policy;
1972 int s_jquota_fmt; /* Format of quota to use */
1973 #ifdef CONFIG_EXT4_DEBUG
1974 int s_fc_debug_max_replay;
1975 #endif
1976 unsigned short qname_spec;
1977 unsigned long vals_s_flags; /* Bits to set in s_flags */
1978 unsigned long mask_s_flags; /* Bits changed in s_flags */
1979 unsigned long journal_devnum;
1980 unsigned long s_commit_interval;
1981 unsigned long s_stripe;
1982 unsigned int s_inode_readahead_blks;
1983 unsigned int s_want_extra_isize;
1984 unsigned int s_li_wait_mult;
1985 unsigned int s_max_dir_size_kb;
1986 unsigned int journal_ioprio;
1987 unsigned int vals_s_mount_opt;
1988 unsigned int mask_s_mount_opt;
1989 unsigned int vals_s_mount_opt2;
1990 unsigned int mask_s_mount_opt2;
1991 unsigned int opt_flags; /* MOPT flags */
1992 unsigned int spec;
1993 u32 s_max_batch_time;
1994 u32 s_min_batch_time;
1995 kuid_t s_resuid;
1996 kgid_t s_resgid;
1997 ext4_fsblk_t s_sb_block;
1998 };
1999
2000 static void ext4_fc_free(struct fs_context *fc)
2001 {
2002 struct ext4_fs_context *ctx = fc->fs_private;
2003 int i;
2004
2005 if (!ctx)
2006 return;
2007
2008 for (i = 0; i < EXT4_MAXQUOTAS; i++)
2009 kfree(ctx->s_qf_names[i]);
2010
2011 fscrypt_free_dummy_policy(&ctx->dummy_enc_policy);
2012 kfree(ctx);
2013 }
2014
2015 static int ext4_init_fs_context(struct fs_context *fc)
2016 {
2017 struct ext4_fs_context *ctx;
2018
2019 ctx = kzalloc_obj(struct ext4_fs_context);
2020 if (!ctx)
2021 return -ENOMEM;
2022
2023 fc->fs_private = ctx;
2024 fc->ops = &ext4_context_ops;
2025
2026 /* i_version is always enabled now */
2027 fc->sb_flags |= SB_I_VERSION;
2028
2029 return 0;
2030 }
2031
2032 #ifdef CONFIG_QUOTA
2033 /*
2034 * Note the name of the specified quota file.
2035 */
2036 static int note_qf_name(struct fs_context *fc, int qtype,
2037 struct fs_parameter *param)
2038 {
2039 struct ext4_fs_context *ctx = fc->fs_private;
2040 char *qname;
2041
2042 if (param->size < 1) {
2043 ext4_msg(NULL, KERN_ERR, "Missing quota name");
2044 return -EINVAL;
2045 }
2046 if (strchr(param->string, '/')) {
2047 ext4_msg(NULL, KERN_ERR,
2048 "quotafile must be on filesystem root");
2049 return -EINVAL;
2050 }
2051 if (ctx->s_qf_names[qtype]) {
2052 if (strcmp(ctx->s_qf_names[qtype], param->string) != 0) {
2053 ext4_msg(NULL, KERN_ERR,
2054 "%s quota file already specified",
2055 QTYPE2NAME(qtype));
2056 return -EINVAL;
2057 }
2058 return 0;
2059 }
2060
2061 qname = kmemdup_nul(param->string, param->size, GFP_KERNEL);
2062 if (!qname) {
2063 ext4_msg(NULL, KERN_ERR,
2064 "Not enough memory for storing quotafile name");
2065 return -ENOMEM;
2066 }
2067 ctx->s_qf_names[qtype] = qname;
2068 ctx->qname_spec |= 1 << qtype;
2069 ctx->spec |= EXT4_SPEC_JQUOTA;
2070 return 0;
2071 }
2072
2073 /*
2074 * Clear the name of the specified quota file.
2075 */
2076 static int unnote_qf_name(struct fs_context *fc, int qtype)
2077 {
2078 struct ext4_fs_context *ctx = fc->fs_private;
2079
2080 kfree(ctx->s_qf_names[qtype]);
2081
2082 ctx->s_qf_names[qtype] = NULL;
2083 ctx->qname_spec |= 1 << qtype;
2084 ctx->spec |= EXT4_SPEC_JQUOTA;
2085 return 0;
2086 }
2087 #endif
2088
2089 static int ext4_parse_test_dummy_encryption(const struct fs_parameter *param,
2090 struct ext4_fs_context *ctx)
2091 {
2092 int err;
2093
2094 if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) {
2095 ext4_msg(NULL, KERN_WARNING,
2096 "test_dummy_encryption option not supported");
2097 return -EINVAL;
2098 }
2099 err = fscrypt_parse_test_dummy_encryption(param,
2100 &ctx->dummy_enc_policy);
2101 if (err == -EINVAL) {
2102 ext4_msg(NULL, KERN_WARNING,
2103 "Value of option \"%s\" is unrecognized", param->key);
2104 } else if (err == -EEXIST) {
2105 ext4_msg(NULL, KERN_WARNING,
2106 "Conflicting test_dummy_encryption options");
2107 return -EINVAL;
2108 }
2109 return err;
2110 }
2111
2112 #define EXT4_SET_CTX(name) \
2113 static inline __maybe_unused \
2114 void ctx_set_##name(struct ext4_fs_context *ctx, unsigned long flag) \
2115 { \
2116 ctx->mask_s_##name |= flag; \
2117 ctx->vals_s_##name |= flag; \
2118 }
2119
2120 #define EXT4_CLEAR_CTX(name) \
2121 static inline __maybe_unused \
2122 void ctx_clear_##name(struct ext4_fs_context *ctx, unsigned long flag) \
2123 { \
2124 ctx->mask_s_##name |= flag; \
2125 ctx->vals_s_##name &= ~flag; \
2126 }
2127
2128 #define EXT4_TEST_CTX(name) \
2129 static inline unsigned long \
2130 ctx_test_##name(struct ext4_fs_context *ctx, unsigned long flag) \
2131 { \
2132 return (ctx->vals_s_##name & flag); \
2133 }
2134
2135 EXT4_SET_CTX(flags); /* set only */
2136 EXT4_SET_CTX(mount_opt);
2137 EXT4_CLEAR_CTX(mount_opt);
2138 EXT4_TEST_CTX(mount_opt);
2139 EXT4_SET_CTX(mount_opt2);
2140 EXT4_CLEAR_CTX(mount_opt2);
2141 EXT4_TEST_CTX(mount_opt2);
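
/*
 * As an illustration, EXT4_SET_CTX(mount_opt) above expands to:
 *
 *	static inline __maybe_unused
 *	void ctx_set_mount_opt(struct ext4_fs_context *ctx, unsigned long flag)
 *	{
 *		ctx->mask_s_mount_opt |= flag;
 *		ctx->vals_s_mount_opt |= flag;
 *	}
 *
 * i.e. the mask_* field remembers which bits were touched while vals_*
 * holds their new values, so a later remount applies only the bits the
 * user actually changed.
 */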
2142
2143 static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param)
2144 {
2145 struct ext4_fs_context *ctx = fc->fs_private;
2146 struct fs_parse_result result;
2147 const struct mount_opts *m;
2148 int is_remount;
2149 int token;
2150
2151 token = fs_parse(fc, ext4_param_specs, param, &result);
2152 if (token < 0)
2153 return token;
2154 is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE;
2155
2156 for (m = ext4_mount_opts; m->token != Opt_err; m++)
2157 if (token == m->token)
2158 break;
2159
2160 ctx->opt_flags |= m->flags;
2161
2162 if (m->flags & MOPT_EXPLICIT) {
2163 if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
2164 ctx_set_mount_opt2(ctx, EXT4_MOUNT2_EXPLICIT_DELALLOC);
2165 } else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
2166 ctx_set_mount_opt2(ctx,
2167 EXT4_MOUNT2_EXPLICIT_JOURNAL_CHECKSUM);
2168 } else
2169 return -EINVAL;
2170 }
2171
2172 if (m->flags & MOPT_NOSUPPORT) {
2173 ext4_msg(NULL, KERN_ERR, "%s option not supported",
2174 param->key);
2175 return 0;
2176 }
2177
2178 switch (token) {
2179 #ifdef CONFIG_QUOTA
2180 case Opt_usrjquota:
2181 if (!*param->string)
2182 return unnote_qf_name(fc, USRQUOTA);
2183 else
2184 return note_qf_name(fc, USRQUOTA, param);
2185 case Opt_grpjquota:
2186 if (!*param->string)
2187 return unnote_qf_name(fc, GRPQUOTA);
2188 else
2189 return note_qf_name(fc, GRPQUOTA, param);
2190 #endif
2191 case Opt_sb:
2192 if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
2193 ext4_msg(NULL, KERN_WARNING,
2194 "Ignoring %s option on remount", param->key);
2195 } else {
2196 ctx->s_sb_block = result.uint_32;
2197 ctx->spec |= EXT4_SPEC_s_sb_block;
2198 }
2199 return 0;
2200 case Opt_removed:
2201 ext4_msg(NULL, KERN_WARNING, "Ignoring removed %s option",
2202 param->key);
2203 return 0;
2204 case Opt_inlinecrypt:
2205 #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
2206 ctx_set_flags(ctx, SB_INLINECRYPT);
2207 #else
2208 ext4_msg(NULL, KERN_ERR, "inline encryption not supported");
2209 #endif
2210 return 0;
2211 case Opt_errors:
2212 ctx_clear_mount_opt(ctx, EXT4_MOUNT_ERRORS_MASK);
2213 ctx_set_mount_opt(ctx, result.uint_32);
2214 return 0;
2215 #ifdef CONFIG_QUOTA
2216 case Opt_jqfmt:
2217 ctx->s_jquota_fmt = result.uint_32;
2218 ctx->spec |= EXT4_SPEC_JQFMT;
2219 return 0;
2220 #endif
2221 case Opt_data:
2222 ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS);
2223 ctx_set_mount_opt(ctx, result.uint_32);
2224 ctx->spec |= EXT4_SPEC_DATAJ;
2225 return 0;
2226 case Opt_commit:
2227 if (result.uint_32 == 0)
2228 result.uint_32 = JBD2_DEFAULT_MAX_COMMIT_AGE;
2229 else if (result.uint_32 > INT_MAX / HZ) {
2230 ext4_msg(NULL, KERN_ERR,
2231 "Invalid commit interval %d, "
2232 "must be smaller than %d",
2233 result.uint_32, INT_MAX / HZ);
2234 return -EINVAL;
2235 }
2236 ctx->s_commit_interval = HZ * result.uint_32;
2237 ctx->spec |= EXT4_SPEC_s_commit_interval;
2238 return 0;
2239 case Opt_debug_want_extra_isize:
2240 if ((result.uint_32 & 1) || (result.uint_32 < 4)) {
2241 ext4_msg(NULL, KERN_ERR,
2242 "Invalid want_extra_isize %d", result.uint_32);
2243 return -EINVAL;
2244 }
2245 ctx->s_want_extra_isize = result.uint_32;
2246 ctx->spec |= EXT4_SPEC_s_want_extra_isize;
2247 return 0;
2248 case Opt_max_batch_time:
2249 ctx->s_max_batch_time = result.uint_32;
2250 ctx->spec |= EXT4_SPEC_s_max_batch_time;
2251 return 0;
2252 case Opt_min_batch_time:
2253 ctx->s_min_batch_time = result.uint_32;
2254 ctx->spec |= EXT4_SPEC_s_min_batch_time;
2255 return 0;
2256 case Opt_inode_readahead_blks:
2257 if (result.uint_32 &&
2258 (result.uint_32 > (1 << 30) ||
2259 !is_power_of_2(result.uint_32))) {
2260 ext4_msg(NULL, KERN_ERR,
2261 "EXT4-fs: inode_readahead_blks must be "
2262 "0 or a power of 2 smaller than 2^31");
2263 return -EINVAL;
2264 }
2265 ctx->s_inode_readahead_blks = result.uint_32;
2266 ctx->spec |= EXT4_SPEC_s_inode_readahead_blks;
2267 return 0;
2268 case Opt_init_itable:
2269 ctx_set_mount_opt(ctx, EXT4_MOUNT_INIT_INODE_TABLE);
2270 ctx->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
2271 if (param->type == fs_value_is_string)
2272 ctx->s_li_wait_mult = result.uint_32;
2273 ctx->spec |= EXT4_SPEC_s_li_wait_mult;
2274 return 0;
2275 case Opt_max_dir_size_kb:
2276 ctx->s_max_dir_size_kb = result.uint_32;
2277 ctx->spec |= EXT4_SPEC_s_max_dir_size_kb;
2278 return 0;
2279 #ifdef CONFIG_EXT4_DEBUG
2280 case Opt_fc_debug_max_replay:
2281 ctx->s_fc_debug_max_replay = result.uint_32;
2282 ctx->spec |= EXT4_SPEC_s_fc_debug_max_replay;
2283 return 0;
2284 #endif
2285 case Opt_stripe:
2286 ctx->s_stripe = result.uint_32;
2287 ctx->spec |= EXT4_SPEC_s_stripe;
2288 return 0;
2289 case Opt_resuid:
2290 ctx->s_resuid = result.uid;
2291 ctx->spec |= EXT4_SPEC_s_resuid;
2292 return 0;
2293 case Opt_resgid:
2294 ctx->s_resgid = result.gid;
2295 ctx->spec |= EXT4_SPEC_s_resgid;
2296 return 0;
2297 case Opt_journal_dev:
2298 if (is_remount) {
2299 ext4_msg(NULL, KERN_ERR,
2300 "Cannot specify journal on remount");
2301 return -EINVAL;
2302 }
2303 ctx->journal_devnum = result.uint_32;
2304 ctx->spec |= EXT4_SPEC_JOURNAL_DEV;
2305 return 0;
2306 case Opt_journal_path:
2307 {
2308 struct inode *journal_inode;
2309 struct path path;
2310 int error;
2311
2312 if (is_remount) {
2313 ext4_msg(NULL, KERN_ERR,
2314 "Cannot specify journal on remount");
2315 return -EINVAL;
2316 }
2317
2318 error = fs_lookup_param(fc, param, 1, LOOKUP_FOLLOW, &path);
2319 if (error) {
2320 ext4_msg(NULL, KERN_ERR, "error: could not find "
2321 "journal device path");
2322 return -EINVAL;
2323 }
2324
2325 journal_inode = d_inode(path.dentry);
2326 ctx->journal_devnum = new_encode_dev(journal_inode->i_rdev);
2327 ctx->spec |= EXT4_SPEC_JOURNAL_DEV;
2328 path_put(&path);
2329 return 0;
2330 }
2331 case Opt_journal_ioprio:
2332 if (result.uint_32 > 7) {
2333 ext4_msg(NULL, KERN_ERR, "Invalid journal IO priority"
2334 " (must be 0-7)");
2335 return -EINVAL;
2336 }
2337 ctx->journal_ioprio =
2338 IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, result.uint_32);
2339 ctx->spec |= EXT4_SPEC_JOURNAL_IOPRIO;
2340 return 0;
2341 case Opt_test_dummy_encryption:
2342 return ext4_parse_test_dummy_encryption(param, ctx);
2343 case Opt_dax:
2344 case Opt_dax_type:
2345 #ifdef CONFIG_FS_DAX
2346 {
2347 int type = (token == Opt_dax) ?
2348 Opt_dax : result.uint_32;
2349
2350 switch (type) {
2351 case Opt_dax:
2352 case Opt_dax_always:
2353 ctx_set_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS);
2354 ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER);
2355 break;
2356 case Opt_dax_never:
2357 ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER);
2358 ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS);
2359 break;
2360 case Opt_dax_inode:
2361 ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS);
2362 ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER);
2363 /* Strictly for printing options */
2364 ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE);
2365 break;
2366 }
2367 return 0;
2368 }
2369 #else
2370 ext4_msg(NULL, KERN_INFO, "dax option not supported");
2371 return -EINVAL;
2372 #endif
2373 case Opt_data_err:
2374 if (result.uint_32 == Opt_data_err_abort)
2375 ctx_set_mount_opt(ctx, m->mount_opt);
2376 else if (result.uint_32 == Opt_data_err_ignore)
2377 ctx_clear_mount_opt(ctx, m->mount_opt);
2378 return 0;
2379 case Opt_mb_optimize_scan:
2380 if (result.int_32 == 1) {
2381 ctx_set_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN);
2382 ctx->spec |= EXT4_SPEC_mb_optimize_scan;
2383 } else if (result.int_32 == 0) {
2384 ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN);
2385 ctx->spec |= EXT4_SPEC_mb_optimize_scan;
2386 } else {
2387 ext4_msg(NULL, KERN_WARNING,
2388 "mb_optimize_scan should be set to 0 or 1.");
2389 return -EINVAL;
2390 }
2391 return 0;
2392 }
2393
2394 /*
2395 * At this point we should only be getting options requiring MOPT_SET
2396 * or MOPT_CLEAR. Anything else is a bug.
2397 */
2398 if (m->token == Opt_err) {
2399 ext4_msg(NULL, KERN_WARNING, "buggy handling of option %s",
2400 param->key);
2401 WARN_ON(1);
2402 return -EINVAL;
2403 } else {
2406 unsigned int set = 0;
2407
2408 if ((param->type == fs_value_is_flag) ||
2409 result.uint_32 > 0)
2410 set = 1;
2411
2412 if (m->flags & MOPT_CLEAR)
2413 set = !set;
2414 else if (unlikely(!(m->flags & MOPT_SET))) {
2415 ext4_msg(NULL, KERN_WARNING,
2416 "buggy handling of option %s",
2417 param->key);
2418 WARN_ON(1);
2419 return -EINVAL;
2420 }
2421 if (m->flags & MOPT_2) {
2422 if (set != 0)
2423 ctx_set_mount_opt2(ctx, m->mount_opt);
2424 else
2425 ctx_clear_mount_opt2(ctx, m->mount_opt);
2426 } else {
2427 if (set != 0)
2428 ctx_set_mount_opt(ctx, m->mount_opt);
2429 else
2430 ctx_clear_mount_opt(ctx, m->mount_opt);
2431 }
2432 }
2433
2434 return 0;
2435 }
2436
2437 static int parse_options(struct fs_context *fc, char *options)
2438 {
2439 struct fs_parameter param;
2440 int ret;
2441 char *key;
2442
2443 if (!options)
2444 return 0;
2445
2446 while ((key = strsep(&options, ",")) != NULL) {
2447 if (*key) {
2448 size_t v_len = 0;
2449 char *value = strchr(key, '=');
2450
2451 param.type = fs_value_is_flag;
2452 param.string = NULL;
2453
2454 if (value) {
2455 if (value == key)
2456 continue;
2457
2458 *value++ = 0;
2459 v_len = strlen(value);
2460 param.string = kmemdup_nul(value, v_len,
2461 GFP_KERNEL);
2462 if (!param.string)
2463 return -ENOMEM;
2464 param.type = fs_value_is_string;
2465 }
2466
2467 param.key = key;
2468 param.size = v_len;
2469
2470 ret = ext4_parse_param(fc, &param);
2471 kfree(param.string);
2472 if (ret < 0)
2473 return ret;
2474 }
2475 }
2476
2477 ret = ext4_validate_options(fc);
2478 if (ret < 0)
2479 return ret;
2480
2481 return 0;
2482 }
2483
2484 static int parse_apply_sb_mount_options(struct super_block *sb,
2485 struct ext4_fs_context *m_ctx)
2486 {
2487 struct ext4_sb_info *sbi = EXT4_SB(sb);
2488 char s_mount_opts[64];
2489 struct ext4_fs_context *s_ctx = NULL;
2490 struct fs_context *fc = NULL;
2491 int ret = -ENOMEM;
2492
2493 if (!sbi->s_es->s_mount_opts[0])
2494 return 0;
2495
2496 if (strscpy_pad(s_mount_opts, sbi->s_es->s_mount_opts) < 0)
2497 return -E2BIG;
2498
2499 fc = kzalloc_obj(struct fs_context);
2500 if (!fc)
2501 return -ENOMEM;
2502
2503 s_ctx = kzalloc_obj(struct ext4_fs_context);
2504 if (!s_ctx)
2505 goto out_free;
2506
2507 fc->fs_private = s_ctx;
2508 fc->s_fs_info = sbi;
2509
2510 ret = parse_options(fc, s_mount_opts);
2511 if (ret < 0)
2512 goto parse_failed;
2513
2514 ret = ext4_check_opt_consistency(fc, sb);
2515 if (ret < 0) {
2516 parse_failed:
2517 ext4_msg(sb, KERN_WARNING,
2518 "failed to parse options in superblock: %s",
2519 s_mount_opts);
2520 ret = 0;
2521 goto out_free;
2522 }
2523
2524 if (s_ctx->spec & EXT4_SPEC_JOURNAL_DEV)
2525 m_ctx->journal_devnum = s_ctx->journal_devnum;
2526 if (s_ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)
2527 m_ctx->journal_ioprio = s_ctx->journal_ioprio;
2528
2529 ext4_apply_options(fc, sb);
2530 ret = 0;
2531
2532 out_free:
2533 ext4_fc_free(fc);
2534 kfree(fc);
2535 return ret;
2536 }
2537
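/*
 * Move the journaled quota file names parsed into the context over to
 * the ext4_sb_info: each name is swapped in with rcu_replace_pointer()
 * and the old one freed only after an RCU grace period, since
 * ext4_show_quota_options() reads s_qf_names under rcu_read_lock().
 */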
2538 static void ext4_apply_quota_options(struct fs_context *fc,
2539 struct super_block *sb)
2540 {
2541 #ifdef CONFIG_QUOTA
2542 bool quota_feature = ext4_has_feature_quota(sb);
2543 struct ext4_fs_context *ctx = fc->fs_private;
2544 struct ext4_sb_info *sbi = EXT4_SB(sb);
2545 char *qname;
2546 int i;
2547
2548 if (quota_feature)
2549 return;
2550
2551 if (ctx->spec & EXT4_SPEC_JQUOTA) {
2552 for (i = 0; i < EXT4_MAXQUOTAS; i++) {
2553 if (!(ctx->qname_spec & (1 << i)))
2554 continue;
2555
2556 qname = ctx->s_qf_names[i]; /* May be NULL */
2557 if (qname)
2558 set_opt(sb, QUOTA);
2559 ctx->s_qf_names[i] = NULL;
2560 qname = rcu_replace_pointer(sbi->s_qf_names[i], qname,
2561 lockdep_is_held(&sb->s_umount));
2562 if (qname)
2563 kfree_rcu_mightsleep(qname);
2564 }
2565 }
2566
2567 if (ctx->spec & EXT4_SPEC_JQFMT)
2568 sbi->s_jquota_fmt = ctx->s_jquota_fmt;
2569 #endif
2570 }
2571
2572 /*
2573 * Check quota settings consistency.
2574 */
2575 static int ext4_check_quota_consistency(struct fs_context *fc,
2576 struct super_block *sb)
2577 {
2578 #ifdef CONFIG_QUOTA
2579 struct ext4_fs_context *ctx = fc->fs_private;
2580 struct ext4_sb_info *sbi = EXT4_SB(sb);
2581 bool quota_feature = ext4_has_feature_quota(sb);
2582 bool quota_loaded = sb_any_quota_loaded(sb);
2583 bool usr_qf_name, grp_qf_name, usrquota, grpquota;
2584 int quota_flags, i;
2585
2586 /*
2587 * We do the test below only for project quotas. 'usrquota' and
2588 * 'grpquota' mount options are allowed even without quota feature
2589 * to support legacy quotas in quota files.
2590 */
2591 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_PRJQUOTA) &&
2592 !ext4_has_feature_project(sb)) {
2593 ext4_msg(NULL, KERN_ERR, "Project quota feature not enabled. "
2594 "Cannot enable project quota enforcement.");
2595 return -EINVAL;
2596 }
2597
2598 quota_flags = EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
2599 EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA;
2600 if (quota_loaded &&
2601 ctx->mask_s_mount_opt & quota_flags &&
2602 !ctx_test_mount_opt(ctx, quota_flags))
2603 goto err_quota_change;
2604
2605 if (ctx->spec & EXT4_SPEC_JQUOTA) {
2606
2607 for (i = 0; i < EXT4_MAXQUOTAS; i++) {
2608 if (!(ctx->qname_spec & (1 << i)))
2609 continue;
2610
2611 if (quota_loaded &&
2612 !!sbi->s_qf_names[i] != !!ctx->s_qf_names[i])
2613 goto err_jquota_change;
2614
2615 if (sbi->s_qf_names[i] && ctx->s_qf_names[i] &&
2616 strcmp(get_qf_name(sb, sbi, i),
2617 ctx->s_qf_names[i]) != 0)
2618 goto err_jquota_specified;
2619 }
2620
2621 if (quota_feature) {
2622 ext4_msg(NULL, KERN_INFO,
2623 "Journaled quota options ignored when "
2624 "QUOTA feature is enabled");
2625 return 0;
2626 }
2627 }
2628
2629 if (ctx->spec & EXT4_SPEC_JQFMT) {
2630 if (sbi->s_jquota_fmt != ctx->s_jquota_fmt && quota_loaded)
2631 goto err_jquota_change;
2632 if (quota_feature) {
2633 ext4_msg(NULL, KERN_INFO, "Quota format mount options "
2634 "ignored when QUOTA feature is enabled");
2635 return 0;
2636 }
2637 }
2638
2639 /* Make sure we don't mix old and new quota format */
2640 usr_qf_name = (get_qf_name(sb, sbi, USRQUOTA) ||
2641 ctx->s_qf_names[USRQUOTA]);
2642 grp_qf_name = (get_qf_name(sb, sbi, GRPQUOTA) ||
2643 ctx->s_qf_names[GRPQUOTA]);
2644
2645 usrquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) ||
2646 test_opt(sb, USRQUOTA));
2647
2648 grpquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) ||
2649 test_opt(sb, GRPQUOTA));
2650
2651 if (usr_qf_name) {
2652 ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA);
2653 usrquota = false;
2654 }
2655 if (grp_qf_name) {
2656 ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA);
2657 grpquota = false;
2658 }
2659
2660 if (usr_qf_name || grp_qf_name) {
2661 if (usrquota || grpquota) {
2662 ext4_msg(NULL, KERN_ERR, "old and new quota "
2663 "format mixing");
2664 return -EINVAL;
2665 }
2666
2667 if (!(ctx->spec & EXT4_SPEC_JQFMT || sbi->s_jquota_fmt)) {
2668 ext4_msg(NULL, KERN_ERR, "journaled quota format "
2669 "not specified");
2670 return -EINVAL;
2671 }
2672 }
2673
2674 return 0;
2675
2676 err_quota_change:
2677 ext4_msg(NULL, KERN_ERR,
2678 "Cannot change quota options when quota turned on");
2679 return -EINVAL;
2680 err_jquota_change:
2681 ext4_msg(NULL, KERN_ERR, "Cannot change journaled quota "
2682 "options when quota turned on");
2683 return -EINVAL;
2684 err_jquota_specified:
2685 ext4_msg(NULL, KERN_ERR, "%s quota file already specified",
2686 QTYPE2NAME(i));
2687 return -EINVAL;
2688 #else
2689 return 0;
2690 #endif
2691 }
2692
2693 static int ext4_check_test_dummy_encryption(const struct fs_context *fc,
2694 struct super_block *sb)
2695 {
2696 const struct ext4_fs_context *ctx = fc->fs_private;
2697 const struct ext4_sb_info *sbi = EXT4_SB(sb);
2698
2699 if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy))
2700 return 0;
2701
2702 if (!ext4_has_feature_encrypt(sb)) {
2703 ext4_msg(NULL, KERN_WARNING,
2704 "test_dummy_encryption requires encrypt feature");
2705 return -EINVAL;
2706 }
2707 /*
2708 * This mount option is just for testing, and it's not worthwhile to
2709 * implement the extra complexity (e.g. RCU protection) that would be
2710 * needed to allow it to be set or changed during remount. We do allow
2711 * it to be specified during remount, but only if there is no change.
2712 */
2713 if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
2714 if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy,
2715 &ctx->dummy_enc_policy))
2716 return 0;
2717 ext4_msg(NULL, KERN_WARNING,
2718 "Can't set or change test_dummy_encryption on remount");
2719 return -EINVAL;
2720 }
2721 /* Also make sure s_mount_opts didn't contain a conflicting value. */
2722 if (fscrypt_is_dummy_policy_set(&sbi->s_dummy_enc_policy)) {
2723 if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy,
2724 &ctx->dummy_enc_policy))
2725 return 0;
2726 ext4_msg(NULL, KERN_WARNING,
2727 "Conflicting test_dummy_encryption options");
2728 return -EINVAL;
2729 }
2730 return 0;
2731 }
2732
2733 static void ext4_apply_test_dummy_encryption(struct ext4_fs_context *ctx,
2734 struct super_block *sb)
2735 {
2736 if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy) ||
2737 /* if already set, it was already verified to be the same */
2738 fscrypt_is_dummy_policy_set(&EXT4_SB(sb)->s_dummy_enc_policy))
2739 return;
2740 EXT4_SB(sb)->s_dummy_enc_policy = ctx->dummy_enc_policy;
2741 memset(&ctx->dummy_enc_policy, 0, sizeof(ctx->dummy_enc_policy));
2742 ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled");
2743 }
2744
2745 static int ext4_check_opt_consistency(struct fs_context *fc,
2746 struct super_block *sb)
2747 {
2748 struct ext4_fs_context *ctx = fc->fs_private;
2749 struct ext4_sb_info *sbi = fc->s_fs_info;
2750 int is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE;
2751 int err;
2752
2753 if ((ctx->opt_flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
2754 ext4_msg(NULL, KERN_ERR,
2755 "Mount option(s) incompatible with ext2");
2756 return -EINVAL;
2757 }
2758 if ((ctx->opt_flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
2759 ext4_msg(NULL, KERN_ERR,
2760 "Mount option(s) incompatible with ext3");
2761 return -EINVAL;
2762 }
2763
2764 if (ctx->s_want_extra_isize >
2765 (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE)) {
2766 ext4_msg(NULL, KERN_ERR,
2767 "Invalid want_extra_isize %d",
2768 ctx->s_want_extra_isize);
2769 return -EINVAL;
2770 }
2771
2772 err = ext4_check_test_dummy_encryption(fc, sb);
2773 if (err)
2774 return err;
2775
2776 if ((ctx->spec & EXT4_SPEC_DATAJ) && is_remount) {
2777 if (!sbi->s_journal) {
2778 ext4_msg(NULL, KERN_WARNING,
2779 "Remounting file system with no journal "
2780 "so ignoring journalled data option");
2781 ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS);
2782 } else if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS) !=
2783 test_opt(sb, DATA_FLAGS)) {
2784 ext4_msg(NULL, KERN_ERR, "Cannot change data mode "
2785 "on remount");
2786 return -EINVAL;
2787 }
2788 }
2789
2790 if (is_remount) {
2791 if (!sbi->s_journal &&
2792 ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_ERR_ABORT)) {
2793 ext4_msg(NULL, KERN_WARNING,
2794 "Remounting fs w/o journal so ignoring data_err option");
2795 ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_ERR_ABORT);
2796 }
2797
2798 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) &&
2799 (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) {
2800 ext4_msg(NULL, KERN_ERR, "can't mount with "
2801 "both data=journal and dax");
2802 return -EINVAL;
2803 }
2804
2805 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) &&
2806 (!(sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
2807 (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER))) {
2808 fail_dax_change_remount:
2809 ext4_msg(NULL, KERN_ERR, "can't change "
2810 "dax mount option while remounting");
2811 return -EINVAL;
2812 } else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER) &&
2813 (!(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
2814 (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS))) {
2815 goto fail_dax_change_remount;
2816 } else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE) &&
2817 ((sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
2818 (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
2819 !(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_INODE))) {
2820 goto fail_dax_change_remount;
2821 }
2822 }
2823
2824 return ext4_check_quota_consistency(fc, sb);
2825 }
2826
2827 static void ext4_apply_options(struct fs_context *fc, struct super_block *sb)
2828 {
2829 struct ext4_fs_context *ctx = fc->fs_private;
2830 struct ext4_sb_info *sbi = fc->s_fs_info;
2831
2832 sbi->s_mount_opt &= ~ctx->mask_s_mount_opt;
2833 sbi->s_mount_opt |= ctx->vals_s_mount_opt;
2834 sbi->s_mount_opt2 &= ~ctx->mask_s_mount_opt2;
2835 sbi->s_mount_opt2 |= ctx->vals_s_mount_opt2;
2836 sb->s_flags &= ~ctx->mask_s_flags;
2837 sb->s_flags |= ctx->vals_s_flags;
2838
2839 #define APPLY(X) ({ if (ctx->spec & EXT4_SPEC_##X) sbi->X = ctx->X; })
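/*
 * e.g. APPLY(s_stripe) expands to
 * ({ if (ctx->spec & EXT4_SPEC_s_stripe) sbi->s_stripe = ctx->s_stripe; })
 * so only fields flagged in ctx->spec are copied over.
 */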
2840 APPLY(s_commit_interval);
2841 APPLY(s_stripe);
2842 APPLY(s_max_batch_time);
2843 APPLY(s_min_batch_time);
2844 APPLY(s_want_extra_isize);
2845 APPLY(s_inode_readahead_blks);
2846 APPLY(s_max_dir_size_kb);
2847 APPLY(s_li_wait_mult);
2848 APPLY(s_resgid);
2849 APPLY(s_resuid);
2850
2851 #ifdef CONFIG_EXT4_DEBUG
2852 APPLY(s_fc_debug_max_replay);
2853 #endif
2854
2855 ext4_apply_quota_options(fc, sb);
2856 ext4_apply_test_dummy_encryption(ctx, sb);
2857 }
2858
2859
2860 static int ext4_validate_options(struct fs_context *fc)
2861 {
2862 #ifdef CONFIG_QUOTA
2863 struct ext4_fs_context *ctx = fc->fs_private;
2864 char *usr_qf_name, *grp_qf_name;
2865
2866 usr_qf_name = ctx->s_qf_names[USRQUOTA];
2867 grp_qf_name = ctx->s_qf_names[GRPQUOTA];
2868
2869 if (usr_qf_name || grp_qf_name) {
2870 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) && usr_qf_name)
2871 ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA);
2872
2873 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) && grp_qf_name)
2874 ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA);
2875
2876 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) ||
2877 ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA)) {
2878 ext4_msg(NULL, KERN_ERR, "old and new quota "
2879 "format mixing");
2880 return -EINVAL;
2881 }
2882 }
2883 #endif
2884 return 1;
2885 }
2886
2887 static inline void ext4_show_quota_options(struct seq_file *seq,
2888 struct super_block *sb)
2889 {
2890 #if defined(CONFIG_QUOTA)
2891 struct ext4_sb_info *sbi = EXT4_SB(sb);
2892 char *usr_qf_name, *grp_qf_name;
2893
2894 if (sbi->s_jquota_fmt) {
2895 char *fmtname = "";
2896
2897 switch (sbi->s_jquota_fmt) {
2898 case QFMT_VFS_OLD:
2899 fmtname = "vfsold";
2900 break;
2901 case QFMT_VFS_V0:
2902 fmtname = "vfsv0";
2903 break;
2904 case QFMT_VFS_V1:
2905 fmtname = "vfsv1";
2906 break;
2907 }
2908 seq_printf(seq, ",jqfmt=%s", fmtname);
2909 }
2910
2911 rcu_read_lock();
2912 usr_qf_name = rcu_dereference(sbi->s_qf_names[USRQUOTA]);
2913 grp_qf_name = rcu_dereference(sbi->s_qf_names[GRPQUOTA]);
2914 if (usr_qf_name)
2915 seq_show_option(seq, "usrjquota", usr_qf_name);
2916 if (grp_qf_name)
2917 seq_show_option(seq, "grpjquota", grp_qf_name);
2918 rcu_read_unlock();
2919 #endif
2920 }
2921
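/*
 * Map a token back to its option name. Matching on !spec->type picks
 * the plain flag entry when a name is registered more than once (e.g.
 * "barrier" above exists both as a flag and as a u32 parameter).
 */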
2922 static const char *token2str(int token)
2923 {
2924 const struct fs_parameter_spec *spec;
2925
2926 for (spec = ext4_param_specs; spec->name != NULL; spec++)
2927 if (spec->opt == token && !spec->type)
2928 break;
2929 return spec->name;
2930 }
2931
2932 /*
2933 * Show an option if
2934 * - it's set to a non-default value OR
2935 * - if the per-sb default is different from the global default
2936 */
2937 static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
2938 int nodefs)
2939 {
2940 struct ext4_sb_info *sbi = EXT4_SB(sb);
2941 struct ext4_super_block *es = sbi->s_es;
2942 int def_errors;
2943 const struct mount_opts *m;
2944 char sep = nodefs ? '\n' : ',';
2945
2946 #define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
2947 #define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)
2948
2949 if (sbi->s_sb_block != 1)
2950 SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);
2951
2952 for (m = ext4_mount_opts; m->token != Opt_err; m++) {
2953 int want_set = m->flags & MOPT_SET;
2954 int opt_2 = m->flags & MOPT_2;
2955 unsigned int mount_opt, def_mount_opt;
2956
2957 if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
2958 m->flags & MOPT_SKIP)
2959 continue;
2960
2961 if (opt_2) {
2962 mount_opt = sbi->s_mount_opt2;
2963 def_mount_opt = sbi->s_def_mount_opt2;
2964 } else {
2965 mount_opt = sbi->s_mount_opt;
2966 def_mount_opt = sbi->s_def_mount_opt;
2967 }
2968 /* skip if same as the default */
2969 if (!nodefs && !(m->mount_opt & (mount_opt ^ def_mount_opt)))
2970 continue;
2971 /* select Opt_noFoo vs Opt_Foo */
2972 if ((want_set &&
2973 (mount_opt & m->mount_opt) != m->mount_opt) ||
2974 (!want_set && (mount_opt & m->mount_opt)))
2975 continue;
2976 SEQ_OPTS_PRINT("%s", token2str(m->token));
2977 }
2978
2979 if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
2980 ext4_get_resuid(es) != EXT4_DEF_RESUID)
2981 SEQ_OPTS_PRINT("resuid=%u",
2982 from_kuid_munged(&init_user_ns, sbi->s_resuid));
2983 if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
2984 ext4_get_resgid(es) != EXT4_DEF_RESGID)
2985 SEQ_OPTS_PRINT("resgid=%u",
2986 from_kgid_munged(&init_user_ns, sbi->s_resgid));
2987 def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
2988 if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
2989 SEQ_OPTS_PUTS("errors=remount-ro");
2990 if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
2991 SEQ_OPTS_PUTS("errors=continue");
2992 if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
2993 SEQ_OPTS_PUTS("errors=panic");
2994 if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
2995 SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
2996 if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
2997 SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
2998 if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
2999 SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
3000 if (nodefs && sb->s_flags & SB_I_VERSION)
3001 SEQ_OPTS_PUTS("i_version");
3002 if (nodefs || sbi->s_stripe)
3003 SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
3004 if (nodefs || EXT4_MOUNT_DATA_FLAGS &
3005 (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
3006 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
3007 SEQ_OPTS_PUTS("data=journal");
3008 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
3009 SEQ_OPTS_PUTS("data=ordered");
3010 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
3011 SEQ_OPTS_PUTS("data=writeback");
3012 }
3013 if (nodefs ||
3014 sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
3015 SEQ_OPTS_PRINT("inode_readahead_blks=%u",
3016 sbi->s_inode_readahead_blks);
3017
3018 if (test_opt(sb, INIT_INODE_TABLE) && (nodefs ||
3019 (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
3020 SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
3021 if (nodefs || sbi->s_max_dir_size_kb)
3022 SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
3023 if (test_opt(sb, DATA_ERR_ABORT))
3024 SEQ_OPTS_PUTS("data_err=abort");
3025
3026 fscrypt_show_test_dummy_encryption(seq, sep, sb);
3027
3028 if (sb->s_flags & SB_INLINECRYPT)
3029 SEQ_OPTS_PUTS("inlinecrypt");
3030
3031 if (test_opt(sb, DAX_ALWAYS)) {
3032 if (IS_EXT2_SB(sb))
3033 SEQ_OPTS_PUTS("dax");
3034 else
3035 SEQ_OPTS_PUTS("dax=always");
3036 } else if (test_opt2(sb, DAX_NEVER)) {
3037 SEQ_OPTS_PUTS("dax=never");
3038 } else if (test_opt2(sb, DAX_INODE)) {
3039 SEQ_OPTS_PUTS("dax=inode");
3040 }
3041
3042 if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD &&
3043 !test_opt2(sb, MB_OPTIMIZE_SCAN)) {
3044 SEQ_OPTS_PUTS("mb_optimize_scan=0");
3045 } else if (sbi->s_groups_count < MB_DEFAULT_LINEAR_SCAN_THRESHOLD &&
3046 test_opt2(sb, MB_OPTIMIZE_SCAN)) {
3047 SEQ_OPTS_PUTS("mb_optimize_scan=1");
3048 }
3049
3050 if (nodefs && !test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS))
3051 SEQ_OPTS_PUTS("prefetch_block_bitmaps");
3052
3053 if (ext4_emergency_ro(sb))
3054 SEQ_OPTS_PUTS("emergency_ro");
3055
3056 if (ext4_forced_shutdown(sb))
3057 SEQ_OPTS_PUTS("shutdown");
3058
3059 ext4_show_quota_options(seq, sb);
3060 return 0;
3061 }
3062
3063 static int ext4_show_options(struct seq_file *seq, struct dentry *root)
3064 {
3065 return _ext4_show_options(seq, root->d_sb, 0);
3066 }
3067
3068 int ext4_seq_options_show(struct seq_file *seq, void *offset)
3069 {
3070 struct super_block *sb = seq->private;
3071 int rc;
3072
3073 seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
3074 rc = _ext4_show_options(seq, sb, 1);
3075 seq_putc(seq, '\n');
3076 return rc;
3077 }
3078
3079 static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
3080 int read_only)
3081 {
3082 struct ext4_sb_info *sbi = EXT4_SB(sb);
3083 int err = 0;
3084
3085 if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
3086 ext4_msg(sb, KERN_ERR, "revision level too high, "
3087 "forcing read-only mode");
3088 err = -EROFS;
3089 goto done;
3090 }
3091 if (read_only)
3092 goto done;
3093 if (!(sbi->s_mount_state & EXT4_VALID_FS))
3094 ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
3095 "running e2fsck is recommended");
3096 else if (sbi->s_mount_state & EXT4_ERROR_FS)
3097 ext4_msg(sb, KERN_WARNING,
3098 "warning: mounting fs with errors, "
3099 "running e2fsck is recommended");
3100 else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
3101 le16_to_cpu(es->s_mnt_count) >=
3102 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
3103 ext4_msg(sb, KERN_WARNING,
3104 "warning: maximal mount count reached, "
3105 "running e2fsck is recommended");
3106 else if (le32_to_cpu(es->s_checkinterval) &&
3107 (ext4_get_tstamp(es, s_lastcheck) +
3108 le32_to_cpu(es->s_checkinterval) <= ktime_get_real_seconds()))
3109 ext4_msg(sb, KERN_WARNING,
3110 "warning: checktime reached, "
3111 "running e2fsck is recommended");
3112 if (!sbi->s_journal)
3113 es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
3114 if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
3115 es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
3116 le16_add_cpu(&es->s_mnt_count, 1);
3117 ext4_update_tstamp(es, s_mtime);
3118 if (sbi->s_journal) {
3119 ext4_set_feature_journal_needs_recovery(sb);
3120 if (ext4_has_feature_orphan_file(sb))
3121 ext4_set_feature_orphan_present(sb);
3122 }
3123
3124 err = ext4_commit_super(sb);
3125 done:
3126 if (test_opt(sb, DEBUG))
3127 printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
3128 "bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
3129 sb->s_blocksize,
3130 sbi->s_groups_count,
3131 EXT4_BLOCKS_PER_GROUP(sb),
3132 EXT4_INODES_PER_GROUP(sb),
3133 sbi->s_mount_opt, sbi->s_mount_opt2);
3134 return err;
3135 }
3136
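/*
 * sbi->s_flex_groups is an RCU-protected array of pointers to
 * individually allocated struct flex_groups. Growing it therefore only
 * needs a bigger pointer array plus the newly added entries; the
 * existing per-flex-group counters stay in place, where concurrent
 * readers can still see them.
 */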
3137 int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
3138 {
3139 struct ext4_sb_info *sbi = EXT4_SB(sb);
3140 struct flex_groups **old_groups, **new_groups;
3141 int size, i, j;
3142
3143 if (!sbi->s_log_groups_per_flex)
3144 return 0;
3145
3146 size = ext4_flex_group(sbi, ngroup - 1) + 1;
3147 if (size <= sbi->s_flex_groups_allocated)
3148 return 0;
3149
3150 new_groups = kvzalloc(roundup_pow_of_two(size *
3151 sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
3152 if (!new_groups) {
3153 ext4_msg(sb, KERN_ERR,
3154 "not enough memory for %d flex group pointers", size);
3155 return -ENOMEM;
3156 }
3157 for (i = sbi->s_flex_groups_allocated; i < size; i++) {
3158 new_groups[i] = kvzalloc(roundup_pow_of_two(
3159 sizeof(struct flex_groups)),
3160 GFP_KERNEL);
3161 if (!new_groups[i]) {
3162 for (j = sbi->s_flex_groups_allocated; j < i; j++)
3163 kvfree(new_groups[j]);
3164 kvfree(new_groups);
3165 ext4_msg(sb, KERN_ERR,
3166 "not enough memory for %d flex groups", size);
3167 return -ENOMEM;
3168 }
3169 }
3170 rcu_read_lock();
3171 old_groups = rcu_dereference(sbi->s_flex_groups);
3172 if (old_groups)
3173 memcpy(new_groups, old_groups,
3174 (sbi->s_flex_groups_allocated *
3175 sizeof(struct flex_groups *)));
3176 rcu_read_unlock();
3177 rcu_assign_pointer(sbi->s_flex_groups, new_groups);
3178 sbi->s_flex_groups_allocated = size;
3179 if (old_groups)
3180 ext4_kvfree_array_rcu(old_groups);
3181 return 0;
3182 }
3183
3184 static int ext4_fill_flex_info(struct super_block *sb)
3185 {
3186 struct ext4_sb_info *sbi = EXT4_SB(sb);
3187 struct ext4_group_desc *gdp = NULL;
3188 struct flex_groups *fg;
3189 ext4_group_t flex_group;
3190 int i, err;
3191
3192 sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
3193 if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
3194 sbi->s_log_groups_per_flex = 0;
3195 return 1;
3196 }
3197
3198 err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
3199 if (err)
3200 goto failed;
3201
3202 for (i = 0; i < sbi->s_groups_count; i++) {
3203 gdp = ext4_get_group_desc(sb, i, NULL);
3204
3205 flex_group = ext4_flex_group(sbi, i);
3206 fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
3207 atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
3208 atomic64_add(ext4_free_group_clusters(sb, gdp),
3209 &fg->free_clusters);
3210 atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
3211 }
3212
3213 return 1;
3214 failed:
3215 return 0;
3216 }
3217
3218 static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
3219 struct ext4_group_desc *gdp)
3220 {
3221 int offset = offsetof(struct ext4_group_desc, bg_checksum);
3222 __u16 crc = 0;
3223 __le32 le_group = cpu_to_le32(block_group);
3224 struct ext4_sb_info *sbi = EXT4_SB(sb);
3225
3226 if (ext4_has_feature_metadata_csum(sbi->s_sb)) {
3227 /* Use new metadata_csum algorithm */
3228 __u32 csum32;
3229 __u16 dummy_csum = 0;
3230
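/*
 * Checksum the descriptor up to bg_checksum, feed in a zero
 * dummy_csum in place of the 16-bit checksum field itself, then
 * continue with the remainder so the stored checksum never
 * influences its own computation.
 */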
3231 csum32 = ext4_chksum(sbi->s_csum_seed, (__u8 *)&le_group,
3232 sizeof(le_group));
3233 csum32 = ext4_chksum(csum32, (__u8 *)gdp, offset);
3234 csum32 = ext4_chksum(csum32, (__u8 *)&dummy_csum,
3235 sizeof(dummy_csum));
3236 offset += sizeof(dummy_csum);
3237 if (offset < sbi->s_desc_size)
3238 csum32 = ext4_chksum(csum32, (__u8 *)gdp + offset,
3239 sbi->s_desc_size - offset);
3240
3241 crc = csum32 & 0xFFFF;
3242 goto out;
3243 }
3244
3245 /* old crc16 code */
3246 if (!ext4_has_feature_gdt_csum(sb))
3247 return 0;
3248
3249 crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
3250 crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
3251 crc = crc16(crc, (__u8 *)gdp, offset);
3252 offset += sizeof(gdp->bg_checksum); /* skip checksum */
3253 /* for checksum of struct ext4_group_desc do the rest...*/
3254 if (ext4_has_feature_64bit(sb) && offset < sbi->s_desc_size)
3255 crc = crc16(crc, (__u8 *)gdp + offset,
3256 sbi->s_desc_size - offset);
3257
3258 out:
3259 return cpu_to_le16(crc);
3260 }
3261
3262 int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
3263 struct ext4_group_desc *gdp)
3264 {
3265 if (ext4_has_group_desc_csum(sb) &&
3266 (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
3267 return 0;
3268
3269 return 1;
3270 }
3271
3272 void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
3273 struct ext4_group_desc *gdp)
3274 {
3275 if (!ext4_has_group_desc_csum(sb))
3276 return;
3277 gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
3278 }
3279
3280 /* Called at mount-time, super-block is locked */
3281 static int ext4_check_descriptors(struct super_block *sb,
3282 ext4_fsblk_t sb_block,
3283 ext4_group_t *first_not_zeroed)
3284 {
3285 struct ext4_sb_info *sbi = EXT4_SB(sb);
3286 ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
3287 ext4_fsblk_t last_block;
3288 ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
3289 ext4_fsblk_t block_bitmap;
3290 ext4_fsblk_t inode_bitmap;
3291 ext4_fsblk_t inode_table;
3292 int flexbg_flag = 0;
3293 ext4_group_t i, grp = sbi->s_groups_count;
3294
3295 if (ext4_has_feature_flex_bg(sb))
3296 flexbg_flag = 1;
3297
3298 ext4_debug("Checking group descriptors");
3299
3300 for (i = 0; i < sbi->s_groups_count; i++) {
3301 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
3302
3303 if (i == sbi->s_groups_count - 1 || flexbg_flag)
3304 last_block = ext4_blocks_count(sbi->s_es) - 1;
3305 else
3306 last_block = first_block +
3307 (EXT4_BLOCKS_PER_GROUP(sb) - 1);
3308
3309 if ((grp == sbi->s_groups_count) &&
3310 !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
3311 grp = i;
3312
3313 block_bitmap = ext4_block_bitmap(sb, gdp);
3314 if (block_bitmap == sb_block) {
3315 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3316 "Block bitmap for group %u overlaps "
3317 "superblock", i);
3318 if (!sb_rdonly(sb))
3319 return 0;
3320 }
3321 if (block_bitmap >= sb_block + 1 &&
3322 block_bitmap <= last_bg_block) {
3323 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3324 "Block bitmap for group %u overlaps "
3325 "block group descriptors", i);
3326 if (!sb_rdonly(sb))
3327 return 0;
3328 }
3329 if (block_bitmap < first_block || block_bitmap > last_block) {
3330 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3331 "Block bitmap for group %u not in group "
3332 "(block %llu)!", i, block_bitmap);
3333 return 0;
3334 }
3335 inode_bitmap = ext4_inode_bitmap(sb, gdp);
3336 if (inode_bitmap == sb_block) {
3337 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3338 "Inode bitmap for group %u overlaps "
3339 "superblock", i);
3340 if (!sb_rdonly(sb))
3341 return 0;
3342 }
3343 if (inode_bitmap >= sb_block + 1 &&
3344 inode_bitmap <= last_bg_block) {
3345 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3346 "Inode bitmap for group %u overlaps "
3347 "block group descriptors", i);
3348 if (!sb_rdonly(sb))
3349 return 0;
3350 }
3351 if (inode_bitmap < first_block || inode_bitmap > last_block) {
3352 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3353 "Inode bitmap for group %u not in group "
3354 "(block %llu)!", i, inode_bitmap);
3355 return 0;
3356 }
3357 inode_table = ext4_inode_table(sb, gdp);
3358 if (inode_table == sb_block) {
3359 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3360 "Inode table for group %u overlaps "
3361 "superblock", i);
3362 if (!sb_rdonly(sb))
3363 return 0;
3364 }
3365 if (inode_table >= sb_block + 1 &&
3366 inode_table <= last_bg_block) {
3367 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3368 "Inode table for group %u overlaps "
3369 "block group descriptors", i);
3370 if (!sb_rdonly(sb))
3371 return 0;
3372 }
3373 if (inode_table < first_block ||
3374 inode_table + sbi->s_itb_per_group - 1 > last_block) {
3375 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3376 "Inode table for group %u not in group "
3377 "(block %llu)!", i, inode_table);
3378 return 0;
3379 }
3380 ext4_lock_group(sb, i);
3381 if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
3382 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3383 "Checksum for group %u failed (%u!=%u)",
3384 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
3385 gdp)), le16_to_cpu(gdp->bg_checksum));
3386 if (!sb_rdonly(sb)) {
3387 ext4_unlock_group(sb, i);
3388 return 0;
3389 }
3390 }
3391 ext4_unlock_group(sb, i);
3392 if (!flexbg_flag)
3393 first_block += EXT4_BLOCKS_PER_GROUP(sb);
3394 }
3395 if (NULL != first_not_zeroed)
3396 *first_not_zeroed = grp;
3397 return 1;
3398 }
3399
3400 /*
3401 * Maximal extent format file size.
3402 * Resulting logical blkno at s_maxbytes must fit in our on-disk
3403 * extent format containers, within a sector_t, and within i_blocks
3404 * in the vfs. ext4 inode has 48 bits of i_block in fsblock units,
3405 * so that won't be a limiting factor.
3406 *
3407 * However, there is another limiting factor: we store extents as a
3408 * starting block and a length, so the length of the extent covering the
3409 * maximum file size must also fit into the on-disk format containers.
3410 * Since a length is always one unit bigger than the largest offset it
3411 * covers (we count from 0), we have to lower s_maxbytes by one fs block.
3412 *
3413 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
3414 */
3415 static loff_t ext4_max_size(int blkbits, int has_huge_files)
3416 {
3417 loff_t res;
3418 loff_t upper_limit = MAX_LFS_FILESIZE;
3419
3420 BUILD_BUG_ON(sizeof(blkcnt_t) < sizeof(u64));
3421
3422 if (!has_huge_files) {
3423 upper_limit = (1LL << 32) - 1;
3424
3425 /* total blocks in file system block size */
3426 upper_limit >>= (blkbits - 9);
3427 upper_limit <<= blkbits;
3428 }
3429
3430 /*
3431 * 32-bit extent-start container, ee_block. We lower the maxbytes
3432 * by one fs block, so ee_len can cover the extent of maximum file
3433 * size
3434 */
3435 res = (1LL << 32) - 1;
3436 res <<= blkbits;
3437
3438 /* Sanity check against vm- & vfs- imposed limits */
3439 if (res > upper_limit)
3440 res = upper_limit;
3441
3442 return res;
3443 }
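
/*
 * For illustration, with 4 KiB blocks (blkbits = 12) and huge files
 * enabled: res = ((1LL << 32) - 1) << 12, i.e. one block short of
 * 16 TiB, which is below MAX_LFS_FILESIZE, so s_maxbytes for extent
 * mapped files ends up at 16 TiB - 4 KiB.
 */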
3444
3445 /*
3446 * Maximal bitmap file size. There is a direct, and {,double-,triple-}indirect
3447 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
3448 * We need to be 1 filesystem block less than the 2^48 sector limit.
3449 */
3450 static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
3451 {
3452 loff_t upper_limit, res = EXT4_NDIR_BLOCKS;
3453 int meta_blocks;
3454 unsigned int ppb = 1 << (bits - 2);
3455
3456 /*
3457 * This is calculated to be the largest file size for a dense, block
3458 * mapped file such that the file's total number of 512-byte sectors,
3459 * including data and all indirect blocks, does not exceed (2^48 - 1).
3460 *
3461 * __u32 i_blocks_lo and __u16 i_blocks_high represent the total
3462 * number of 512-byte sectors of the file.
3463 */
3464 if (!has_huge_files) {
3465 /*
3466 * !has_huge_files implies that the inode i_block field
3467 * represents the total number of 512-byte sectors in the
3468 * file, which is limited to 2^32 sectors.
3469 */
3470 upper_limit = (1LL << 32) - 1;
3471
3472 /* total blocks in file system block size */
3473 upper_limit >>= (bits - 9);
3474
3475 } else {
3476 /*
3477 * We use the 48 bit ext4_inode i_blocks.
3478 * With EXT4_HUGE_FILE_FL set, i_blocks
3479 * represents the total number of blocks in
3480 * file system block size units.
3481 */
3482 upper_limit = (1LL << 48) - 1;
3483
3484 }
3485
3486 /* Compute how many blocks we can address by block tree */
3487 res += ppb;
3488 res += ppb * ppb;
3489 res += ((loff_t)ppb) * ppb * ppb;
3490 /* Compute how many metadata blocks are needed */
3491 meta_blocks = 1;
3492 meta_blocks += 1 + ppb;
3493 meta_blocks += 1 + ppb + ppb * ppb;
3494 /* Does block tree limit file size? */
3495 if (res + meta_blocks <= upper_limit)
3496 goto check_lfs;
3497
3498 res = upper_limit;
3499 /* How many metadata blocks are needed for addressing upper_limit? */
3500 upper_limit -= EXT4_NDIR_BLOCKS;
3501 /* indirect blocks */
3502 meta_blocks = 1;
3503 upper_limit -= ppb;
3504 /* double indirect blocks */
3505 if (upper_limit < ppb * ppb) {
3506 meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb);
3507 res -= meta_blocks;
3508 goto check_lfs;
3509 }
3510 meta_blocks += 1 + ppb;
3511 upper_limit -= ppb * ppb;
3512 /* triple indirect blocks for the rest */
3513 meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb) +
3514 DIV_ROUND_UP_ULL(upper_limit, ppb*ppb);
3515 res -= meta_blocks;
3516 check_lfs:
3517 res <<= bits;
3518 if (res > MAX_LFS_FILESIZE)
3519 res = MAX_LFS_FILESIZE;
3520
3521 return res;
3522 }
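
/*
 * For illustration, with 4 KiB blocks (bits = 12): ppb = 1024, so the
 * block tree addresses 12 + 1024 + 1024^2 + 1024^3 ~= 1.07e9 blocks.
 * With huge files the (2^48 - 1)-block i_blocks limit is far larger,
 * so the block tree is the binding limit and a bitmap-mapped file tops
 * out just above 4 TiB.
 */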
3523
3524 static ext4_fsblk_t descriptor_loc(struct super_block *sb,
3525 ext4_fsblk_t logical_sb_block, int nr)
3526 {
3527 struct ext4_sb_info *sbi = EXT4_SB(sb);
3528 ext4_group_t bg, first_meta_bg;
3529 int has_super = 0;
3530
3531 first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
3532
3533 if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
3534 return logical_sb_block + nr + 1;
3535 bg = sbi->s_desc_per_block * nr;
3536 if (ext4_bg_has_super(sb, bg))
3537 has_super = 1;
3538
3539 /*
3540 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
3541 * block 2, not 1. If s_first_data_block == 0 (bigalloc is enabled
3542 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
3543 * compensate.
3544 */
3545 if (sb->s_blocksize == 1024 && nr == 0 &&
3546 le32_to_cpu(sbi->s_es->s_first_data_block) == 0)
3547 has_super++;
3548
3549 return (has_super + ext4_group_first_block_no(sb, bg));
3550 }
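
/*
 * For illustration: on a non-meta_bg fs every descriptor block simply
 * follows the superblock, so nr = 0 maps to logical_sb_block + 1. With
 * meta_bg and, say, s_desc_per_block = 128, descriptor block nr = 2
 * lives in group 256, at that group's first block plus one if the
 * group carries a superblock backup.
 */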
3551
3552 /**
3553 * ext4_get_stripe_size: Get the stripe size.
3554 * @sbi: In memory super block info
3555 *
3556 * If a stripe size was specified via the mount option, use it as long
3557 * as it does not exceed the blocks per group; otherwise fall back to
3558 * the superblock stripe width, then to the RAID stride. If none of
3559 * them fits within a block group, return 0, since the allocator
3560 * needs the stripe size to be less than the blocks per group.
3561 *
3562 */
3563 static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
3564 {
3565 unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
3566 unsigned long stripe_width =
3567 le32_to_cpu(sbi->s_es->s_raid_stripe_width);
3568 int ret;
3569
3570 if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
3571 ret = sbi->s_stripe;
3572 else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
3573 ret = stripe_width;
3574 else if (stride && stride <= sbi->s_blocks_per_group)
3575 ret = stride;
3576 else
3577 ret = 0;
3578
3579 /*
3580 * If the resulting stripe size is 0 or 1, stripe handling makes no
3581 * sense, so we set it to 0 to turn off the stripe handling code.
3582 */
3583 if (ret <= 1)
3584 ret = 0;
3585
3586 return ret;
3587 }
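
/*
 * For illustration: with a superblock recording stride 16 and stripe
 * width 64, and 32768 blocks per group, a mount without stripe= yields
 * 64 (the stripe width), while mounting with stripe=128 yields 128
 * since the mount option takes precedence.
 */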
3588
3589 /*
3590 * Check whether this filesystem can be mounted based on
3591 * the features present and the RDONLY/RDWR mount requested.
3592 * Returns 1 if this filesystem can be mounted as requested,
3593 * 0 if it cannot be.
3594 */
3595 int ext4_feature_set_ok(struct super_block *sb, int readonly)
3596 {
3597 if (ext4_has_unknown_ext4_incompat_features(sb)) {
3598 ext4_msg(sb, KERN_ERR,
3599 "Couldn't mount because of "
3600 "unsupported optional features (%x)",
3601 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
3602 ~EXT4_FEATURE_INCOMPAT_SUPP));
3603 return 0;
3604 }
3605
3606 if (!IS_ENABLED(CONFIG_UNICODE) && ext4_has_feature_casefold(sb)) {
3607 ext4_msg(sb, KERN_ERR,
3608 "Filesystem with casefold feature cannot be "
3609 "mounted without CONFIG_UNICODE");
3610 return 0;
3611 }
3612
3613 if (readonly)
3614 return 1;
3615
3616 if (ext4_has_feature_readonly(sb)) {
3617 ext4_msg(sb, KERN_INFO, "filesystem is read-only");
3618 sb->s_flags |= SB_RDONLY;
3619 return 1;
3620 }
3621
3622 /* Check that feature set is OK for a read-write mount */
3623 if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
3624 ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
3625 "unsupported optional features (%x)",
3626 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
3627 ~EXT4_FEATURE_RO_COMPAT_SUPP));
3628 return 0;
3629 }
3630 if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
3631 ext4_msg(sb, KERN_ERR,
3632 "Can't support bigalloc feature without "
3633 "extents feature\n");
3634 return 0;
3635 }
3636
3637 #if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
3638 if (!readonly && (ext4_has_feature_quota(sb) ||
3639 ext4_has_feature_project(sb))) {
3640 ext4_msg(sb, KERN_ERR,
3641 "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
3642 return 0;
3643 }
3644 #endif /* CONFIG_QUOTA */
3645 return 1;
3646 }
3647
3648 /*
3649 * This function is called once a day by default if we have errors logged
3650 * on the file system.
3651 * Use the err_report_sec sysfs attribute to disable it or adjust its call
3652 * frequency.
3653 */
3654 void print_daily_error_info(struct timer_list *t)
3655 {
3656 struct ext4_sb_info *sbi = timer_container_of(sbi, t, s_err_report);
3657 struct super_block *sb = sbi->s_sb;
3658 struct ext4_super_block *es = sbi->s_es;
3659
3660 if (es->s_error_count)
3661 /* fsck newer than v1.41.13 is needed to clean this condition. */
3662 ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
3663 le32_to_cpu(es->s_error_count));
3664 if (es->s_first_error_time) {
3665 printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %llu: %.*s:%d",
3666 sb->s_id,
3667 ext4_get_tstamp(es, s_first_error_time),
3668 (int) sizeof(es->s_first_error_func),
3669 es->s_first_error_func,
3670 le32_to_cpu(es->s_first_error_line));
3671 if (es->s_first_error_ino)
3672 printk(KERN_CONT ": inode %u",
3673 le32_to_cpu(es->s_first_error_ino));
3674 if (es->s_first_error_block)
3675 printk(KERN_CONT ": block %llu", (unsigned long long)
3676 le64_to_cpu(es->s_first_error_block));
3677 printk(KERN_CONT "\n");
3678 }
3679 if (es->s_last_error_time) {
3680 printk(KERN_NOTICE "EXT4-fs (%s): last error at time %llu: %.*s:%d",
3681 sb->s_id,
3682 ext4_get_tstamp(es, s_last_error_time),
3683 (int) sizeof(es->s_last_error_func),
3684 es->s_last_error_func,
3685 le32_to_cpu(es->s_last_error_line));
3686 if (es->s_last_error_ino)
3687 printk(KERN_CONT ": inode %u",
3688 le32_to_cpu(es->s_last_error_ino));
3689 if (es->s_last_error_block)
3690 printk(KERN_CONT ": block %llu", (unsigned long long)
3691 le64_to_cpu(es->s_last_error_block));
3692 printk(KERN_CONT "\n");
3693 }
3694
3695 if (sbi->s_err_report_sec)
3696 mod_timer(&sbi->s_err_report, jiffies + secs_to_jiffies(sbi->s_err_report_sec));
3697 }
3698
3699 /* Find next suitable group and run ext4_init_inode_table */
3700 static int ext4_run_li_request(struct ext4_li_request *elr)
3701 {
3702 struct ext4_group_desc *gdp = NULL;
3703 struct super_block *sb = elr->lr_super;
3704 ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
3705 ext4_group_t group = elr->lr_next_group;
3706 unsigned int prefetch_ios = 0;
3707 int ret = 0;
3708 int nr = EXT4_SB(sb)->s_mb_prefetch;
3709 u64 start_time;
3710
3711 if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) {
3712 elr->lr_next_group = ext4_mb_prefetch(sb, group, nr, &prefetch_ios);
3713 ext4_mb_prefetch_fini(sb, elr->lr_next_group, nr);
3714 trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group, nr);
3715 if (group >= elr->lr_next_group) {
3716 ret = 1;
3717 if (elr->lr_first_not_zeroed != ngroups &&
3718 !ext4_emergency_state(sb) && !sb_rdonly(sb) &&
3719 test_opt(sb, INIT_INODE_TABLE)) {
3720 elr->lr_next_group = elr->lr_first_not_zeroed;
3721 elr->lr_mode = EXT4_LI_MODE_ITABLE;
3722 ret = 0;
3723 }
3724 }
3725 return ret;
3726 }
3727
3728 for (; group < ngroups; group++) {
3729 gdp = ext4_get_group_desc(sb, group, NULL);
3730 if (!gdp) {
3731 ret = 1;
3732 break;
3733 }
3734
3735 if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
3736 break;
3737 }
3738
3739 if (group >= ngroups)
3740 ret = 1;
3741
3742 if (!ret) {
3743 start_time = ktime_get_ns();
3744 ret = ext4_init_inode_table(sb, group,
3745 elr->lr_timeout ? 0 : 1);
3746 trace_ext4_lazy_itable_init(sb, group);
3747 if (elr->lr_timeout == 0) {
3748 elr->lr_timeout = nsecs_to_jiffies((ktime_get_ns() - start_time) *
3749 EXT4_SB(elr->lr_super)->s_li_wait_mult);
3750 }
3751 elr->lr_next_sched = jiffies + elr->lr_timeout;
3752 elr->lr_next_group = group + 1;
3753 }
3754 return ret;
3755 }
3756
3757 /*
3758 * Remove lr_request from the request list and free the
3759 * request structure. Should be called with li_list_mtx held
3760 */
3761 static void ext4_remove_li_request(struct ext4_li_request *elr)
3762 {
3763 if (!elr)
3764 return;
3765
3766 list_del(&elr->lr_request);
3767 EXT4_SB(elr->lr_super)->s_li_request = NULL;
3768 kfree(elr);
3769 }
3770
3771 static void ext4_unregister_li_request(struct super_block *sb)
3772 {
3773 mutex_lock(&ext4_li_mtx);
3774 if (!ext4_li_info) {
3775 mutex_unlock(&ext4_li_mtx);
3776 return;
3777 }
3778
3779 mutex_lock(&ext4_li_info->li_list_mtx);
3780 ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
3781 mutex_unlock(&ext4_li_info->li_list_mtx);
3782 mutex_unlock(&ext4_li_mtx);
3783 }
3784
3785 static struct task_struct *ext4_lazyinit_task;
3786
3787 /*
3788 * This is the function where the ext4lazyinit thread lives. It walks
3789 * through the request list searching for the next scheduled filesystem.
3790 * When such an fs is found, it runs the lazy initialization request
3791 * (ext4_run_li_request) and keeps track of the time spent in this
3792 * function. Based on that time we compute the next schedule time of
3793 * the request. Once the walk through the list is complete, it computes
3794 * the next wakeup time and puts itself to sleep.
3795 */
3796 static int ext4_lazyinit_thread(void *arg)
3797 {
3798 struct ext4_lazy_init *eli = arg;
3799 struct list_head *pos, *n;
3800 struct ext4_li_request *elr;
3801 unsigned long next_wakeup, cur;
3802
3803 BUG_ON(NULL == eli);
3804 set_freezable();
3805
3806 cont_thread:
3807 while (true) {
3808 bool next_wakeup_initialized = false;
3809
3810 next_wakeup = 0;
3811 mutex_lock(&eli->li_list_mtx);
3812 if (list_empty(&eli->li_request_list)) {
3813 mutex_unlock(&eli->li_list_mtx);
3814 goto exit_thread;
3815 }
3816 list_for_each_safe(pos, n, &eli->li_request_list) {
3817 int err = 0;
3818 int progress = 0;
3819 elr = list_entry(pos, struct ext4_li_request,
3820 lr_request);
3821
3822 if (time_before(jiffies, elr->lr_next_sched)) {
3823 if (!next_wakeup_initialized ||
3824 time_before(elr->lr_next_sched, next_wakeup)) {
3825 next_wakeup = elr->lr_next_sched;
3826 next_wakeup_initialized = true;
3827 }
3828 continue;
3829 }
3830 if (down_read_trylock(&elr->lr_super->s_umount)) {
3831 if (sb_start_write_trylock(elr->lr_super)) {
3832 progress = 1;
3833 /*
3834 * We hold sb->s_umount, sb can not
3835 * be removed from the list, it is
3836 * now safe to drop li_list_mtx
3837 */
3838 mutex_unlock(&eli->li_list_mtx);
3839 err = ext4_run_li_request(elr);
3840 sb_end_write(elr->lr_super);
3841 mutex_lock(&eli->li_list_mtx);
3842 n = pos->next;
3843 }
3844 up_read((&elr->lr_super->s_umount));
3845 }
3846 /* error, remove the lazy_init job */
3847 if (err) {
3848 ext4_remove_li_request(elr);
3849 continue;
3850 }
3851 if (!progress) {
3852 elr->lr_next_sched = jiffies +
3853 get_random_u32_below(EXT4_DEF_LI_MAX_START_DELAY * HZ);
3854 }
3855 if (!next_wakeup_initialized ||
3856 time_before(elr->lr_next_sched, next_wakeup)) {
3857 next_wakeup = elr->lr_next_sched;
3858 next_wakeup_initialized = true;
3859 }
3860 }
3861 mutex_unlock(&eli->li_list_mtx);
3862
3863 try_to_freeze();
3864
3865 cur = jiffies;
3866 if (!next_wakeup_initialized || time_after_eq(cur, next_wakeup)) {
3867 cond_resched();
3868 continue;
3869 }
3870
3871 schedule_timeout_interruptible(next_wakeup - cur);
3872
3873 if (kthread_should_stop()) {
3874 ext4_clear_request_list();
3875 goto exit_thread;
3876 }
3877 }
3878
3879 exit_thread:
3880 /*
3881 * It looks like the request list is empty, but we need
3882 * to check it under the li_list_mtx lock, to prevent any
3883 * additions into it, and of course we should lock ext4_li_mtx
3884 * to atomically free the list and ext4_li_info, because at
3885 * this point another ext4 filesystem could be registering
3886 * a new one.
3887 */
3888 mutex_lock(&ext4_li_mtx);
3889 mutex_lock(&eli->li_list_mtx);
3890 if (!list_empty(&eli->li_request_list)) {
3891 mutex_unlock(&eli->li_list_mtx);
3892 mutex_unlock(&ext4_li_mtx);
3893 goto cont_thread;
3894 }
3895 mutex_unlock(&eli->li_list_mtx);
3896 kfree(ext4_li_info);
3897 ext4_li_info = NULL;
3898 mutex_unlock(&ext4_li_mtx);
3899
3900 return 0;
3901 }
3902
3903 static void ext4_clear_request_list(void)
3904 {
3905 struct list_head *pos, *n;
3906 struct ext4_li_request *elr;
3907
3908 mutex_lock(&ext4_li_info->li_list_mtx);
3909 list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
3910 elr = list_entry(pos, struct ext4_li_request,
3911 lr_request);
3912 ext4_remove_li_request(elr);
3913 }
3914 mutex_unlock(&ext4_li_info->li_list_mtx);
3915 }
3916
3917 static int ext4_run_lazyinit_thread(void)
3918 {
3919 ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
3920 ext4_li_info, "ext4lazyinit");
3921 if (IS_ERR(ext4_lazyinit_task)) {
3922 int err = PTR_ERR(ext4_lazyinit_task);
3923 ext4_clear_request_list();
3924 kfree(ext4_li_info);
3925 ext4_li_info = NULL;
3926 printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
3927 "initialization thread\n",
3928 err);
3929 return err;
3930 }
3931 ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
3932 return 0;
3933 }
3934
3935 /*
3936 * Check whether it makes sense to run the itable init thread or not.
3937 * If there is at least one uninitialized inode table, return the
3938 * corresponding group number; otherwise the loop goes through all
3939 * groups and returns the total number of groups.
3940 */
3941 static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
3942 {
3943 ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
3944 struct ext4_group_desc *gdp = NULL;
3945
3946 if (!ext4_has_group_desc_csum(sb))
3947 return ngroups;
3948
3949 for (group = 0; group < ngroups; group++) {
3950 gdp = ext4_get_group_desc(sb, group, NULL);
3951 if (!gdp)
3952 continue;
3953
3954 if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
3955 break;
3956 }
3957
3958 return group;
3959 }
3960
3961 static int ext4_li_info_new(void)
3962 {
3963 struct ext4_lazy_init *eli = NULL;
3964
3965 eli = kzalloc_obj(*eli);
3966 if (!eli)
3967 return -ENOMEM;
3968
3969 INIT_LIST_HEAD(&eli->li_request_list);
3970 mutex_init(&eli->li_list_mtx);
3971
3972 eli->li_state |= EXT4_LAZYINIT_QUIT;
3973
3974 ext4_li_info = eli;
3975
3976 return 0;
3977 }
3978
3979 static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
3980 ext4_group_t start)
3981 {
3982 struct ext4_li_request *elr;
3983
3984 elr = kzalloc_obj(*elr);
3985 if (!elr)
3986 return NULL;
3987
3988 elr->lr_super = sb;
3989 elr->lr_first_not_zeroed = start;
3990 if (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS)) {
3991 elr->lr_mode = EXT4_LI_MODE_ITABLE;
3992 elr->lr_next_group = start;
3993 } else {
3994 elr->lr_mode = EXT4_LI_MODE_PREFETCH_BBITMAP;
3995 }
3996
3997 /*
3998 * Randomize first schedule time of the request to
3999 * spread the inode table initialization requests
4000 * better.
4001 */
4002 elr->lr_next_sched = jiffies + get_random_u32_below(EXT4_DEF_LI_MAX_START_DELAY * HZ);
4003 return elr;
4004 }
4005
4006 int ext4_register_li_request(struct super_block *sb,
4007 ext4_group_t first_not_zeroed)
4008 {
4009 struct ext4_sb_info *sbi = EXT4_SB(sb);
4010 struct ext4_li_request *elr = NULL;
4011 ext4_group_t ngroups = sbi->s_groups_count;
4012 int ret = 0;
4013
4014 mutex_lock(&ext4_li_mtx);
4015 if (sbi->s_li_request != NULL) {
4016 /*
4017 * Reset timeout so it can be computed again, because
4018 * s_li_wait_mult might have changed.
4019 */
4020 sbi->s_li_request->lr_timeout = 0;
4021 goto out;
4022 }
4023
4024 if (ext4_emergency_state(sb) || sb_rdonly(sb) ||
4025 (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS) &&
4026 (first_not_zeroed == ngroups || !test_opt(sb, INIT_INODE_TABLE))))
4027 goto out;
4028
4029 elr = ext4_li_request_new(sb, first_not_zeroed);
4030 if (!elr) {
4031 ret = -ENOMEM;
4032 goto out;
4033 }
4034
4035 if (NULL == ext4_li_info) {
4036 ret = ext4_li_info_new();
4037 if (ret)
4038 goto out;
4039 }
4040
4041 mutex_lock(&ext4_li_info->li_list_mtx);
4042 list_add(&elr->lr_request, &ext4_li_info->li_request_list);
4043 mutex_unlock(&ext4_li_info->li_list_mtx);
4044
4045 sbi->s_li_request = elr;
4046 /*
4047 * set elr to NULL here since it has been inserted into
4048 * the request_list and its removal and freeing are
4049 * handled by ext4_clear_request_list from now on.
4050 */
4051 elr = NULL;
4052
4053 if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
4054 ret = ext4_run_lazyinit_thread();
4055 if (ret)
4056 goto out;
4057 }
4058 out:
4059 mutex_unlock(&ext4_li_mtx);
4060 if (ret)
4061 kfree(elr);
4062 return ret;
4063 }
4064
4065 /*
4066 * We do not need to lock anything since this is called on
4067 * module unload.
4068 */
4069 static void ext4_destroy_lazyinit_thread(void)
4070 {
4071 /*
4072 * If the thread exited earlier,
4073 * there's nothing to be done.
4074 */
4075 if (!ext4_li_info || !ext4_lazyinit_task)
4076 return;
4077
4078 kthread_stop(ext4_lazyinit_task);
4079 }
4080
4081 static int set_journal_csum_feature_set(struct super_block *sb)
4082 {
4083 int ret = 1;
4084 int compat, incompat;
4085 struct ext4_sb_info *sbi = EXT4_SB(sb);
4086
4087 if (ext4_has_feature_metadata_csum(sb)) {
4088 /* journal checksum v3 */
4089 compat = 0;
4090 incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
4091 } else {
4092 /* journal checksum v1 */
4093 compat = JBD2_FEATURE_COMPAT_CHECKSUM;
4094 incompat = 0;
4095 }
4096
4097 jbd2_journal_clear_features(sbi->s_journal,
4098 JBD2_FEATURE_COMPAT_CHECKSUM, 0,
4099 JBD2_FEATURE_INCOMPAT_CSUM_V3 |
4100 JBD2_FEATURE_INCOMPAT_CSUM_V2);
4101 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
4102 ret = jbd2_journal_set_features(sbi->s_journal,
4103 compat, 0,
4104 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
4105 incompat);
4106 } else if (test_opt(sb, JOURNAL_CHECKSUM)) {
4107 ret = jbd2_journal_set_features(sbi->s_journal,
4108 compat, 0,
4109 incompat);
4110 jbd2_journal_clear_features(sbi->s_journal, 0, 0,
4111 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
4112 } else {
4113 jbd2_journal_clear_features(sbi->s_journal, 0, 0,
4114 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
4115 }
4116
4117 return ret;
4118 }
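
/*
 * For illustration: a metadata_csum filesystem ends up with
 * JBD2_FEATURE_INCOMPAT_CSUM_V3 set and the v1/v2 checksum bits
 * cleared, while an old filesystem without metadata_csum mounted with
 * journal_checksum falls back to JBD2_FEATURE_COMPAT_CHECKSUM (v1).
 */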
4119
4120 /*
4121 * Note: calculating the overhead so we can be compatible with
4122 * historical BSD practice is quite difficult in the face of
4123 * clusters/bigalloc. This is because multiple metadata blocks from
4124 * different block groups can end up in the same allocation cluster.
4125 * Calculating the exact overhead in the face of clustered allocation
4126 * requires either O(all block bitmaps) in memory or O(number of block
4127 * groups**2) in time. We will still calculate the overhead for
4128 * older file systems --- and if we come across a bigalloc file
4129 * system with zero in s_overhead_clusters the estimate will be close to
4130 * correct especially for very large cluster sizes --- but for newer
4131 * file systems, it's better to calculate this figure once at mkfs
4132 * time, and store it in the superblock. If the superblock value is
4133 * present (even for non-bigalloc file systems), we will use it.
4134 */
4135 static int count_overhead(struct super_block *sb, ext4_group_t grp,
4136 char *buf)
4137 {
4138 struct ext4_sb_info *sbi = EXT4_SB(sb);
4139 struct ext4_group_desc *gdp;
4140 ext4_fsblk_t first_block, last_block, b;
4141 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
4142 int s, j, count = 0;
4143 int has_super = ext4_bg_has_super(sb, grp);
4144
4145 if (!ext4_has_feature_bigalloc(sb))
4146 return (has_super + ext4_bg_num_gdb(sb, grp) +
4147 (has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
4148 sbi->s_itb_per_group + 2);
4149
4150 first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
4151 (grp * EXT4_BLOCKS_PER_GROUP(sb));
4152 last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
4153 for (i = 0; i < ngroups; i++) {
4154 gdp = ext4_get_group_desc(sb, i, NULL);
4155 b = ext4_block_bitmap(sb, gdp);
4156 if (b >= first_block && b <= last_block) {
4157 ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
4158 count++;
4159 }
4160 b = ext4_inode_bitmap(sb, gdp);
4161 if (b >= first_block && b <= last_block) {
4162 ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
4163 count++;
4164 }
4165 b = ext4_inode_table(sb, gdp);
4166 if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
4167 for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
4168 int c = EXT4_B2C(sbi, b - first_block);
4169 ext4_set_bit(c, buf);
4170 count++;
4171 }
4172 if (i != grp)
4173 continue;
4174 s = 0;
4175 if (ext4_bg_has_super(sb, grp)) {
4176 ext4_set_bit(s++, buf);
4177 count++;
4178 }
4179 j = ext4_bg_num_gdb(sb, grp);
4180 if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
4181 ext4_error(sb, "Invalid number of block group "
4182 "descriptor blocks: %d", j);
4183 j = EXT4_BLOCKS_PER_GROUP(sb) - s;
4184 }
4185 count += j;
4186 for (; j > 0; j--)
4187 ext4_set_bit(EXT4_B2C(sbi, s++), buf);
4188 }
4189 if (!count)
4190 return 0;
4191 return EXT4_CLUSTERS_PER_GROUP(sb) -
4192 ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
4193 }
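
/*
 * For illustration, in the non-bigalloc case for a group that carries
 * a superblock backup, with 1 GDT block, 128 reserved GDT blocks and
 * 512 inode-table blocks per group, the overhead comes to
 * 1 + 1 + 128 + 512 + 2 = 644 blocks (the final 2 being the block and
 * inode bitmaps).
 */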
4194
4195 /*
4196 * Compute the overhead and stash it in sbi->s_overhead
4197 */
4198 int ext4_calculate_overhead(struct super_block *sb)
4199 {
4200 struct ext4_sb_info *sbi = EXT4_SB(sb);
4201 struct ext4_super_block *es = sbi->s_es;
4202 struct inode *j_inode;
4203 unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
4204 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
4205 ext4_fsblk_t overhead = 0;
4206 char *buf = kvmalloc(sb->s_blocksize, GFP_NOFS | __GFP_ZERO);
4207
4208 if (!buf)
4209 return -ENOMEM;
4210
4211 /*
4212 * Compute the overhead (FS structures). This is constant
4213 * for a given filesystem unless the number of block groups
4214 * changes, so we cache the previous value until it does.
4215 */
4216
4217 /*
4218 * All of the blocks before first_data_block are overhead
4219 */
4220 overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
4221
4222 /*
4223 * Add the overhead found in each block group
4224 */
4225 for (i = 0; i < ngroups; i++) {
4226 int blks;
4227
4228 blks = count_overhead(sb, i, buf);
4229 overhead += blks;
4230 if (blks)
4231 memset(buf, 0, sb->s_blocksize);
4232 cond_resched();
4233 }
4234
4235 /*
4236 * Add the internal journal blocks whether the journal has been
4237 * loaded or not
4238 */
4239 if (sbi->s_journal && !sbi->s_journal_bdev_file)
4240 overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_total_len);
4241 else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
4242 /* j_inum for internal journal is non-zero */
4243 j_inode = ext4_get_journal_inode(sb, j_inum);
4244 if (!IS_ERR(j_inode)) {
4245 j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
4246 overhead += EXT4_NUM_B2C(sbi, j_blocks);
4247 iput(j_inode);
4248 } else {
4249 ext4_msg(sb, KERN_ERR, "can't get journal size");
4250 }
4251 }
4252 sbi->s_overhead = overhead;
4253 smp_wmb();
4254 kvfree(buf);
4255 return 0;
4256 }
4257
4258 static void ext4_set_resv_clusters(struct super_block *sb)
4259 {
4260 ext4_fsblk_t resv_clusters;
4261 struct ext4_sb_info *sbi = EXT4_SB(sb);
4262
4263 /*
4264 * There's no need to reserve anything when we aren't using extents.
4265 * The space estimates are exact, there are no unwritten extents,
4266 * hole punching doesn't need new metadata... This is needed especially
4267 * to keep ext2/3 backward compatibility.
4268 */
4269 if (!ext4_has_feature_extents(sb))
4270 return;
4271 /*
4272 * By default we reserve 2% or 4096 clusters, whichever is smaller.
4273 * This should cover the situations where we cannot afford to run
4274 * out of space, like for example hole punching or converting
4275 * unwritten extents in the delalloc path. In most cases such an
4276 * allocation would require 1 or 2 blocks; higher numbers are
4277 * very rare.
4278 */
4279 resv_clusters = (ext4_blocks_count(sbi->s_es) >>
4280 sbi->s_cluster_bits);
4281
4282 do_div(resv_clusters, 50);
4283 resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
4284
4285 atomic64_set(&sbi->s_resv_clusters, resv_clusters);
4286 }
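
/*
 * For illustration, on a 1 TiB filesystem with 4 KiB blocks and no
 * bigalloc (cluster == block): 2% of the ~268M clusters is ~5.4M, so
 * the 4096-cluster cap wins and we reserve 4096 clusters, i.e. 16 MiB.
 */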
4287
4288 static const char *ext4_quota_mode(struct super_block *sb)
4289 {
4290 #ifdef CONFIG_QUOTA
4291 if (!ext4_quota_capable(sb))
4292 return "none";
4293
4294 if (EXT4_SB(sb)->s_journal && ext4_is_quota_journalled(sb))
4295 return "journalled";
4296 else
4297 return "writeback";
4298 #else
4299 return "disabled";
4300 #endif
4301 }
4302
4303 static void ext4_setup_csum_trigger(struct super_block *sb,
4304 enum ext4_journal_trigger_type type,
4305 void (*trigger)(
4306 struct jbd2_buffer_trigger_type *type,
4307 struct buffer_head *bh,
4308 void *mapped_data,
4309 size_t size))
4310 {
4311 struct ext4_sb_info *sbi = EXT4_SB(sb);
4312
4313 sbi->s_journal_triggers[type].sb = sb;
4314 sbi->s_journal_triggers[type].tr_triggers.t_frozen = trigger;
4315 }
4316
4317 static void ext4_free_sbi(struct ext4_sb_info *sbi)
4318 {
4319 if (!sbi)
4320 return;
4321
4322 kfree(sbi->s_blockgroup_lock);
4323 fs_put_dax(sbi->s_daxdev, NULL);
4324 kfree(sbi);
4325 }
4326
4327 static struct ext4_sb_info *ext4_alloc_sbi(struct super_block *sb)
4328 {
4329 struct ext4_sb_info *sbi;
4330
4331 sbi = kzalloc_obj(*sbi);
4332 if (!sbi)
4333 return NULL;
4334
4335 sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off,
4336 NULL, NULL);
4337
4338 sbi->s_blockgroup_lock =
4339 kzalloc_obj(struct blockgroup_lock);
4340
4341 if (!sbi->s_blockgroup_lock)
4342 goto err_out;
4343
4344 sb->s_fs_info = sbi;
4345 sbi->s_sb = sb;
4346 return sbi;
4347 err_out:
4348 fs_put_dax(sbi->s_daxdev, NULL);
4349 kfree(sbi);
4350 return NULL;
4351 }
4352
4353 static void ext4_set_def_opts(struct super_block *sb,
4354 struct ext4_super_block *es)
4355 {
4356 unsigned long def_mount_opts;
4357
4358 /* Set defaults before we parse the mount options */
4359 def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
4360 set_opt(sb, INIT_INODE_TABLE);
4361 if (def_mount_opts & EXT4_DEFM_DEBUG)
4362 set_opt(sb, DEBUG);
4363 if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
4364 set_opt(sb, GRPID);
4365 if (def_mount_opts & EXT4_DEFM_UID16)
4366 set_opt(sb, NO_UID32);
4367 /* xattr user namespace & acls are now defaulted on */
4368 set_opt(sb, XATTR_USER);
4369 #ifdef CONFIG_EXT4_FS_POSIX_ACL
4370 set_opt(sb, POSIX_ACL);
4371 #endif
4372 if (ext4_has_feature_fast_commit(sb))
4373 set_opt2(sb, JOURNAL_FAST_COMMIT);
4374 /* don't forget to enable journal_csum when metadata_csum is enabled. */
4375 if (ext4_has_feature_metadata_csum(sb))
4376 set_opt(sb, JOURNAL_CHECKSUM);
4377
4378 if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
4379 set_opt(sb, JOURNAL_DATA);
4380 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
4381 set_opt(sb, ORDERED_DATA);
4382 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
4383 set_opt(sb, WRITEBACK_DATA);
4384
4385 if (le16_to_cpu(es->s_errors) == EXT4_ERRORS_PANIC)
4386 set_opt(sb, ERRORS_PANIC);
4387 else if (le16_to_cpu(es->s_errors) == EXT4_ERRORS_CONTINUE)
4388 set_opt(sb, ERRORS_CONT);
4389 else
4390 set_opt(sb, ERRORS_RO);
4391 /* block_validity enabled by default; disable with noblock_validity */
4392 set_opt(sb, BLOCK_VALIDITY);
4393 if (def_mount_opts & EXT4_DEFM_DISCARD)
4394 set_opt(sb, DISCARD);
4395
4396 if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
4397 set_opt(sb, BARRIER);
4398
4399 /*
4400 * enable delayed allocation by default
4401 * Use -o nodelalloc to turn it off
4402 */
4403 if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
4404 ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
4405 set_opt(sb, DELALLOC);
4406
4407 set_opt(sb, DIOREAD_NOLOCK);
4408 }
4409
4410 static int ext4_handle_clustersize(struct super_block *sb)
4411 {
4412 struct ext4_sb_info *sbi = EXT4_SB(sb);
4413 struct ext4_super_block *es = sbi->s_es;
4414 int clustersize;
4415
4416 /* Handle clustersize */
4417 clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
4418 if (ext4_has_feature_bigalloc(sb)) {
4419 if (clustersize < sb->s_blocksize) {
4420 ext4_msg(sb, KERN_ERR,
4421 "cluster size (%d) smaller than "
4422 "block size (%lu)", clustersize, sb->s_blocksize);
4423 return -EINVAL;
4424 }
4425 sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
4426 le32_to_cpu(es->s_log_block_size);
4427 } else {
4428 if (clustersize != sb->s_blocksize) {
4429 ext4_msg(sb, KERN_ERR,
4430 "fragment/cluster size (%d) != "
4431 "block size (%lu)", clustersize, sb->s_blocksize);
4432 return -EINVAL;
4433 }
4434 if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
4435 ext4_msg(sb, KERN_ERR,
4436 "#blocks per group too big: %lu",
4437 sbi->s_blocks_per_group);
4438 return -EINVAL;
4439 }
4440 sbi->s_cluster_bits = 0;
4441 }
4442 sbi->s_clusters_per_group = le32_to_cpu(es->s_clusters_per_group);
4443 if (sbi->s_clusters_per_group > sb->s_blocksize * 8) {
4444 ext4_msg(sb, KERN_ERR, "#clusters per group too big: %lu",
4445 sbi->s_clusters_per_group);
4446 return -EINVAL;
4447 }
4448 if (sbi->s_blocks_per_group !=
4449 (sbi->s_clusters_per_group * (clustersize / sb->s_blocksize))) {
4450 ext4_msg(sb, KERN_ERR,
4451 "blocks per group (%lu) and clusters per group (%lu) inconsistent",
4452 sbi->s_blocks_per_group, sbi->s_clusters_per_group);
4453 return -EINVAL;
4454 }
4455 sbi->s_cluster_ratio = clustersize / sb->s_blocksize;
4456
4457 /* Do we have standard group size of clustersize * 8 blocks ? */
4458 if (sbi->s_blocks_per_group == clustersize << 3)
4459 set_opt2(sb, STD_GROUP_SIZE);
4460
4461 return 0;
4462 }
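
/*
 * For illustration: a bigalloc fs with 4 KiB blocks and 64 KiB
 * clusters gets s_cluster_bits = 4 and s_cluster_ratio = 16 blocks
 * per cluster; without bigalloc the cluster size must equal the block
 * size and both collapse to a 1:1 mapping.
 */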
4463
4464 /*
4465 * ext4_atomic_write_init: Initializes filesystem min & max atomic write units.
4466 * On a non-bigalloc filesystem the awu will be based upon the filesystem
4467 * blocksize & the bdev awu units.
4468 * With bigalloc it will be based upon the bigalloc cluster size & the bdev awu units.
4469 * @sb: super block
4470 */
4471 static void ext4_atomic_write_init(struct super_block *sb)
4472 {
4473 struct ext4_sb_info *sbi = EXT4_SB(sb);
4474 struct block_device *bdev = sb->s_bdev;
4475 unsigned int clustersize = EXT4_CLUSTER_SIZE(sb);
4476
4477 if (!bdev_can_atomic_write(bdev))
4478 return;
4479
4480 if (!ext4_has_feature_extents(sb))
4481 return;
4482
4483 sbi->s_awu_min = max(sb->s_blocksize,
4484 bdev_atomic_write_unit_min_bytes(bdev));
4485 sbi->s_awu_max = min(clustersize,
4486 bdev_atomic_write_unit_max_bytes(bdev));
4487 if (sbi->s_awu_min && sbi->s_awu_max &&
4488 sbi->s_awu_min <= sbi->s_awu_max) {
4489 ext4_msg(sb, KERN_NOTICE, "Supports (experimental) DIO atomic writes awu_min: %u, awu_max: %u",
4490 sbi->s_awu_min, sbi->s_awu_max);
4491 } else {
4492 sbi->s_awu_min = 0;
4493 sbi->s_awu_max = 0;
4494 }
4495 }
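
/*
 * For illustration: with 4 KiB blocks on a bdev advertising atomic
 * write units from 4 KiB to 64 KiB, a non-bigalloc fs (cluster ==
 * block) gets awu_min = awu_max = 4 KiB, while a 64 KiB-cluster
 * bigalloc fs would get awu_max = 64 KiB.
 */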
4496
4497 static void ext4_fast_commit_init(struct super_block *sb)
4498 {
4499 struct ext4_sb_info *sbi = EXT4_SB(sb);
4500
4501 /* Initialize fast commit stuff */
4502 atomic_set(&sbi->s_fc_subtid, 0);
4503 INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_MAIN]);
4504 INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_STAGING]);
4505 INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_MAIN]);
4506 INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]);
4507 sbi->s_fc_bytes = 0;
4508 ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
4509 sbi->s_fc_ineligible_tid = 0;
4510 mutex_init(&sbi->s_fc_lock);
4511 memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats));
4512 sbi->s_fc_replay_state.fc_regions = NULL;
4513 sbi->s_fc_replay_state.fc_regions_size = 0;
4514 sbi->s_fc_replay_state.fc_regions_used = 0;
4515 sbi->s_fc_replay_state.fc_regions_valid = 0;
4516 sbi->s_fc_replay_state.fc_modified_inodes = NULL;
4517 sbi->s_fc_replay_state.fc_modified_inodes_size = 0;
4518 sbi->s_fc_replay_state.fc_modified_inodes_used = 0;
4519 }
4520
4521 static int ext4_inode_info_init(struct super_block *sb,
4522 struct ext4_super_block *es)
4523 {
4524 struct ext4_sb_info *sbi = EXT4_SB(sb);
4525
4526 if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
4527 sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
4528 sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
4529 } else {
4530 sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
4531 sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
4532 if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
4533 ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
4534 sbi->s_first_ino);
4535 return -EINVAL;
4536 }
4537 if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
4538 (!is_power_of_2(sbi->s_inode_size)) ||
4539 (sbi->s_inode_size > sb->s_blocksize)) {
4540 ext4_msg(sb, KERN_ERR,
4541 "unsupported inode size: %d",
4542 sbi->s_inode_size);
4543 ext4_msg(sb, KERN_ERR, "blocksize: %lu", sb->s_blocksize);
4544 return -EINVAL;
4545 }
4546 /*
4547 * i_atime_extra is the last extra field available for
4548 * [acm]times in struct ext4_inode. Checking for that
4549 * field should suffice to ensure we have extra space
4550 * for all three.
4551 */
4552 if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) +
4553 sizeof(((struct ext4_inode *)0)->i_atime_extra)) {
4554 sb->s_time_gran = 1;
4555 sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX;
4556 } else {
4557 sb->s_time_gran = NSEC_PER_SEC;
4558 sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX;
4559 }
4560 sb->s_time_min = EXT4_TIMESTAMP_MIN;
4561 }
4562
4563 if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
4564 sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
4565 EXT4_GOOD_OLD_INODE_SIZE;
4566 if (ext4_has_feature_extra_isize(sb)) {
4567 unsigned v, max = (sbi->s_inode_size -
4568 EXT4_GOOD_OLD_INODE_SIZE);
4569
4570 v = le16_to_cpu(es->s_want_extra_isize);
4571 if (v > max) {
4572 ext4_msg(sb, KERN_ERR,
4573 "bad s_want_extra_isize: %d", v);
4574 return -EINVAL;
4575 }
4576 if (sbi->s_want_extra_isize < v)
4577 sbi->s_want_extra_isize = v;
4578
4579 v = le16_to_cpu(es->s_min_extra_isize);
4580 if (v > max) {
4581 ext4_msg(sb, KERN_ERR,
4582 "bad s_min_extra_isize: %d", v);
4583 return -EINVAL;
4584 }
4585 if (sbi->s_want_extra_isize < v)
4586 sbi->s_want_extra_isize = v;
4587 }
4588 }
4589
4590 return 0;
4591 }
4592
4593 #if IS_ENABLED(CONFIG_UNICODE)
4594 static int ext4_encoding_init(struct super_block *sb, struct ext4_super_block *es)
4595 {
4596 const struct ext4_sb_encodings *encoding_info;
4597 struct unicode_map *encoding;
4598 __u16 encoding_flags = le16_to_cpu(es->s_encoding_flags);
4599
4600 if (!ext4_has_feature_casefold(sb) || sb->s_encoding)
4601 return 0;
4602
4603 encoding_info = ext4_sb_read_encoding(es);
4604 if (!encoding_info) {
4605 ext4_msg(sb, KERN_ERR,
4606 "Encoding requested by superblock is unknown");
4607 return -EINVAL;
4608 }
4609
4610 encoding = utf8_load(encoding_info->version);
4611 if (IS_ERR(encoding)) {
4612 ext4_msg(sb, KERN_ERR,
4613 "can't mount with superblock charset: %s-%u.%u.%u "
4614 "not supported by the kernel. flags: 0x%x.",
4615 encoding_info->name,
4616 unicode_major(encoding_info->version),
4617 unicode_minor(encoding_info->version),
4618 unicode_rev(encoding_info->version),
4619 encoding_flags);
4620 return -EINVAL;
4621 }
4622 ext4_msg(sb, KERN_INFO,"Using encoding defined by superblock: "
4623 "%s-%u.%u.%u with flags 0x%hx", encoding_info->name,
4624 unicode_major(encoding_info->version),
4625 unicode_minor(encoding_info->version),
4626 unicode_rev(encoding_info->version),
4627 encoding_flags);
4628
4629 sb->s_encoding = encoding;
4630 sb->s_encoding_flags = encoding_flags;
4631
4632 return 0;
4633 }
4634 #else
4635 static inline int ext4_encoding_init(struct super_block *sb, struct ext4_super_block *es)
4636 {
4637 return 0;
4638 }
4639 #endif
4640
4641 static int ext4_init_metadata_csum(struct super_block *sb, struct ext4_super_block *es)
4642 {
4643 struct ext4_sb_info *sbi = EXT4_SB(sb);
4644
4645 /* Warn if metadata_csum and gdt_csum are both set. */
4646 if (ext4_has_feature_metadata_csum(sb) &&
4647 ext4_has_feature_gdt_csum(sb))
4648 ext4_warning(sb, "metadata_csum and uninit_bg are "
4649 "redundant flags; please run fsck.");
4650
4651 /* Check for a known checksum algorithm */
4652 if (!ext4_verify_csum_type(sb, es)) {
4653 ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
4654 "unknown checksum algorithm.");
4655 return -EINVAL;
4656 }
4657 ext4_setup_csum_trigger(sb, EXT4_JTR_ORPHAN_FILE,
4658 ext4_orphan_file_block_trigger);
4659
4660 /* Check superblock checksum */
4661 if (!ext4_superblock_csum_verify(sb, es)) {
4662 ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
4663 "invalid superblock checksum. Run e2fsck?");
4664 return -EFSBADCRC;
4665 }
4666
4667 /* Precompute checksum seed for all metadata */
4668 if (ext4_has_feature_csum_seed(sb))
4669 sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
4670 else if (ext4_has_feature_metadata_csum(sb) ||
4671 ext4_has_feature_ea_inode(sb))
4672 sbi->s_csum_seed = ext4_chksum(~0, es->s_uuid,
4673 sizeof(es->s_uuid));
4674 return 0;
4675 }
4676
4677 static int ext4_check_feature_compatibility(struct super_block *sb,
4678 struct ext4_super_block *es,
4679 int silent)
4680 {
4681 struct ext4_sb_info *sbi = EXT4_SB(sb);
4682
4683 if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
4684 (ext4_has_compat_features(sb) ||
4685 ext4_has_ro_compat_features(sb) ||
4686 ext4_has_incompat_features(sb)))
4687 ext4_msg(sb, KERN_WARNING,
4688 "feature flags set on rev 0 fs, "
4689 "running e2fsck is recommended");
4690
4691 if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
4692 set_opt2(sb, HURD_COMPAT);
4693 if (ext4_has_feature_64bit(sb)) {
4694 ext4_msg(sb, KERN_ERR,
4695 "The Hurd can't support 64-bit file systems");
4696 return -EINVAL;
4697 }
4698
4699 /*
4700 * ea_inode feature uses l_i_version field which is not
4701 * available in HURD_COMPAT mode.
4702 */
4703 if (ext4_has_feature_ea_inode(sb)) {
4704 ext4_msg(sb, KERN_ERR,
4705 "ea_inode feature is not supported for Hurd");
4706 return -EINVAL;
4707 }
4708 }
4709
4710 if (IS_EXT2_SB(sb)) {
4711 if (ext2_feature_set_ok(sb))
4712 ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
4713 "using the ext4 subsystem");
4714 else {
4715 /*
4716 * If we're probing be silent, if this looks like
4717 * it's actually an ext[34] filesystem.
4718 */
4719 if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
4720 return -EINVAL;
4721 ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
4722 "to feature incompatibilities");
4723 return -EINVAL;
4724 }
4725 }
4726
4727 if (IS_EXT3_SB(sb)) {
4728 if (ext3_feature_set_ok(sb))
4729 ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
4730 "using the ext4 subsystem");
4731 else {
4732 /*
4733 * If we're probing be silent, if this looks like
4734 * it's actually an ext4 filesystem.
4735 */
4736 if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
4737 return -EINVAL;
4738 ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
4739 "to feature incompatibilities");
4740 return -EINVAL;
4741 }
4742 }
4743
4744 /*
4745 * Check feature flags regardless of the revision level, since we
4746 * previously didn't change the revision level when setting the flags,
4747 * so there is a chance incompat flags are set on a rev 0 filesystem.
4748 */
4749 if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
4750 return -EINVAL;
4751
4752 if (sbi->s_daxdev) {
4753 if (sb->s_blocksize == PAGE_SIZE)
4754 set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags);
4755 else
4756 ext4_msg(sb, KERN_ERR, "unsupported blocksize for DAX\n");
4757 }
4758
4759 if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) {
4760 if (ext4_has_feature_inline_data(sb)) {
4761 ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
4762 " that may contain inline data");
4763 return -EINVAL;
4764 }
4765 if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) {
4766 ext4_msg(sb, KERN_ERR,
4767 "DAX unsupported by block device.");
4768 return -EINVAL;
4769 }
4770 }
4771
4772 if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
4773 ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
4774 es->s_encryption_level);
4775 return -EINVAL;
4776 }
4777
4778 return 0;
4779 }
4780
4781 static int ext4_check_geometry(struct super_block *sb,
4782 struct ext4_super_block *es)
4783 {
4784 struct ext4_sb_info *sbi = EXT4_SB(sb);
4785 __u64 blocks_count;
4786 int err;
4787
4788 if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (sb->s_blocksize / 4)) {
4789 ext4_msg(sb, KERN_ERR,
4790 "Number of reserved GDT blocks insanely large: %d",
4791 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
4792 return -EINVAL;
4793 }
4794 /*
4795 * Test whether we have more sectors than will fit in sector_t,
4796 * and whether the max offset is addressable by the page cache.
4797 */
4798 err = generic_check_addressable(sb->s_blocksize_bits,
4799 ext4_blocks_count(es));
4800 if (err) {
4801 ext4_msg(sb, KERN_ERR, "filesystem"
4802 " too large to mount safely on this system");
4803 return err;
4804 }
4805
4806 /* check blocks count against device size */
4807 blocks_count = sb_bdev_nr_blocks(sb);
4808 if (blocks_count && ext4_blocks_count(es) > blocks_count) {
4809 ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
4810 "exceeds size of device (%llu blocks)",
4811 ext4_blocks_count(es), blocks_count);
4812 return -EINVAL;
4813 }
4814
4815 /*
4816 * It makes no sense for the first data block to be beyond the end
4817 * of the filesystem.
4818 */
4819 if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
4820 ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
4821 "block %u is beyond end of filesystem (%llu)",
4822 le32_to_cpu(es->s_first_data_block),
4823 ext4_blocks_count(es));
4824 return -EINVAL;
4825 }
4826 if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
4827 (sbi->s_cluster_ratio == 1)) {
4828 ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
4829 "block is 0 with a 1k block and cluster size");
4830 return -EINVAL;
4831 }
4832
4833 blocks_count = (ext4_blocks_count(es) -
4834 le32_to_cpu(es->s_first_data_block) +
4835 EXT4_BLOCKS_PER_GROUP(sb) - 1);
4836 do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
4837 if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
4838 ext4_msg(sb, KERN_WARNING, "groups count too large: %llu "
4839 "(block count %llu, first data block %u, "
4840 "blocks per group %lu)", blocks_count,
4841 ext4_blocks_count(es),
4842 le32_to_cpu(es->s_first_data_block),
4843 EXT4_BLOCKS_PER_GROUP(sb));
4844 return -EINVAL;
4845 }
4846 sbi->s_groups_count = blocks_count;
4847 sbi->s_blockfile_groups = min(sbi->s_groups_count,
4848 (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
4849 if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
4850 le32_to_cpu(es->s_inodes_count)) {
4851 ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
4852 le32_to_cpu(es->s_inodes_count),
4853 ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
4854 return -EINVAL;
4855 }
4856
4857 return 0;
4858 }
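
/*
 * For illustration: a 1 TiB fs with 4 KiB blocks and 32768 blocks per
 * group has (2^28 + 32767) / 32768 = 8192 block groups, well below the
 * 2^32 - EXT4_DESC_PER_BLOCK ceiling enforced above.
 */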
4859
4860 static int ext4_group_desc_init(struct super_block *sb,
4861 struct ext4_super_block *es,
4862 ext4_fsblk_t logical_sb_block,
4863 ext4_group_t *first_not_zeroed)
4864 {
4865 struct ext4_sb_info *sbi = EXT4_SB(sb);
4866 unsigned int db_count;
4867 ext4_fsblk_t block;
4868 int i;
4869
4870 db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
4871 EXT4_DESC_PER_BLOCK(sb);
4872 if (ext4_has_feature_meta_bg(sb)) {
4873 if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
4874 ext4_msg(sb, KERN_WARNING,
4875 "first meta block group too large: %u "
4876 "(group descriptor block count %u)",
4877 le32_to_cpu(es->s_first_meta_bg), db_count);
4878 return -EINVAL;
4879 }
4880 }
4881 rcu_assign_pointer(sbi->s_group_desc,
4882 kvmalloc_objs(struct buffer_head *, db_count));
4883 if (sbi->s_group_desc == NULL) {
4884 ext4_msg(sb, KERN_ERR, "not enough memory");
4885 return -ENOMEM;
4886 }
4887
4888 bgl_lock_init(sbi->s_blockgroup_lock);
4889
4890 /* Pre-read the descriptors into the buffer cache */
4891 for (i = 0; i < db_count; i++) {
4892 block = descriptor_loc(sb, logical_sb_block, i);
4893 ext4_sb_breadahead_unmovable(sb, block);
4894 }
4895
4896 for (i = 0; i < db_count; i++) {
4897 struct buffer_head *bh;
4898
4899 block = descriptor_loc(sb, logical_sb_block, i);
4900 bh = ext4_sb_bread_unmovable(sb, block);
4901 if (IS_ERR(bh)) {
4902 ext4_msg(sb, KERN_ERR,
4903 "can't read group descriptor %d", i);
4904 sbi->s_gdb_count = i;
4905 return PTR_ERR(bh);
4906 }
4907 rcu_read_lock();
4908 rcu_dereference(sbi->s_group_desc)[i] = bh;
4909 rcu_read_unlock();
4910 }
4911 sbi->s_gdb_count = db_count;
4912 if (!ext4_check_descriptors(sb, logical_sb_block, first_not_zeroed)) {
4913 ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
4914 return -EFSCORRUPTED;
4915 }
4916
4917 return 0;
4918 }
4919
4920 static int ext4_load_and_init_journal(struct super_block *sb,
4921 struct ext4_super_block *es,
4922 struct ext4_fs_context *ctx)
4923 {
4924 struct ext4_sb_info *sbi = EXT4_SB(sb);
4925 int err;
4926
4927 err = ext4_load_journal(sb, es, ctx->journal_devnum);
4928 if (err)
4929 return err;
4930
4931 if (ext4_has_feature_64bit(sb) &&
4932 !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
4933 JBD2_FEATURE_INCOMPAT_64BIT)) {
4934 ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
4935 goto out;
4936 }
4937
4938 if (!set_journal_csum_feature_set(sb)) {
4939 ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
4940 "feature set");
4941 goto out;
4942 }
4943
4944 if (test_opt2(sb, JOURNAL_FAST_COMMIT) &&
4945 !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
4946 JBD2_FEATURE_INCOMPAT_FAST_COMMIT)) {
4947 ext4_msg(sb, KERN_ERR,
4948 "Failed to set fast commit journal feature");
4949 goto out;
4950 }
4951
4952 /* We have now updated the journal if required, so we can
4953 * validate the data journaling mode. */
4954 switch (test_opt(sb, DATA_FLAGS)) {
4955 case 0:
4956 /* No mode set, assume a default based on the journal
4957 * capabilities: ORDERED_DATA if the journal can
4958 * cope, else JOURNAL_DATA
4959 */
4960 if (jbd2_journal_check_available_features
4961 (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
4962 set_opt(sb, ORDERED_DATA);
4963 sbi->s_def_mount_opt |= EXT4_MOUNT_ORDERED_DATA;
4964 } else {
4965 set_opt(sb, JOURNAL_DATA);
4966 sbi->s_def_mount_opt |= EXT4_MOUNT_JOURNAL_DATA;
4967 }
4968 break;
4969
4970 case EXT4_MOUNT_ORDERED_DATA:
4971 case EXT4_MOUNT_WRITEBACK_DATA:
4972 if (!jbd2_journal_check_available_features
4973 (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
4974 ext4_msg(sb, KERN_ERR, "Journal does not support "
4975 "requested data journaling mode");
4976 goto out;
4977 }
4978 break;
4979 default:
4980 break;
4981 }
4982
4983 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
4984 test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
4985 ext4_msg(sb, KERN_ERR, "can't mount with "
4986 "journal_async_commit in data=ordered mode");
4987 goto out;
4988 }
4989
4990 set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio);
4991
4992 sbi->s_journal->j_submit_inode_data_buffers =
4993 ext4_journal_submit_inode_data_buffers;
4994 sbi->s_journal->j_finish_inode_data_buffers =
4995 ext4_journal_finish_inode_data_buffers;
4996
4997 return 0;
4998
4999 out:
5000 ext4_journal_destroy(sbi, sbi->s_journal);
5001 return -EINVAL;
5002 }
5003
5004 static int ext4_check_journal_data_mode(struct super_block *sb)
5005 {
5006 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
5007 printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with "
5008 "data=journal disables delayed allocation, "
5009 "dioread_nolock, O_DIRECT and fast_commit support!\n");
5010 /* can't mount with both data=journal and dioread_nolock. */
5011 clear_opt(sb, DIOREAD_NOLOCK);
5012 clear_opt2(sb, JOURNAL_FAST_COMMIT);
5013 if (test_opt2(sb, EXPLICIT_DELALLOC)) {
5014 ext4_msg(sb, KERN_ERR, "can't mount with "
5015 "both data=journal and delalloc");
5016 return -EINVAL;
5017 }
5018 if (test_opt(sb, DAX_ALWAYS)) {
5019 ext4_msg(sb, KERN_ERR, "can't mount with "
5020 "both data=journal and dax");
5021 return -EINVAL;
5022 }
5023 if (ext4_has_feature_encrypt(sb)) {
5024 ext4_msg(sb, KERN_WARNING,
5025 "encrypted files will use data=ordered "
5026 "instead of data journaling mode");
5027 }
5028 if (test_opt(sb, DELALLOC))
5029 clear_opt(sb, DELALLOC);
5030 } else {
5031 sb->s_iflags |= SB_I_CGROUPWB;
5032 }
5033
5034 return 0;
5035 }
5036
5037 static const char *ext4_has_journal_option(struct super_block *sb)
5038 {
5039 struct ext4_sb_info *sbi = EXT4_SB(sb);
5040
5041 if (test_opt(sb, JOURNAL_ASYNC_COMMIT))
5042 return "journal_async_commit";
5043 if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM))
5044 return "journal_checksum";
5045 if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
5046 return "commit=";
5047 if (EXT4_MOUNT_DATA_FLAGS &
5048 (sbi->s_mount_opt ^ sbi->s_def_mount_opt))
5049 return "data=";
5050 if (test_opt(sb, DATA_ERR_ABORT))
5051 return "data_err=abort";
5052 return NULL;
5053 }
5054
5055 /*
5056 * Limit the maximum folio order to 2048 blocks to prevent overestimation
5057 * of reserve handle credits during folio writeback in environments
5058 * where the PAGE_SIZE exceeds 4KB.
5059 */
5060 #define EXT4_MAX_PAGECACHE_ORDER(sb) \
5061 umin(MAX_PAGECACHE_ORDER, (11 + (sb)->s_blocksize_bits - PAGE_SHIFT))
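
/*
 * In data=journal mode, keep the maximum folio order at the minimum
 * supported order; otherwise allow folios up to
 * EXT4_MAX_PAGECACHE_ORDER(sb).
 */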
5062 static void ext4_set_max_mapping_order(struct super_block *sb)
5063 {
5064 struct ext4_sb_info *sbi = EXT4_SB(sb);
5065
5066 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
5067 sbi->s_max_folio_order = sbi->s_min_folio_order;
5068 else
5069 sbi->s_max_folio_order = EXT4_MAX_PAGECACHE_ORDER(sb);
5070 }
5071
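/*
 * Features that cannot handle large folios (currently encryption) are
 * incompatible with block sizes larger than the page size; reject such
 * combinations, otherwise set up the maximum mapping order.
 */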
5072 static int ext4_check_large_folio(struct super_block *sb)
5073 {
5074 const char *err_str = NULL;
5075
5076 if (ext4_has_feature_encrypt(sb))
5077 err_str = "encrypt";
5078
5079 if (!err_str) {
5080 ext4_set_max_mapping_order(sb);
5081 } else if (sb->s_blocksize > PAGE_SIZE) {
5082 ext4_msg(sb, KERN_ERR, "bs(%lu) > ps(%lu) unsupported for %s",
5083 sb->s_blocksize, PAGE_SIZE, err_str);
5084 return -EINVAL;
5085 }
5086
5087 return 0;
5088 }
5089
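/*
 * Read and do basic validation of the on-disk superblock. If the
 * filesystem block size differs from the one used for the initial read,
 * reset the device block size and re-read the superblock at its new
 * location. On success, *lsb holds the logical superblock block number
 * and sbi->s_sbh the superblock buffer.
 */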
5090 static int ext4_load_super(struct super_block *sb, ext4_fsblk_t *lsb,
5091 int silent)
5092 {
5093 struct ext4_sb_info *sbi = EXT4_SB(sb);
5094 struct ext4_super_block *es;
5095 ext4_fsblk_t logical_sb_block;
5096 unsigned long offset = 0;
5097 struct buffer_head *bh;
5098 int ret = -EINVAL;
5099 int blocksize;
5100
5101 blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
5102 if (!blocksize) {
5103 ext4_msg(sb, KERN_ERR, "unable to set blocksize");
5104 return -EINVAL;
5105 }
5106
5107 /*
5108 * The ext4 superblock will not be buffer aligned for other than 1kB
5109 * block sizes. We need to calculate the offset from buffer start.
5110 */
5111 if (blocksize != EXT4_MIN_BLOCK_SIZE) {
5112 logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE;
5113 offset = do_div(logical_sb_block, blocksize);
5114 } else {
5115 logical_sb_block = sbi->s_sb_block;
5116 }
5117
5118 bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
5119 if (IS_ERR(bh)) {
5120 ext4_msg(sb, KERN_ERR, "unable to read superblock");
5121 return PTR_ERR(bh);
5122 }
5123 /*
5124 * Note: s_es must be initialized as soon as possible because
5125 * some ext4 macros depend on its value
5126 */
5127 es = (struct ext4_super_block *) (bh->b_data + offset);
5128 sbi->s_es = es;
5129 sb->s_magic = le16_to_cpu(es->s_magic);
5130 if (sb->s_magic != EXT4_SUPER_MAGIC) {
5131 if (!silent)
5132 ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
5133 goto out;
5134 }
5135
5136 if (le32_to_cpu(es->s_log_block_size) >
5137 (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
5138 ext4_msg(sb, KERN_ERR,
5139 "Invalid log block size: %u",
5140 le32_to_cpu(es->s_log_block_size));
5141 goto out;
5142 }
5143 if (le32_to_cpu(es->s_log_cluster_size) >
5144 (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
5145 ext4_msg(sb, KERN_ERR,
5146 "Invalid log cluster size: %u",
5147 le32_to_cpu(es->s_log_cluster_size));
5148 goto out;
5149 }
5150
5151 blocksize = EXT4_MIN_BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
5152
5153 /*
5154 * If the default block size is not the same as the real block size,
5155 * we need to reload it.
5156 */
5157 if (sb->s_blocksize == blocksize)
5158 goto success;
5159
5160 /*
5161 * bh must be released before kill_bdev(), otherwise neither it
5162 * nor its page will be freed. kill_bdev() is called by
5163 * sb_set_blocksize().
5164 */
5165 brelse(bh);
5166 /* Validate the filesystem blocksize */
5167 if (!sb_set_blocksize(sb, blocksize)) {
5168 ext4_msg(sb, KERN_ERR, "bad block size %d",
5169 blocksize);
5170 bh = NULL;
5171 goto out;
5172 }
5173
5174 logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE;
5175 offset = do_div(logical_sb_block, blocksize);
5176 bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
5177 if (IS_ERR(bh)) {
5178 ext4_msg(sb, KERN_ERR, "Can't read superblock on 2nd try");
5179 ret = PTR_ERR(bh);
5180 bh = NULL;
5181 goto out;
5182 }
5183 es = (struct ext4_super_block *)(bh->b_data + offset);
5184 sbi->s_es = es;
5185 if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
5186 ext4_msg(sb, KERN_ERR, "Magic mismatch, very weird!");
5187 goto out;
5188 }
5189
5190 success:
5191 sbi->s_min_folio_order = get_order(blocksize);
5192 *lsb = logical_sb_block;
5193 sbi->s_sbh = bh;
5194 return 0;
5195 out:
5196 brelse(bh);
5197 return ret;
5198 }
5199
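/*
 * Initialize directory hashing state from the superblock: validate the
 * default hash version, copy the hash seed, and select signed vs.
 * unsigned character hashing for htree directories.
 */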
5200 static int ext4_hash_info_init(struct super_block *sb)
5201 {
5202 struct ext4_sb_info *sbi = EXT4_SB(sb);
5203 struct ext4_super_block *es = sbi->s_es;
5204 unsigned int i;
5205
5206 sbi->s_def_hash_version = es->s_def_hash_version;
5207
5208 if (sbi->s_def_hash_version > DX_HASH_LAST) {
5209 ext4_msg(sb, KERN_ERR,
5210 "Invalid default hash set in the superblock");
5211 return -EINVAL;
5212 } else if (sbi->s_def_hash_version == DX_HASH_SIPHASH) {
5213 ext4_msg(sb, KERN_ERR,
5214 "SIPHASH is not a valid default hash value");
5215 return -EINVAL;
5216 }
5217
5218 for (i = 0; i < 4; i++)
5219 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
5220
5221 if (ext4_has_feature_dir_index(sb)) {
5222 i = le32_to_cpu(es->s_flags);
5223 if (i & EXT2_FLAGS_UNSIGNED_HASH)
5224 sbi->s_hash_unsigned = 3;
5225 else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
5226 #ifdef __CHAR_UNSIGNED__
5227 if (!sb_rdonly(sb))
5228 es->s_flags |=
5229 cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
5230 sbi->s_hash_unsigned = 3;
5231 #else
5232 if (!sb_rdonly(sb))
5233 es->s_flags |=
5234 cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
5235 #endif
5236 }
5237 }
5238 return 0;
5239 }
5240
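/*
 * Read the block group metadata parameters (descriptor size, blocks and
 * inodes per group, and so on) from the superblock and sanity-check them.
 */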
5241 static int ext4_block_group_meta_init(struct super_block *sb, int silent)
5242 {
5243 struct ext4_sb_info *sbi = EXT4_SB(sb);
5244 struct ext4_super_block *es = sbi->s_es;
5245 int has_huge_files;
5246
5247 has_huge_files = ext4_has_feature_huge_file(sb);
5248 sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
5249 has_huge_files);
5250 sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
5251
5252 sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
5253 if (ext4_has_feature_64bit(sb)) {
5254 if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
5255 sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
5256 !is_power_of_2(sbi->s_desc_size)) {
5257 ext4_msg(sb, KERN_ERR,
5258 "unsupported descriptor size %lu",
5259 sbi->s_desc_size);
5260 return -EINVAL;
5261 }
5262 } else
5263 sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
5264
5265 sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
5266 sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
5267
5268 sbi->s_inodes_per_block = sb->s_blocksize / EXT4_INODE_SIZE(sb);
5269 if (sbi->s_inodes_per_block == 0 || sbi->s_blocks_per_group == 0) {
5270 if (!silent)
5271 ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
5272 return -EINVAL;
5273 }
5274 if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
5275 sbi->s_inodes_per_group > sb->s_blocksize * 8) {
5276 ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
5277 sbi->s_inodes_per_group);
5278 return -EINVAL;
5279 }
5280 sbi->s_itb_per_group = sbi->s_inodes_per_group /
5281 sbi->s_inodes_per_block;
5282 sbi->s_desc_per_block = sb->s_blocksize / EXT4_DESC_SIZE(sb);
5283 sbi->s_mount_state = le16_to_cpu(es->s_state) & ~EXT4_FC_REPLAY;
5284 sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
5285 sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
5286
5287 return 0;
5288 }
5289
5290 /*
5291 * It's hard to get stripe-aligned blocks if the stripe is not aligned
5292 * with the cluster size, so just disable stripe and alert the user to
5293 * simplify the code and avoid stripe-aligned allocation, which would rarely succeed.
5294 */
5295 static bool ext4_is_stripe_incompatible(struct super_block *sb, unsigned long stripe)
5296 {
5297 struct ext4_sb_info *sbi = EXT4_SB(sb);
5298 return (stripe > 0 && sbi->s_cluster_ratio > 1 &&
5299 stripe % sbi->s_cluster_ratio != 0);
5300 }
5301
5302 static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
5303 {
5304 struct ext4_super_block *es = NULL;
5305 struct ext4_sb_info *sbi = EXT4_SB(sb);
5306 ext4_fsblk_t logical_sb_block;
5307 struct inode *root;
5308 int needs_recovery;
5309 int err;
5310 ext4_group_t first_not_zeroed;
5311 struct ext4_fs_context *ctx = fc->fs_private;
5312 int silent = fc->sb_flags & SB_SILENT;
5313
5314 /* Set defaults for the variables that will be set during parsing */
5315 if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO))
5316 ctx->journal_ioprio = EXT4_DEF_JOURNAL_IOPRIO;
5317
5318 sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
5319 sbi->s_sectors_written_start =
5320 part_stat_read(sb->s_bdev, sectors[STAT_WRITE]);
5321
5322 err = ext4_load_super(sb, &logical_sb_block, silent);
5323 if (err)
5324 goto out_fail;
5325
5326 es = sbi->s_es;
5327 sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
5328
5329 err = ext4_init_metadata_csum(sb, es);
5330 if (err)
5331 goto failed_mount;
5332
5333 ext4_set_def_opts(sb, es);
5334
5335 sbi->s_resuid = make_kuid(&init_user_ns, ext4_get_resuid(es));
5336 sbi->s_resgid = make_kgid(&init_user_ns, ext4_get_resuid(es));
5337 sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
5338 sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
5339 sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
5340 sbi->s_sb_update_kb = EXT4_DEF_SB_UPDATE_INTERVAL_KB;
5341 sbi->s_sb_update_sec = EXT4_DEF_SB_UPDATE_INTERVAL_SEC;
5342
5343 /*
5344 * Set the default s_li_wait_mult for lazyinit, in case no mount
5345 * option is specified.
5346 */
5347 sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
5348
5349 err = ext4_inode_info_init(sb, es);
5350 if (err)
5351 goto failed_mount;
5352
5353 err = parse_apply_sb_mount_options(sb, ctx);
5354 if (err < 0)
5355 goto failed_mount;
5356
5357 sbi->s_def_mount_opt = sbi->s_mount_opt;
5358 sbi->s_def_mount_opt2 = sbi->s_mount_opt2;
5359
5360 err = ext4_check_opt_consistency(fc, sb);
5361 if (err < 0)
5362 goto failed_mount;
5363
5364 ext4_apply_options(fc, sb);
5365
5366 err = ext4_check_large_folio(sb);
5367 if (err < 0)
5368 goto failed_mount;
5369
5370 err = ext4_encoding_init(sb, es);
5371 if (err)
5372 goto failed_mount;
5373
5374 err = ext4_check_journal_data_mode(sb);
5375 if (err)
5376 goto failed_mount;
5377
5378 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
5379 (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
5380
5381 /* HSM events are allowed by default. */
5382 sb->s_iflags |= SB_I_ALLOW_HSM;
5383
5384 err = ext4_check_feature_compatibility(sb, es, silent);
5385 if (err)
5386 goto failed_mount;
5387
5388 err = ext4_block_group_meta_init(sb, silent);
5389 if (err)
5390 goto failed_mount;
5391
5392 err = ext4_hash_info_init(sb);
5393 if (err)
5394 goto failed_mount;
5395
5396 err = ext4_handle_clustersize(sb);
5397 if (err)
5398 goto failed_mount;
5399
5400 err = ext4_check_geometry(sb, es);
5401 if (err)
5402 goto failed_mount;
5403
5404 timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
5405 spin_lock_init(&sbi->s_error_lock);
5406 INIT_WORK(&sbi->s_sb_upd_work, update_super_work);
5407
5408 err = ext4_group_desc_init(sb, es, logical_sb_block, &first_not_zeroed);
5409 if (err)
5410 goto failed_mount3;
5411
5412 err = ext4_es_register_shrinker(sbi);
5413 if (err)
5414 goto failed_mount3;
5415
5416 sbi->s_stripe = ext4_get_stripe_size(sbi);
5417 if (ext4_is_stripe_incompatible(sb, sbi->s_stripe)) {
5418 ext4_msg(sb, KERN_WARNING,
5419 "stripe (%lu) is not aligned with cluster size (%u), "
5420 "stripe is disabled",
5421 sbi->s_stripe, sbi->s_cluster_ratio);
5422 sbi->s_stripe = 0;
5423 }
5424 sbi->s_extent_max_zeroout_kb = 32;
5425
5426 /*
5427 * set up enough so that it can read an inode
5428 */
5429 sb->s_op = &ext4_sops;
5430 sb->s_export_op = &ext4_export_ops;
5431 sb->s_xattr = ext4_xattr_handlers;
5432 #ifdef CONFIG_FS_ENCRYPTION
5433 sb->s_cop = &ext4_cryptops;
5434 #endif
5435 #ifdef CONFIG_FS_VERITY
5436 sb->s_vop = &ext4_verityops;
5437 #endif
5438 #ifdef CONFIG_QUOTA
5439 sb->dq_op = &ext4_quota_operations;
5440 if (ext4_has_feature_quota(sb))
5441 sb->s_qcop = &dquot_quotactl_sysfile_ops;
5442 else
5443 sb->s_qcop = &ext4_qctl_operations;
5444 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
5445 #endif
5446 super_set_uuid(sb, es->s_uuid, sizeof(es->s_uuid));
5447 super_set_sysfs_name_bdev(sb);
5448
5449 INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
5450 mutex_init(&sbi->s_orphan_lock);
5451
5452 spin_lock_init(&sbi->s_bdev_wb_lock);
5453
5454 ext4_atomic_write_init(sb);
5455 ext4_fast_commit_init(sb);
5456
5457 sb->s_root = NULL;
5458
5459 needs_recovery = (es->s_last_orphan != 0 ||
5460 ext4_has_feature_orphan_present(sb) ||
5461 ext4_has_feature_journal_needs_recovery(sb));
5462
5463 if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb)) {
5464 err = ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block));
5465 if (err)
5466 goto failed_mount3a;
5467 }
5468
5469 err = -EINVAL;
5470 /*
5471 * The first inode we look at is the journal inode. Don't try
5472 * root first: it may be modified in the journal!
5473 */
5474 if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
5475 err = ext4_load_and_init_journal(sb, es, ctx);
5476 if (err)
5477 goto failed_mount3a;
5478 if (bdev_read_only(sb->s_bdev))
5479 needs_recovery = 0;
5480 } else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
5481 ext4_has_feature_journal_needs_recovery(sb)) {
5482 ext4_msg(sb, KERN_ERR, "required journal recovery "
5483 "suppressed and not mounted read-only");
5484 goto failed_mount3a;
5485 } else {
5486 const char *journal_option;
5487
5488 /* Nojournal mode, all journal mount options are illegal */
5489 journal_option = ext4_has_journal_option(sb);
5490 if (journal_option != NULL) {
5491 ext4_msg(sb, KERN_ERR,
5492 "can't mount with %s, fs mounted w/o journal",
5493 journal_option);
5494 goto failed_mount3a;
5495 }
5496
5497 sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
5498 clear_opt(sb, JOURNAL_CHECKSUM);
5499 clear_opt(sb, DATA_FLAGS);
5500 clear_opt2(sb, JOURNAL_FAST_COMMIT);
5501 sbi->s_journal = NULL;
5502 needs_recovery = 0;
5503 }
5504
5505 if (!test_opt(sb, NO_MBCACHE)) {
5506 sbi->s_ea_block_cache = ext4_xattr_create_cache();
5507 if (!sbi->s_ea_block_cache) {
5508 ext4_msg(sb, KERN_ERR,
5509 "Failed to create ea_block_cache");
5510 err = -EINVAL;
5511 goto failed_mount_wq;
5512 }
5513
5514 if (ext4_has_feature_ea_inode(sb)) {
5515 sbi->s_ea_inode_cache = ext4_xattr_create_cache();
5516 if (!sbi->s_ea_inode_cache) {
5517 ext4_msg(sb, KERN_ERR,
5518 "Failed to create ea_inode_cache");
5519 err = -EINVAL;
5520 goto failed_mount_wq;
5521 }
5522 }
5523 }
5524
5525 /*
5526 * Get the # of file system overhead blocks from the
5527 * superblock if present.
5528 */
5529 sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
5530 /* ignore the precalculated value if it is ridiculous */
5531 if (sbi->s_overhead > ext4_blocks_count(es))
5532 sbi->s_overhead = 0;
5533 /*
5534 * If the bigalloc feature is not enabled recalculating the
5535 * overhead doesn't take long, so we might as well just redo
5536 * it to make sure we are using the correct value.
5537 */
5538 if (!ext4_has_feature_bigalloc(sb))
5539 sbi->s_overhead = 0;
5540 if (sbi->s_overhead == 0) {
5541 err = ext4_calculate_overhead(sb);
5542 if (err)
5543 goto failed_mount_wq;
5544 }
5545
5546 /*
5547 * The maximum number of concurrent works can be high and
5548 * concurrency isn't really necessary. Limit it to 1.
5549 */
5550 EXT4_SB(sb)->rsv_conversion_wq =
5551 alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
5552 if (!EXT4_SB(sb)->rsv_conversion_wq) {
5553 printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
5554 err = -ENOMEM;
5555 goto failed_mount4;
5556 }
5557
5558 /*
5559 * The jbd2_journal_load will have done any necessary log recovery,
5560 * so we can safely mount the rest of the filesystem now.
5561 */
5562
5563 root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
5564 if (IS_ERR(root)) {
5565 ext4_msg(sb, KERN_ERR, "get root inode failed");
5566 err = PTR_ERR(root);
5567 root = NULL;
5568 goto failed_mount4;
5569 }
5570 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
5571 ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
5572 iput(root);
5573 err = -EFSCORRUPTED;
5574 goto failed_mount4;
5575 }
5576
5577 generic_set_sb_d_ops(sb);
5578 sb->s_root = d_make_root(root);
5579 if (!sb->s_root) {
5580 ext4_msg(sb, KERN_ERR, "get root dentry failed");
5581 err = -ENOMEM;
5582 goto failed_mount4;
5583 }
5584
5585 err = ext4_setup_super(sb, es, sb_rdonly(sb));
5586 if (err == -EROFS) {
5587 sb->s_flags |= SB_RDONLY;
5588 } else if (err)
5589 goto failed_mount4a;
5590
5591 ext4_set_resv_clusters(sb);
5592
5593 if (test_opt(sb, BLOCK_VALIDITY)) {
5594 err = ext4_setup_system_zone(sb);
5595 if (err) {
5596 ext4_msg(sb, KERN_ERR, "failed to initialize system "
5597 "zone (%d)", err);
5598 goto failed_mount4a;
5599 }
5600 }
5601 ext4_fc_replay_cleanup(sb);
5602
5603 ext4_ext_init(sb);
5604
5605 /*
5606 * Enable optimize_scan if number of groups is > threshold. This can be
5607 * turned off by passing "mb_optimize_scan=0". This can also be
5608 * turned on forcefully by passing "mb_optimize_scan=1".
5609 */
5610 if (!(ctx->spec & EXT4_SPEC_mb_optimize_scan)) {
5611 if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD)
5612 set_opt2(sb, MB_OPTIMIZE_SCAN);
5613 else
5614 clear_opt2(sb, MB_OPTIMIZE_SCAN);
5615 }
5616
5617 err = ext4_percpu_param_init(sbi);
5618 if (err)
5619 goto failed_mount5;
5620
5621 err = ext4_mb_init(sb);
5622 if (err) {
5623 ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
5624 err);
5625 goto failed_mount5;
5626 }
5627
5628 /*
5629 * We can only set up the journal commit callback once
5630 * mballoc is initialized
5631 */
5632 if (sbi->s_journal)
5633 sbi->s_journal->j_commit_callback =
5634 ext4_journal_commit_callback;
5635
5636 if (ext4_has_feature_flex_bg(sb))
5637 if (!ext4_fill_flex_info(sb)) {
5638 ext4_msg(sb, KERN_ERR,
5639 "unable to initialize "
5640 "flex_bg meta info!");
5641 err = -ENOMEM;
5642 goto failed_mount6;
5643 }
5644
5645 err = ext4_register_li_request(sb, first_not_zeroed);
5646 if (err)
5647 goto failed_mount6;
5648
5649 err = ext4_init_orphan_info(sb);
5650 if (err)
5651 goto failed_mount7;
5652 #ifdef CONFIG_QUOTA
5653 /* Enable quota usage during mount. */
5654 if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
5655 err = ext4_enable_quotas(sb);
5656 if (err)
5657 goto failed_mount8;
5658 }
5659 #endif /* CONFIG_QUOTA */
5660
5661 /*
5662 * Save the original bdev mapping's wb_err value, which can be
5663 * used to detect metadata async write errors.
5664 */
5665 errseq_check_and_advance(&sb->s_bdev->bd_mapping->wb_err,
5666 &sbi->s_bdev_wb_err);
5667 EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
5668 ext4_orphan_cleanup(sb, es);
5669 EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
5670 /*
5671 * Update the checksum after updating free space/inode counters and
5672 * ext4_orphan_cleanup. Otherwise the superblock can have an incorrect
5673 * checksum in the buffer cache until it is written out and
5674 * e2fsprogs programs trying to open a file system immediately
5675 * after it is mounted can fail.
5676 */
5677 ext4_superblock_csum_set(sb);
5678 if (needs_recovery) {
5679 ext4_msg(sb, KERN_INFO, "recovery complete");
5680 err = ext4_mark_recovery_complete(sb, es);
5681 if (err)
5682 goto failed_mount9;
5683 }
5684
5685 if (test_opt(sb, DISCARD) && !bdev_max_discard_sectors(sb->s_bdev)) {
5686 ext4_msg(sb, KERN_WARNING,
5687 "mounting with \"discard\" option, but the device does not support discard");
5688 clear_opt(sb, DISCARD);
5689 }
5690
5691 if (es->s_error_count) {
5692 sbi->s_err_report_sec = 5*60; /* first time 5 minutes */
5693 mod_timer(&sbi->s_err_report,
5694 jiffies + secs_to_jiffies(sbi->s_err_report_sec));
5695 }
5696 sbi->s_err_report_sec = 24*60*60; /* Once a day */
5697
5698 /* Enable message ratelimiting. Default is 10 messages per 5 secs. */
5699 ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
5700 ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
5701 ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
5702 atomic_set(&sbi->s_warning_count, 0);
5703 atomic_set(&sbi->s_msg_count, 0);
5704
5705 /* Register sysfs after all initializations are complete. */
5706 err = ext4_register_sysfs(sb);
5707 if (err)
5708 goto failed_mount9;
5709
5710 return 0;
5711
5712 failed_mount9:
5713 ext4_quotas_off(sb, EXT4_MAXQUOTAS);
5714 failed_mount8: __maybe_unused
5715 ext4_release_orphan_info(sb);
5716 failed_mount7:
5717 ext4_unregister_li_request(sb);
5718 failed_mount6:
5719 ext4_mb_release(sb);
5720 ext4_flex_groups_free(sbi);
5721 failed_mount5:
5722 ext4_percpu_param_destroy(sbi);
5723 ext4_ext_release(sb);
5724 ext4_release_system_zone(sb);
5725 failed_mount4a:
5726 dput(sb->s_root);
5727 sb->s_root = NULL;
5728 failed_mount4:
5729 ext4_msg(sb, KERN_ERR, "mount failed");
5730 if (EXT4_SB(sb)->rsv_conversion_wq)
5731 destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
5732 failed_mount_wq:
5733 ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
5734 sbi->s_ea_inode_cache = NULL;
5735
5736 ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
5737 sbi->s_ea_block_cache = NULL;
5738
5739 if (sbi->s_journal) {
5740 ext4_journal_destroy(sbi, sbi->s_journal);
5741 }
5742 failed_mount3a:
5743 ext4_es_unregister_shrinker(sbi);
5744 failed_mount3:
5745 /* flush s_sb_upd_work before sbi destroy */
5746 flush_work(&sbi->s_sb_upd_work);
5747 ext4_stop_mmpd(sbi);
5748 timer_delete_sync(&sbi->s_err_report);
5749 ext4_group_desc_free(sbi);
5750 failed_mount:
5751 #if IS_ENABLED(CONFIG_UNICODE)
5752 utf8_unload(sb->s_encoding);
5753 #endif
5754
5755 #ifdef CONFIG_QUOTA
5756 for (unsigned int i = 0; i < EXT4_MAXQUOTAS; i++)
5757 kfree(get_qf_name(sb, sbi, i));
5758 #endif
5759 fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
5760 brelse(sbi->s_sbh);
5761 if (sbi->s_journal_bdev_file) {
5762 invalidate_bdev(file_bdev(sbi->s_journal_bdev_file));
5763 bdev_fput(sbi->s_journal_bdev_file);
5764 }
5765 out_fail:
5766 invalidate_bdev(sb->s_bdev);
5767 sb->s_fs_info = NULL;
5768 return err;
5769 }
5770
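/*
 * Mount entry point called via ext4_get_tree(): allocates the sb_info,
 * delegates the real work to __ext4_fill_super() and logs the result.
 */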
5771 static int ext4_fill_super(struct super_block *sb, struct fs_context *fc)
5772 {
5773 struct ext4_fs_context *ctx = fc->fs_private;
5774 struct ext4_sb_info *sbi;
5775 const char *descr;
5776 int ret;
5777
5778 sbi = ext4_alloc_sbi(sb);
5779 if (!sbi)
5780 return -ENOMEM;
5781
5782 fc->s_fs_info = sbi;
5783
5784 /* Cleanup superblock name */
5785 strreplace(sb->s_id, '/', '!');
5786
5787 sbi->s_sb_block = 1; /* Default super block location */
5788 if (ctx->spec & EXT4_SPEC_s_sb_block)
5789 sbi->s_sb_block = ctx->s_sb_block;
5790
5791 ret = __ext4_fill_super(fc, sb);
5792 if (ret < 0)
5793 goto free_sbi;
5794
5795 if (sbi->s_journal) {
5796 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
5797 descr = " journalled data mode";
5798 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
5799 descr = " ordered data mode";
5800 else
5801 descr = " writeback data mode";
5802 } else
5803 descr = "out journal";
5804
5805 if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
5806 ext4_msg(sb, KERN_INFO, "mounted filesystem %pU %s with%s. "
5807 "Quota mode: %s.", &sb->s_uuid,
5808 sb_rdonly(sb) ? "ro" : "r/w", descr,
5809 ext4_quota_mode(sb));
5810
5811 /* Update the s_overhead_clusters if necessary */
5812 ext4_update_overhead(sb, false);
5813 return 0;
5814
5815 free_sbi:
5816 ext4_free_sbi(sbi);
5817 fc->s_fs_info = NULL;
5818 return ret;
5819 }
5820
5821 static int ext4_get_tree(struct fs_context *fc)
5822 {
5823 return get_tree_bdev(fc, ext4_fill_super);
5824 }
5825
5826 /*
5827 * Setup any per-fs journal parameters now. We'll do this both on
5828 * initial mount, once the journal has been initialised but before we've
5829 * done any recovery; and again on any subsequent remount.
5830 */
5831 static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
5832 {
5833 struct ext4_sb_info *sbi = EXT4_SB(sb);
5834
5835 journal->j_commit_interval = sbi->s_commit_interval;
5836 journal->j_min_batch_time = sbi->s_min_batch_time;
5837 journal->j_max_batch_time = sbi->s_max_batch_time;
5838 ext4_fc_init(sb, journal);
5839
5840 write_lock(&journal->j_state_lock);
5841 if (test_opt(sb, BARRIER))
5842 journal->j_flags |= JBD2_BARRIER;
5843 else
5844 journal->j_flags &= ~JBD2_BARRIER;
5845 /*
5846 * Always enable the journal cycle record option, so that the journal
5847 * keeps recording transactions continuously across mounts.
5848 */
5849 journal->j_flags |= JBD2_CYCLE_RECORD;
5850 write_unlock(&journal->j_state_lock);
5851 }
5852
5853 static struct inode *ext4_get_journal_inode(struct super_block *sb,
5854 unsigned int journal_inum)
5855 {
5856 struct inode *journal_inode;
5857
5858 /*
5859 * Test for the existence of a valid inode on disk. Bad things
5860 * happen if we iget() an unused inode, as the subsequent iput()
5861 * will try to delete it.
5862 */
5863 journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
5864 if (IS_ERR(journal_inode)) {
5865 ext4_msg(sb, KERN_ERR, "no journal found");
5866 return ERR_CAST(journal_inode);
5867 }
5868 if (!journal_inode->i_nlink) {
5869 make_bad_inode(journal_inode);
5870 iput(journal_inode);
5871 ext4_msg(sb, KERN_ERR, "journal inode is deleted");
5872 return ERR_PTR(-EFSCORRUPTED);
5873 }
5874 if (!S_ISREG(journal_inode->i_mode) || IS_ENCRYPTED(journal_inode)) {
5875 ext4_msg(sb, KERN_ERR, "invalid journal inode");
5876 iput(journal_inode);
5877 return ERR_PTR(-EFSCORRUPTED);
5878 }
5879
5880 ext4_debug("Journal inode found at %p: %lld bytes\n",
5881 journal_inode, journal_inode->i_size);
5882 return journal_inode;
5883 }
5884
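/*
 * Map a logical block of the journal inode to its physical block for
 * jbd2. A failed mapping means the journal is corrupted, so abort it.
 */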
5885 static int ext4_journal_bmap(journal_t *journal, sector_t *block)
5886 {
5887 struct ext4_map_blocks map;
5888 int ret;
5889
5890 if (journal->j_inode == NULL)
5891 return 0;
5892
5893 map.m_lblk = *block;
5894 map.m_len = 1;
5895 ret = ext4_map_blocks(NULL, journal->j_inode, &map, 0);
5896 if (ret <= 0) {
5897 ext4_msg(journal->j_inode->i_sb, KERN_CRIT,
5898 "journal bmap failed: block %llu ret %d\n",
5899 *block, ret);
5900 jbd2_journal_abort(journal, ret ? ret : -EFSCORRUPTED);
5901 return ret;
5902 }
5903 *block = map.m_pblk;
5904 return 0;
5905 }
5906
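/*
 * Open the journal stored in one of the filesystem's own inodes and set
 * up its parameters.
 */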
5907 static journal_t *ext4_open_inode_journal(struct super_block *sb,
5908 unsigned int journal_inum)
5909 {
5910 struct inode *journal_inode;
5911 journal_t *journal;
5912
5913 journal_inode = ext4_get_journal_inode(sb, journal_inum);
5914 if (IS_ERR(journal_inode))
5915 return ERR_CAST(journal_inode);
5916
5917 journal = jbd2_journal_init_inode(journal_inode);
5918 if (IS_ERR(journal)) {
5919 ext4_msg(sb, KERN_ERR, "Could not load journal inode");
5920 iput(journal_inode);
5921 return ERR_CAST(journal);
5922 }
5923 journal->j_private = sb;
5924 journal->j_bmap = ext4_journal_bmap;
5925 ext4_init_journal_params(sb, journal);
5926 return journal;
5927 }
5928
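/*
 * Open the block device holding an external journal and validate its
 * superblock: magic, the JOURNAL_DEV incompat feature, the checksum
 * (when metadata_csum is enabled) and the journal UUID. On success,
 * *j_start and *j_len describe the journal area on the device.
 */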
5929 static struct file *ext4_get_journal_blkdev(struct super_block *sb,
5930 dev_t j_dev, ext4_fsblk_t *j_start,
5931 ext4_fsblk_t *j_len)
5932 {
5933 struct buffer_head *bh;
5934 struct block_device *bdev;
5935 struct file *bdev_file;
5936 int hblock, blocksize;
5937 ext4_fsblk_t sb_block;
5938 unsigned long offset;
5939 struct ext4_super_block *es;
5940 int errno;
5941
5942 bdev_file = bdev_file_open_by_dev(j_dev,
5943 BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES,
5944 sb, &fs_holder_ops);
5945 if (IS_ERR(bdev_file)) {
5946 ext4_msg(sb, KERN_ERR,
5947 "failed to open journal device unknown-block(%u,%u) %ld",
5948 MAJOR(j_dev), MINOR(j_dev), PTR_ERR(bdev_file));
5949 return bdev_file;
5950 }
5951
5952 bdev = file_bdev(bdev_file);
5953 blocksize = sb->s_blocksize;
5954 hblock = bdev_logical_block_size(bdev);
5955 if (blocksize < hblock) {
5956 ext4_msg(sb, KERN_ERR,
5957 "blocksize too small for journal device");
5958 errno = -EINVAL;
5959 goto out_bdev;
5960 }
5961
5962 sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
5963 offset = EXT4_MIN_BLOCK_SIZE % blocksize;
5964 set_blocksize(bdev_file, blocksize);
5965 bh = __bread(bdev, sb_block, blocksize);
5966 if (!bh) {
5967 ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
5968 "external journal");
5969 errno = -EINVAL;
5970 goto out_bdev;
5971 }
5972
5973 es = (struct ext4_super_block *) (bh->b_data + offset);
5974 if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
5975 !(le32_to_cpu(es->s_feature_incompat) &
5976 EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
5977 ext4_msg(sb, KERN_ERR, "external journal has bad superblock");
5978 errno = -EFSCORRUPTED;
5979 goto out_bh;
5980 }
5981
5982 if ((le32_to_cpu(es->s_feature_ro_compat) &
5983 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
5984 es->s_checksum != ext4_superblock_csum(es)) {
5985 ext4_msg(sb, KERN_ERR, "external journal has corrupt superblock");
5986 errno = -EFSCORRUPTED;
5987 goto out_bh;
5988 }
5989
5990 if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
5991 ext4_msg(sb, KERN_ERR, "journal UUID does not match");
5992 errno = -EFSCORRUPTED;
5993 goto out_bh;
5994 }
5995
5996 *j_start = sb_block + 1;
5997 *j_len = ext4_blocks_count(es);
5998 brelse(bh);
5999 return bdev_file;
6000
6001 out_bh:
6002 brelse(bh);
6003 out_bdev:
6004 bdev_fput(bdev_file);
6005 return ERR_PTR(errno);
6006 }
6007
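/*
 * Open an external journal device and create a jbd2 journal on top of
 * it. External journals with more than one user are not supported.
 */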
6008 static journal_t *ext4_open_dev_journal(struct super_block *sb,
6009 dev_t j_dev)
6010 {
6011 journal_t *journal;
6012 ext4_fsblk_t j_start;
6013 ext4_fsblk_t j_len;
6014 struct file *bdev_file;
6015 int errno = 0;
6016
6017 bdev_file = ext4_get_journal_blkdev(sb, j_dev, &j_start, &j_len);
6018 if (IS_ERR(bdev_file))
6019 return ERR_CAST(bdev_file);
6020
6021 journal = jbd2_journal_init_dev(file_bdev(bdev_file), sb->s_bdev, j_start,
6022 j_len, sb->s_blocksize);
6023 if (IS_ERR(journal)) {
6024 ext4_msg(sb, KERN_ERR, "failed to create device journal");
6025 errno = PTR_ERR(journal);
6026 goto out_bdev;
6027 }
6028 if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
6029 ext4_msg(sb, KERN_ERR, "External journal has more than one "
6030 "user (unsupported) - %d",
6031 be32_to_cpu(journal->j_superblock->s_nr_users));
6032 errno = -EINVAL;
6033 goto out_journal;
6034 }
6035 journal->j_private = sb;
6036 EXT4_SB(sb)->s_journal_bdev_file = bdev_file;
6037 ext4_init_journal_params(sb, journal);
6038 return journal;
6039
6040 out_journal:
6041 ext4_journal_destroy(EXT4_SB(sb), journal);
6042 out_bdev:
6043 bdev_fput(bdev_file);
6044 return ERR_PTR(errno);
6045 }
6046
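/*
 * Locate the journal (inode or external device), check read-only
 * constraints, load or wipe it as appropriate, restore any error
 * information saved in the journal into the superblock, and record a
 * changed journal location.
 */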
6047 static int ext4_load_journal(struct super_block *sb,
6048 struct ext4_super_block *es,
6049 unsigned long journal_devnum)
6050 {
6051 journal_t *journal;
6052 unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
6053 dev_t journal_dev;
6054 int err = 0;
6055 int really_read_only;
6056 int journal_dev_ro;
6057
6058 if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
6059 return -EFSCORRUPTED;
6060
6061 if (journal_devnum &&
6062 journal_devnum != le32_to_cpu(es->s_journal_dev)) {
6063 ext4_msg(sb, KERN_INFO, "external journal device major/minor "
6064 "numbers have changed");
6065 journal_dev = new_decode_dev(journal_devnum);
6066 } else
6067 journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
6068
6069 if (journal_inum && journal_dev) {
6070 ext4_msg(sb, KERN_ERR,
6071 "filesystem has both journal inode and journal device!");
6072 return -EINVAL;
6073 }
6074
6075 if (journal_inum) {
6076 journal = ext4_open_inode_journal(sb, journal_inum);
6077 if (IS_ERR(journal))
6078 return PTR_ERR(journal);
6079 } else {
6080 journal = ext4_open_dev_journal(sb, journal_dev);
6081 if (IS_ERR(journal))
6082 return PTR_ERR(journal);
6083 }
6084
6085 journal_dev_ro = bdev_read_only(journal->j_dev);
6086 really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro;
6087
6088 if (journal_dev_ro && !sb_rdonly(sb)) {
6089 ext4_msg(sb, KERN_ERR,
6090 "journal device read-only, try mounting with '-o ro'");
6091 err = -EROFS;
6092 goto err_out;
6093 }
6094
6095 /*
6096 * Are we loading a blank journal or performing recovery after a
6097 * crash? For recovery, we need to check in advance whether we
6098 * can get read-write access to the device.
6099 */
6100 if (ext4_has_feature_journal_needs_recovery(sb)) {
6101 if (sb_rdonly(sb)) {
6102 ext4_msg(sb, KERN_INFO, "INFO: recovery "
6103 "required on readonly filesystem");
6104 if (really_read_only) {
6105 ext4_msg(sb, KERN_ERR, "write access "
6106 "unavailable, cannot proceed "
6107 "(try mounting with noload)");
6108 err = -EROFS;
6109 goto err_out;
6110 }
6111 ext4_msg(sb, KERN_INFO, "write access will "
6112 "be enabled during recovery");
6113 }
6114 }
6115
6116 if (!(journal->j_flags & JBD2_BARRIER))
6117 ext4_msg(sb, KERN_INFO, "barriers disabled");
6118
6119 if (!ext4_has_feature_journal_needs_recovery(sb))
6120 err = jbd2_journal_wipe(journal, !really_read_only);
6121 if (!err) {
6122 char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
6123 __le16 orig_state;
6124 bool changed = false;
6125
6126 if (save)
6127 memcpy(save, ((char *) es) +
6128 EXT4_S_ERR_START, EXT4_S_ERR_LEN);
6129 err = jbd2_journal_load(journal);
6130 if (save && memcmp(((char *) es) + EXT4_S_ERR_START,
6131 save, EXT4_S_ERR_LEN)) {
6132 memcpy(((char *) es) + EXT4_S_ERR_START,
6133 save, EXT4_S_ERR_LEN);
6134 changed = true;
6135 }
6136 kfree(save);
6137 orig_state = es->s_state;
6138 es->s_state |= cpu_to_le16(EXT4_SB(sb)->s_mount_state &
6139 EXT4_ERROR_FS);
6140 if (orig_state != es->s_state)
6141 changed = true;
6142 /* Write out restored error information to the superblock */
6143 if (changed && !really_read_only) {
6144 int err2;
6145 err2 = ext4_commit_super(sb);
6146 err = err ? : err2;
6147 }
6148 }
6149
6150 if (err) {
6151 ext4_msg(sb, KERN_ERR, "error loading journal");
6152 goto err_out;
6153 }
6154
6155 EXT4_SB(sb)->s_journal = journal;
6156 err = ext4_clear_journal_err(sb, es);
6157 if (err) {
6158 ext4_journal_destroy(EXT4_SB(sb), journal);
6159 return err;
6160 }
6161
6162 if (!really_read_only && journal_devnum &&
6163 journal_devnum != le32_to_cpu(es->s_journal_dev)) {
6164 es->s_journal_dev = cpu_to_le32(journal_devnum);
6165 ext4_commit_super(sb);
6166 }
6167 if (!really_read_only && journal_inum &&
6168 journal_inum != le32_to_cpu(es->s_journal_inum)) {
6169 es->s_journal_inum = cpu_to_le32(journal_inum);
6170 ext4_commit_super(sb);
6171 }
6172
6173 return 0;
6174
6175 err_out:
6176 ext4_journal_destroy(EXT4_SB(sb), journal);
6177 return err;
6178 }
6179
6180 /* Copy state of EXT4_SB(sb) into buffer for on-disk superblock */
6181 static void ext4_update_super(struct super_block *sb)
6182 {
6183 struct ext4_sb_info *sbi = EXT4_SB(sb);
6184 struct ext4_super_block *es = sbi->s_es;
6185 struct buffer_head *sbh = sbi->s_sbh;
6186
6187 lock_buffer(sbh);
6188 /*
6189 * If the file system is mounted read-only, don't update the
6190 * superblock write time. This avoids updating the superblock
6191 * write time when we are mounting the root file system
6192 * read/only but we need to replay the journal; at that point,
6193 * for people who are east of GMT and who make their clock
6194 * tick in localtime for Windows bug-for-bug compatibility,
6195 * the clock is set in the future, and this will cause e2fsck
6196 * to complain and force a full file system check.
6197 */
6198 if (!sb_rdonly(sb))
6199 ext4_update_tstamp(es, s_wtime);
6200 es->s_kbytes_written =
6201 cpu_to_le64(sbi->s_kbytes_written +
6202 ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
6203 sbi->s_sectors_written_start) >> 1));
6204 if (percpu_counter_initialized(&sbi->s_freeclusters_counter))
6205 ext4_free_blocks_count_set(es,
6206 EXT4_C2B(sbi, percpu_counter_sum_positive(
6207 &sbi->s_freeclusters_counter)));
6208 if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
6209 es->s_free_inodes_count =
6210 cpu_to_le32(percpu_counter_sum_positive(
6211 &sbi->s_freeinodes_counter));
6212 /* Copy error information to the on-disk superblock */
6213 spin_lock(&sbi->s_error_lock);
6214 if (sbi->s_add_error_count > 0) {
6215 es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
6216 if (!es->s_first_error_time && !es->s_first_error_time_hi) {
6217 __ext4_update_tstamp(&es->s_first_error_time,
6218 &es->s_first_error_time_hi,
6219 sbi->s_first_error_time);
6220 strtomem_pad(es->s_first_error_func,
6221 sbi->s_first_error_func, 0);
6222 es->s_first_error_line =
6223 cpu_to_le32(sbi->s_first_error_line);
6224 es->s_first_error_ino =
6225 cpu_to_le32(sbi->s_first_error_ino);
6226 es->s_first_error_block =
6227 cpu_to_le64(sbi->s_first_error_block);
6228 es->s_first_error_errcode =
6229 ext4_errno_to_code(sbi->s_first_error_code);
6230 }
6231 __ext4_update_tstamp(&es->s_last_error_time,
6232 &es->s_last_error_time_hi,
6233 sbi->s_last_error_time);
6234 strtomem_pad(es->s_last_error_func, sbi->s_last_error_func, 0);
6235 es->s_last_error_line = cpu_to_le32(sbi->s_last_error_line);
6236 es->s_last_error_ino = cpu_to_le32(sbi->s_last_error_ino);
6237 es->s_last_error_block = cpu_to_le64(sbi->s_last_error_block);
6238 es->s_last_error_errcode =
6239 ext4_errno_to_code(sbi->s_last_error_code);
6240 /*
6241 * Start the daily error reporting function if it hasn't been
6242 * started already and sbi->s_err_report_sec is not zero
6243 */
6244 if (!es->s_error_count && sbi->s_err_report_sec)
6245 mod_timer(&sbi->s_err_report,
6246 jiffies + secs_to_jiffies(sbi->s_err_report_sec));
6247 le32_add_cpu(&es->s_error_count, sbi->s_add_error_count);
6248 sbi->s_add_error_count = 0;
6249 }
6250 spin_unlock(&sbi->s_error_lock);
6251
6252 ext4_superblock_csum_set(sb);
6253 unlock_buffer(sbh);
6254 }
6255
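/*
 * Refresh the on-disk superblock via ext4_update_super() and write it
 * out synchronously, clearing and retrying after a previous write
 * I/O error.
 */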
6256 static int ext4_commit_super(struct super_block *sb)
6257 {
6258 struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
6259
6260 if (!sbh)
6261 return -EINVAL;
6262
6263 ext4_update_super(sb);
6264
6265 lock_buffer(sbh);
6266 /* Buffer got discarded which means block device got invalidated */
6267 if (!buffer_mapped(sbh)) {
6268 unlock_buffer(sbh);
6269 return -EIO;
6270 }
6271
6272 if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
6273 /*
6274 * Oh, dear. A previous attempt to write the
6275 * superblock failed. This could happen because the
6276 * USB device was yanked out. Or it could happen to
6277 * be a transient write error and maybe the block will
6278 * be remapped. Nothing we can do but to retry the
6279 * write and hope for the best.
6280 */
6281 ext4_msg(sb, KERN_ERR, "previous I/O error to "
6282 "superblock detected");
6283 clear_buffer_write_io_error(sbh);
6284 set_buffer_uptodate(sbh);
6285 }
6286 get_bh(sbh);
6287 /* Clear potential dirty bit if it was journalled update */
6288 clear_buffer_dirty(sbh);
6289 sbh->b_end_io = end_buffer_write_sync;
6290 submit_bh(REQ_OP_WRITE | REQ_SYNC |
6291 (test_opt(sb, BARRIER) ? REQ_FUA : 0), sbh);
6292 wait_on_buffer(sbh);
6293 if (buffer_write_io_error(sbh)) {
6294 ext4_msg(sb, KERN_ERR, "I/O error while writing "
6295 "superblock");
6296 clear_buffer_write_io_error(sbh);
6297 set_buffer_uptodate(sbh);
6298 return -EIO;
6299 }
6300 return 0;
6301 }
6302
6303 /*
6304 * Have we just finished recovery? If so, and if we are mounting (or
6305 * remounting) the filesystem readonly, then we will end up with a
6306 * consistent fs on disk. Record that fact.
6307 */
6308 static int ext4_mark_recovery_complete(struct super_block *sb,
6309 struct ext4_super_block *es)
6310 {
6311 int err;
6312 journal_t *journal = EXT4_SB(sb)->s_journal;
6313
6314 if (!ext4_has_feature_journal(sb)) {
6315 if (journal != NULL) {
6316 ext4_error(sb, "Journal got removed while the fs was "
6317 "mounted!");
6318 return -EFSCORRUPTED;
6319 }
6320 return 0;
6321 }
6322 jbd2_journal_lock_updates(journal);
6323 err = jbd2_journal_flush(journal, 0);
6324 if (err < 0)
6325 goto out;
6326
6327 if (sb_rdonly(sb) && (ext4_has_feature_journal_needs_recovery(sb) ||
6328 ext4_has_feature_orphan_present(sb))) {
6329 if (!ext4_orphan_file_empty(sb)) {
6330 ext4_error(sb, "Orphan file not empty on read-only fs.");
6331 err = -EFSCORRUPTED;
6332 goto out;
6333 }
6334 ext4_clear_feature_journal_needs_recovery(sb);
6335 ext4_clear_feature_orphan_present(sb);
6336 ext4_commit_super(sb);
6337 }
6338 out:
6339 jbd2_journal_unlock_updates(journal);
6340 return err;
6341 }
6342
6343 /*
6344 * If we are mounting (or read-write remounting) a filesystem whose journal
6345 * has recorded an error from a previous lifetime, move that error to the
6346 * main filesystem now.
6347 */
6348 static int ext4_clear_journal_err(struct super_block *sb,
6349 struct ext4_super_block *es)
6350 {
6351 journal_t *journal;
6352 int j_errno;
6353 const char *errstr;
6354
6355 if (!ext4_has_feature_journal(sb)) {
6356 ext4_error(sb, "Journal got removed while the fs was mounted!");
6357 return -EFSCORRUPTED;
6358 }
6359
6360 journal = EXT4_SB(sb)->s_journal;
6361
6362 /*
6363 * Now check for any error status which may have been recorded in the
6364 * journal by a prior ext4_error() or ext4_abort()
6365 */
6366
6367 j_errno = jbd2_journal_errno(journal);
6368 if (j_errno) {
6369 char nbuf[16];
6370
6371 errstr = ext4_decode_error(sb, j_errno, nbuf);
6372 ext4_warning(sb, "Filesystem error recorded "
6373 "from previous mount: %s", errstr);
6374
6375 EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
6376 es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
6377 j_errno = ext4_commit_super(sb);
6378 if (j_errno)
6379 return j_errno;
6380 ext4_warning(sb, "Marked fs in need of filesystem check.");
6381
6382 jbd2_journal_clear_err(journal);
6383 jbd2_journal_update_sb_errno(journal);
6384 }
6385 return 0;
6386 }
6387
6388 /*
6389 * Force the running and committing transactions to commit,
6390 * and wait on the commit.
6391 */
6392 int ext4_force_commit(struct super_block *sb)
6393 {
6394 return ext4_journal_force_commit(EXT4_SB(sb)->s_journal);
6395 }
6396
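/*
 * Sync the filesystem: write back non-journalled quotas, kick a journal
 * commit (waiting for it when @wait is set), and issue a cache flush if
 * the transaction commit will not send one for us.
 */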
6397 static int ext4_sync_fs(struct super_block *sb, int wait)
6398 {
6399 int ret = 0;
6400 tid_t target;
6401 bool needs_barrier = false;
6402 struct ext4_sb_info *sbi = EXT4_SB(sb);
6403
6404 ret = ext4_emergency_state(sb);
6405 if (unlikely(ret))
6406 return ret;
6407
6408 trace_ext4_sync_fs(sb, wait);
6409 flush_workqueue(sbi->rsv_conversion_wq);
6410 /*
6411 * Writeback quota in non-journalled quota case - journalled quota has
6412 * no dirty dquots
6413 */
6414 dquot_writeback_dquots(sb, -1);
6415 /*
6416 * Data writeback is possible w/o a journal transaction, so a barrier
6417 * must be sent at the end of the function. But we can skip it if
6418 * transaction_commit will do it for us.
6419 */
6420 if (sbi->s_journal) {
6421 target = jbd2_get_latest_transaction(sbi->s_journal);
6422 if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
6423 !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
6424 needs_barrier = true;
6425
6426 if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
6427 if (wait)
6428 ret = jbd2_log_wait_commit(sbi->s_journal,
6429 target);
6430 }
6431 } else if (wait && test_opt(sb, BARRIER))
6432 needs_barrier = true;
6433 if (needs_barrier) {
6434 int err;
6435 err = blkdev_issue_flush(sb->s_bdev);
6436 if (!ret)
6437 ret = err;
6438 }
6439
6440 return ret;
6441 }
6442
6443 /*
6444 * LVM calls this function before a (read-only) snapshot is created. This
6445 * gives us a chance to flush the journal completely and mark the fs clean.
6446 *
6447 * Note that this function alone cannot bring the filesystem into a clean
6448 * state; it relies on the upper layer to stop all data & metadata
6449 * modifications.
6450 */
6451 static int ext4_freeze(struct super_block *sb)
6452 {
6453 int error = 0;
6454 journal_t *journal = EXT4_SB(sb)->s_journal;
6455
6456 if (journal) {
6457 /* Now we set up the journal barrier. */
6458 jbd2_journal_lock_updates(journal);
6459
6460 /*
6461 * Don't clear the needs_recovery flag if we failed to
6462 * flush the journal.
6463 */
6464 error = jbd2_journal_flush(journal, 0);
6465 if (error < 0)
6466 goto out;
6467
6468 /* Journal blocked and flushed, clear needs_recovery flag. */
6469 ext4_clear_feature_journal_needs_recovery(sb);
6470 if (ext4_orphan_file_empty(sb))
6471 ext4_clear_feature_orphan_present(sb);
6472 }
6473
6474 error = ext4_commit_super(sb);
6475 out:
6476 if (journal)
6477 /* we rely on upper layer to stop further updates */
6478 jbd2_journal_unlock_updates(journal);
6479 return error;
6480 }
6481
6482 /*
6483 * Called by LVM after the snapshot is done. We need to reset the RECOVER
6484 * flag here, even though the filesystem is not technically dirty yet.
6485 */
6486 static int ext4_unfreeze(struct super_block *sb)
6487 {
6488 if (ext4_emergency_state(sb))
6489 return 0;
6490
6491 if (EXT4_SB(sb)->s_journal) {
6492 /* Reset the needs_recovery flag before the fs is unlocked. */
6493 ext4_set_feature_journal_needs_recovery(sb);
6494 if (ext4_has_feature_orphan_file(sb))
6495 ext4_set_feature_orphan_present(sb);
6496 }
6497
6498 ext4_commit_super(sb);
6499 return 0;
6500 }
6501
6502 /*
6503 * Structure to save mount options for ext4_remount's benefit
6504 */
6505 struct ext4_mount_options {
6506 unsigned long s_mount_opt;
6507 unsigned long s_mount_opt2;
6508 kuid_t s_resuid;
6509 kgid_t s_resgid;
6510 unsigned long s_commit_interval;
6511 u32 s_min_batch_time, s_max_batch_time;
6512 #ifdef CONFIG_QUOTA
6513 int s_jquota_fmt;
6514 char *s_qf_names[EXT4_MAXQUOTAS];
6515 #endif
6516 };
6517
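/*
 * Do the bulk of the remount work: apply the new mount options, handle
 * ro<->rw transitions (journal recovery, quota suspend/resume, MMP),
 * and roll back to the old options on failure.
 */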
6518 static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
6519 {
6520 struct ext4_fs_context *ctx = fc->fs_private;
6521 struct ext4_super_block *es;
6522 struct ext4_sb_info *sbi = EXT4_SB(sb);
6523 unsigned long old_sb_flags;
6524 struct ext4_mount_options old_opts;
6525 ext4_group_t g;
6526 int err = 0;
6527 int alloc_ctx;
6528 #ifdef CONFIG_QUOTA
6529 int enable_quota = 0;
6530 int i, j;
6531 char *to_free[EXT4_MAXQUOTAS];
6532 #endif
6533
6534
6535 /* Store the original options */
6536 old_sb_flags = sb->s_flags;
6537 old_opts.s_mount_opt = sbi->s_mount_opt;
6538 old_opts.s_mount_opt2 = sbi->s_mount_opt2;
6539 old_opts.s_resuid = sbi->s_resuid;
6540 old_opts.s_resgid = sbi->s_resgid;
6541 old_opts.s_commit_interval = sbi->s_commit_interval;
6542 old_opts.s_min_batch_time = sbi->s_min_batch_time;
6543 old_opts.s_max_batch_time = sbi->s_max_batch_time;
6544 #ifdef CONFIG_QUOTA
6545 old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
6546 for (i = 0; i < EXT4_MAXQUOTAS; i++)
6547 if (sbi->s_qf_names[i]) {
6548 char *qf_name = get_qf_name(sb, sbi, i);
6549
6550 old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL);
6551 if (!old_opts.s_qf_names[i]) {
6552 for (j = 0; j < i; j++)
6553 kfree(old_opts.s_qf_names[j]);
6554 return -ENOMEM;
6555 }
6556 } else
6557 old_opts.s_qf_names[i] = NULL;
6558 #endif
6559 if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)) {
6560 if (sbi->s_journal && sbi->s_journal->j_task->io_context)
6561 ctx->journal_ioprio =
6562 sbi->s_journal->j_task->io_context->ioprio;
6563 else
6564 ctx->journal_ioprio = EXT4_DEF_JOURNAL_IOPRIO;
6565
6566 }
6567
6568 if ((ctx->spec & EXT4_SPEC_s_stripe) &&
6569 ext4_is_stripe_incompatible(sb, ctx->s_stripe)) {
6570 ext4_msg(sb, KERN_WARNING,
6571 "stripe (%lu) is not aligned with cluster size (%u), "
6572 "stripe is disabled",
6573 ctx->s_stripe, sbi->s_cluster_ratio);
6574 ctx->s_stripe = 0;
6575 }
6576
6577 /*
6578 * Changing the DIOREAD_NOLOCK or DELALLOC mount options may cause
6579 * two calls to ext4_should_dioread_nolock() to return inconsistent
6580 * values, triggering a WARN_ON in ext4_add_complete_io(). We grab
6581 * s_writepages_rwsem here to avoid races between writepages
6582 * operations and remount.
6583 */
6584 alloc_ctx = ext4_writepages_down_write(sb);
6585 ext4_apply_options(fc, sb);
6586 ext4_writepages_up_write(sb, alloc_ctx);
6587
6588 if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
6589 test_opt(sb, JOURNAL_CHECKSUM)) {
6590 ext4_msg(sb, KERN_ERR, "changing journal_checksum "
6591 "during remount not supported; ignoring");
6592 sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
6593 }
6594
6595 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
6596 if (test_opt2(sb, EXPLICIT_DELALLOC)) {
6597 ext4_msg(sb, KERN_ERR, "can't mount with "
6598 "both data=journal and delalloc");
6599 err = -EINVAL;
6600 goto restore_opts;
6601 }
6602 if (test_opt(sb, DIOREAD_NOLOCK)) {
6603 ext4_msg(sb, KERN_ERR, "can't mount with "
6604 "both data=journal and dioread_nolock");
6605 err = -EINVAL;
6606 goto restore_opts;
6607 }
6608 } else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
6609 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
6610 ext4_msg(sb, KERN_ERR, "can't mount with "
6611 "journal_async_commit in data=ordered mode");
6612 err = -EINVAL;
6613 goto restore_opts;
6614 }
6615 }
6616
6617 if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
6618 ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
6619 err = -EINVAL;
6620 goto restore_opts;
6621 }
6622
6623 if ((old_opts.s_mount_opt & EXT4_MOUNT_DELALLOC) &&
6624 !test_opt(sb, DELALLOC)) {
6625 ext4_msg(sb, KERN_ERR, "can't disable delalloc during remount");
6626 err = -EINVAL;
6627 goto restore_opts;
6628 }
6629
6630 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
6631 (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
6632
6633 es = sbi->s_es;
6634
6635 if (sbi->s_journal) {
6636 ext4_init_journal_params(sb, sbi->s_journal);
6637 set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio);
6638 }
6639
6640 /* Flush outstanding errors before changing fs state */
6641 flush_work(&sbi->s_sb_upd_work);
6642
6643 if ((bool)(fc->sb_flags & SB_RDONLY) != sb_rdonly(sb)) {
6644 if (ext4_emergency_state(sb)) {
6645 err = -EROFS;
6646 goto restore_opts;
6647 }
6648
6649 if (fc->sb_flags & SB_RDONLY) {
6650 err = sync_filesystem(sb);
6651 if (err < 0)
6652 goto restore_opts;
6653 err = dquot_suspend(sb, -1);
6654 if (err < 0)
6655 goto restore_opts;
6656
6657 /*
6658 * First of all, the unconditional stuff we have to do
6659 * to disable replay of the journal when we next remount
6660 */
6661 sb->s_flags |= SB_RDONLY;
6662
6663 /*
6664 * OK, test if we are remounting a valid rw partition
6665 * readonly, and if so set the rdonly flag and then
6666 * mark the partition as valid again.
6667 */
6668 if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
6669 (sbi->s_mount_state & EXT4_VALID_FS))
6670 es->s_state = cpu_to_le16(sbi->s_mount_state);
6671
6672 if (sbi->s_journal) {
6673 /*
6674 * We let remount-ro finish even if marking fs
6675 * as clean failed...
6676 */
6677 ext4_mark_recovery_complete(sb, es);
6678 }
6679 } else {
6680 /* Make sure we can mount this feature set readwrite */
6681 if (ext4_has_feature_readonly(sb) ||
6682 !ext4_feature_set_ok(sb, 0)) {
6683 err = -EROFS;
6684 goto restore_opts;
6685 }
6686 /*
6687 * Make sure the group descriptor checksums
6688 * are sane. If they aren't, refuse to remount r/w.
6689 */
6690 for (g = 0; g < sbi->s_groups_count; g++) {
6691 struct ext4_group_desc *gdp =
6692 ext4_get_group_desc(sb, g, NULL);
6693
6694 if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
6695 ext4_msg(sb, KERN_ERR,
6696 "ext4_remount: Checksum for group %u failed (%u!=%u)",
6697 g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
6698 le16_to_cpu(gdp->bg_checksum));
6699 err = -EFSBADCRC;
6700 goto restore_opts;
6701 }
6702 }
6703
6704 /*
6705 * If we have an unprocessed orphan list hanging
6706 * around from a previously readonly bdev mount,
6707 * require a full umount/remount for now.
6708 */
6709 if (es->s_last_orphan || !ext4_orphan_file_empty(sb)) {
6710 ext4_msg(sb, KERN_WARNING, "Couldn't "
6711 "remount RDWR because of unprocessed "
6712 "orphan inode list. Please "
6713 "umount/remount instead");
6714 err = -EINVAL;
6715 goto restore_opts;
6716 }
6717
6718 /*
6719 * Mounting a RDONLY partition read-write, so reread
6720 * and store the current valid flag. (It may have
6721 * been changed by e2fsck since we originally mounted
6722 * the partition.)
6723 */
6724 if (sbi->s_journal) {
6725 err = ext4_clear_journal_err(sb, es);
6726 if (err)
6727 goto restore_opts;
6728 }
6729 sbi->s_mount_state = (le16_to_cpu(es->s_state) &
6730 ~EXT4_FC_REPLAY);
6731
6732 err = ext4_setup_super(sb, es, 0);
6733 if (err)
6734 goto restore_opts;
6735
6736 sb->s_flags &= ~SB_RDONLY;
6737 if (ext4_has_feature_mmp(sb)) {
6738 err = ext4_multi_mount_protect(sb,
6739 le64_to_cpu(es->s_mmp_block));
6740 if (err)
6741 goto restore_opts;
6742 }
6743 #ifdef CONFIG_QUOTA
6744 enable_quota = 1;
6745 #endif
6746 }
6747 }
6748
6749 /*
6750 * Handle creation of system zone data early because it can fail.
6751 * Releasing of existing data is done when we are sure remount will
6752 * succeed.
6753 */
6754 if (test_opt(sb, BLOCK_VALIDITY) && !sbi->s_system_blks) {
6755 err = ext4_setup_system_zone(sb);
6756 if (err)
6757 goto restore_opts;
6758 }
6759
6760 if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
6761 err = ext4_commit_super(sb);
6762 if (err)
6763 goto restore_opts;
6764 }
6765
6766 #ifdef CONFIG_QUOTA
6767 if (enable_quota) {
6768 if (sb_any_quota_suspended(sb))
6769 dquot_resume(sb, -1);
6770 else if (ext4_has_feature_quota(sb)) {
6771 err = ext4_enable_quotas(sb);
6772 if (err)
6773 goto restore_opts;
6774 }
6775 }
6776 /* Release old quota file names */
6777 for (i = 0; i < EXT4_MAXQUOTAS; i++)
6778 kfree(old_opts.s_qf_names[i]);
6779 #endif
6780 if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
6781 ext4_release_system_zone(sb);
6782
6783 /*
6784 * Reinitialize lazy itable initialization thread based on
6785 * current settings
6786 */
6787 if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
6788 ext4_unregister_li_request(sb);
6789 else {
6790 ext4_group_t first_not_zeroed;
6791 first_not_zeroed = ext4_has_uninit_itable(sb);
6792 ext4_register_li_request(sb, first_not_zeroed);
6793 }
6794
6795 if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
6796 ext4_stop_mmpd(sbi);
6797
6798 /*
6799 * Handle aborting the filesystem as the last thing during remount to
6800 * avoid obscure errors when some option changes fail to apply because
6801 * the filesystem has been shut down.
6802 */
6803 if (test_opt2(sb, ABORT))
6804 ext4_abort(sb, ESHUTDOWN, "Abort forced by user");
6805
6806 return 0;
6807
6808 restore_opts:
6809 /*
6810 * If there was a failing r/w to ro transition, we may need to
6811 * re-enable quota
6812 */
6813 if (sb_rdonly(sb) && !(old_sb_flags & SB_RDONLY) &&
6814 sb_any_quota_suspended(sb))
6815 dquot_resume(sb, -1);
6816
	alloc_ctx = ext4_writepages_down_write(sb);
	sb->s_flags = old_sb_flags;
	sbi->s_mount_opt = old_opts.s_mount_opt;
	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
	sbi->s_resuid = old_opts.s_resuid;
	sbi->s_resgid = old_opts.s_resgid;
	sbi->s_commit_interval = old_opts.s_commit_interval;
	sbi->s_min_batch_time = old_opts.s_min_batch_time;
	sbi->s_max_batch_time = old_opts.s_max_batch_time;
	ext4_writepages_up_write(sb, alloc_ctx);

	if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
		ext4_release_system_zone(sb);
#ifdef CONFIG_QUOTA
	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		to_free[i] = get_qf_name(sb, sbi, i);
		rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]);
	}
	synchronize_rcu();
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(to_free[i]);
#endif
	if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
		ext4_stop_mmpd(sbi);
	return err;
}

static int ext4_reconfigure(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	int ret;
	bool old_ro = sb_rdonly(sb);

	fc->s_fs_info = EXT4_SB(sb);

	ret = ext4_check_opt_consistency(fc, sb);
	if (ret < 0)
		return ret;

	ret = __ext4_remount(fc, sb);
	if (ret < 0)
		return ret;

	ext4_msg(sb, KERN_INFO, "re-mounted %pU%s.",
		 &sb->s_uuid,
		 (old_ro != sb_rdonly(sb)) ? (sb_rdonly(sb) ? " ro" : " r/w") : "");

	return 0;
}

#ifdef CONFIG_QUOTA
static int ext4_statfs_project(struct super_block *sb,
			       kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
			     dquot->dq_dqb.dqb_bhardlimit);
	limit >>= sb->s_blocksize_bits;

	if (limit) {
		uint64_t remaining = 0;

		curblock = (dquot->dq_dqb.dqb_curspace +
			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
		if (limit > curblock)
			remaining = limit - curblock;

		buf->f_blocks = min(buf->f_blocks, limit);
		buf->f_bfree = min(buf->f_bfree, remaining);
		buf->f_bavail = min(buf->f_bavail, remaining);
	}

	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
			     dquot->dq_dqb.dqb_ihardlimit);
	if (limit) {
		uint64_t remaining = 0;

		if (limit > dquot->dq_dqb.dqb_curinodes)
			remaining = limit - dquot->dq_dqb.dqb_curinodes;

		buf->f_files = min(buf->f_files, limit);
		buf->f_ffree = min(buf->f_ffree, remaining);
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
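
/*
 * Worked example (illustrative; assumes a 4 KiB block size): the in-memory
 * dquot space limits are kept in bytes, so a 4 MiB block soft limit becomes
 * 4194304 >> 12 = 1024 fs blocks after the shift above. If curspace +
 * rsvspace amounts to 2 MiB (512 blocks), remaining = 1024 - 512 = 512, and
 * statfs reports f_blocks = min(f_blocks, 1024) with f_bfree/f_bavail capped
 * at 512 - df inside a project directory then reflects the project quota,
 * not the whole filesystem.
 */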
#endif

static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t overhead = 0, resv_blocks;
	s64 bfree;
	resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));

	if (!test_opt(sb, MINIX_DF))
		overhead = sbi->s_overhead;

	buf->f_type = EXT4_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
	/* prevent underflow in case little free space is available */
	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
	buf->f_bavail = buf->f_bfree -
			(ext4_r_blocks_count(es) + resv_blocks);
	if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
	buf->f_namelen = EXT4_NAME_LEN;
	buf->f_fsid = uuid_to_fsid(es->s_uuid);

#ifdef CONFIG_QUOTA
	if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
	    sb_has_quota_limits_enabled(sb, PRJQUOTA))
		ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
#endif
	return 0;
}
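
/*
 * Illustrative sketch of the f_bavail computation above: with 1000 free
 * blocks, 50 root-reserved blocks (ext4_r_blocks_count()) and 10 blocks
 * held back in s_resv_clusters, an unprivileged caller sees
 * f_bavail = 1000 - (50 + 10) = 940; had fewer than 60 blocks been free,
 * f_bavail would be clamped to 0 instead of wrapping around.
 */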


#ifdef CONFIG_QUOTA

/*
 * Helper functions so that the transaction is started before we acquire
 * dqio_sem, preserving the lock ordering of transaction > dqio_sem.
 */
static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
}

static int ext4_write_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	struct inode *inode;

	inode = dquot_to_inode(dquot);
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	if (ret < 0)
		ext4_error_err(dquot->dq_sb, -ret,
			       "Failed to commit dquot type %d",
			       dquot->dq_id.type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_acquire_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_acquire(dquot);
	if (ret < 0)
		ext4_error_err(dquot->dq_sb, -ret,
			       "Failed to acquire dquot type %d",
			       dquot->dq_id.type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_release_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	bool freeze_protected = false;

	/*
	 * Trying to sb_start_intwrite() in a running transaction
	 * can result in a deadlock. Further, running transactions
	 * are already protected from freezing.
	 */
	if (!ext4_journal_current_handle()) {
		sb_start_intwrite(dquot->dq_sb);
		freeze_protected = true;
	}

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
		if (freeze_protected)
			sb_end_intwrite(dquot->dq_sb);
		return PTR_ERR(handle);
	}
	ret = dquot_release(dquot);
	if (ret < 0)
		ext4_error_err(dquot->dq_sb, -ret,
			       "Failed to release dquot type %d",
			       dquot->dq_id.type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;

	if (freeze_protected)
		sb_end_intwrite(dquot->dq_sb);

	return ret;
}

static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;

	if (ext4_is_quota_journalled(sb)) {
		dquot_mark_dquot_dirty(dquot);
		return ext4_write_dquot(dquot);
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}
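
/*
 * Design note on the split above: with journalled quota the dquot is
 * written out immediately through the running transaction, so quota data
 * and filesystem metadata always commit together and survive a crash
 * consistently; in the non-journalled case the dquot is merely marked
 * dirty and written back later, which is cheaper but can leave quota
 * usage stale after a crash.
 */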

static int ext4_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext4_journal_start_sb(sb, EXT4_HT_QUOTA, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static void lockdep_set_quota_inode(struct inode *inode, int subclass)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	/* The first argument of lockdep_set_subclass has to be
	 * *exactly* the same as the argument to init_rwsem() --- in
	 * this case, in init_once() --- or lockdep gets unhappy
	 * because the name of the lock is set using the
	 * stringification of the argument to init_rwsem().
	 */
	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
	lockdep_set_subclass(&ei->i_data_sem, subclass);
}

/*
 * Standard function to be called on quota_on
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;

	/* Quota already enabled for this file? */
	if (IS_NOQUOTA(d_inode(path->dentry)))
		return -EBUSY;

	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
		/* Quotafile not in fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext4_msg(sb, KERN_WARNING,
				"Quota file not on filesystem root. "
				"Journaled quota will not work");
		sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
	} else {
		/*
		 * Clear the flag just in case mount options changed since
		 * last time.
		 */
		sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
	}

	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
	err = dquot_quota_on(sb, type, format_id, path);
	if (!err) {
		struct inode *inode = d_inode(path->dentry);
		handle_t *handle;

		/*
		 * Set inode flags to prevent userspace from messing with quota
		 * files. If this fails, we return success anyway since quotas
		 * are already enabled and this is not a hard failure.
		 */
		inode_lock(inode);
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
		if (IS_ERR(handle))
			goto unlock_inode;
		EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
		inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
				S_NOATIME | S_IMMUTABLE);
		err = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	unlock_inode:
		inode_unlock(inode);
		if (err)
			dquot_quota_off(sb, type);
	}
	if (err)
		lockdep_set_quota_inode(path->dentry->d_inode,
					I_DATA_SEM_NORMAL);
	return err;
}

static inline bool ext4_check_quota_inum(int type, unsigned long qf_inum)
{
	switch (type) {
	case USRQUOTA:
		return qf_inum == EXT4_USR_QUOTA_INO;
	case GRPQUOTA:
		return qf_inum == EXT4_GRP_QUOTA_INO;
	case PRJQUOTA:
		return qf_inum >= EXT4_GOOD_OLD_FIRST_INO;
	default:
		BUG();
	}
}
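
/*
 * Note on the checks above: the user and group quota files live in fixed
 * reserved inodes, so any other value in the superblock fields indicates
 * corruption. The project quota inode is allocated dynamically, so the
 * best we can do for it is reject inode numbers from the reserved range
 * below EXT4_GOOD_OLD_FIRST_INO.
 */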

static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	int err;
	struct inode *qf_inode;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};

	BUG_ON(!ext4_has_feature_quota(sb));

	if (!qf_inums[type])
		return -EPERM;

	if (!ext4_check_quota_inum(type, qf_inums[type])) {
		ext4_error(sb, "Bad quota inum: %lu, type: %d",
			   qf_inums[type], type);
		return -EUCLEAN;
	}

	qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
	if (IS_ERR(qf_inode)) {
		ext4_error(sb, "Bad quota inode: %lu, type: %d",
			   qf_inums[type], type);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
	if (err)
		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
	iput(qf_inode);

	return err;
}

/* Enable usage tracking for all quota types. */
int ext4_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};
	bool quota_mopt[EXT4_MAXQUOTAS] = {
		test_opt(sb, USRQUOTA),
		test_opt(sb, GRPQUOTA),
		test_opt(sb, PRJQUOTA),
	};

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
		if (qf_inums[type]) {
			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				ext4_warning(sb,
					"Failed to enable quota tracking "
					"(type=%d, err=%d, ino=%lu). "
					"Please run e2fsck to fix.", type,
					err, qf_inums[type]);

				ext4_quotas_off(sb, type);
				return err;
			}
		}
	}
	return 0;
}
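
/*
 * For example, on a filesystem with the quota feature and "usrquota" in
 * the mount options, the loop above loads the user quota inode with
 * DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED, while the group quota inode
 * (if present) gets DQUOT_USAGE_ENABLED only: usage is always tracked,
 * limits are enforced only where the corresponding mount option asks
 * for it.
 */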

static int ext4_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	handle_t *handle;
	int err;

	/* Force all delayed allocation blocks to be allocated.
	 * Caller already holds s_umount sem */
	if (test_opt(sb, DELALLOC))
		sync_filesystem(sb);

	if (!inode || !igrab(inode))
		goto out;

	err = dquot_quota_off(sb, type);
	if (err || ext4_has_feature_quota(sb))
		goto out_put;
	/*
	 * If the filesystem was remounted read-only first, we cannot clean
	 * up the inode flags here. Bad luck, but people should be using the
	 * QUOTA feature these days anyway.
	 */
	if (sb_rdonly(sb))
		goto out_put;

	inode_lock(inode);
	/*
	 * Update modification times of quota files when userspace can
	 * start looking at them. If we fail, we return success anyway since
	 * this is not a hard failure and quotas are already disabled.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto out_unlock;
	}
	EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	err = ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
out_unlock:
	inode_unlock(inode);
out_put:
	lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
	iput(inode);
	return err;
out:
	return dquot_quota_off(sb, type);
}

/*
 * Read data from the quota file - avoid the pagecache and such because we
 * cannot afford to acquire the locks... As quota files are never truncated
 * and the quota code itself serializes the operations (and no one else
 * should touch the files), we don't have to be afraid of races.
 */
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
		bh = ext4_bread(NULL, inode, blk, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		if (!bh)	/* A hole? */
			memset(data, 0, tocopy);
		else
			memcpy(data, bh->b_data + offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}
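
/*
 * Worked example (illustrative; assumes a 4 KiB block size): a read of
 * len=200 at off=5000 starts at blk = 5000 >> 12 = 1 with
 * offset = 5000 & 4095 = 904; the first (and only) iteration copies
 * min(4096 - 904, 200) = 200 bytes. A read of len=4000 at the same offset
 * would take two iterations: 3192 bytes from block 1, then 808 bytes from
 * the start of block 2, since offset is reset to 0 after the first pass.
 */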

/*
 * Write to quotafile (we know the transaction is already started and has
 * enough credits)
 */
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int err = 0, err2 = 0, offset = off & (sb->s_blocksize - 1);
	int retries = 0;
	struct buffer_head *bh;
	handle_t *handle = journal_current_handle();

	if (!handle) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because transaction is not started",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	/*
	 * Since we account for only one data block in the transaction
	 * credits, the write must not cross a block boundary.
	 */
	if (sb->s_blocksize - offset < len) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because not block aligned",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}

	do {
		bh = ext4_bread(handle, inode, blk,
				EXT4_GET_BLOCKS_CREATE |
				EXT4_GET_BLOCKS_METADATA_NOFAIL);
	} while (PTR_ERR(bh) == -ENOSPC &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (!bh)
		goto out;
	BUFFER_TRACE(bh, "get write access");
	err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE);
	if (err) {
		brelse(bh);
		return err;
	}
	lock_buffer(bh);
	memcpy(bh->b_data + offset, data, len);
	flush_dcache_folio(bh->b_folio);
	unlock_buffer(bh);
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	brelse(bh);
out:
	if (inode->i_size < off + len) {
		i_size_write(inode, off + len);
		EXT4_I(inode)->i_disksize = inode->i_size;
		err2 = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err2 && !err))
			err = err2;
	}
	return err ? err : len;
}
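
/*
 * Worked example (illustrative; assumes a 4 KiB block size): a write of
 * len=20 at off=4090 fails the alignment check because 4096 - 4090 = 6 < 20,
 * i.e. the record would straddle blocks 0 and 1 while the handle only
 * carries credits for a single data block; such a write is rejected with
 * -EIO rather than risking a journal credit overrun.
 */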
#endif

#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static inline void register_as_ext2(void)
{
	int err = register_filesystem(&ext2_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
}

static inline void unregister_as_ext2(void)
{
	unregister_filesystem(&ext2_fs_type);
}

static inline int ext2_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext2_incompat_features(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext2_ro_compat_features(sb))
		return 0;
	return 1;
}
#else
static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
#endif

static inline void register_as_ext3(void)
{
	int err = register_filesystem(&ext3_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
}

static inline void unregister_as_ext3(void)
{
	unregister_filesystem(&ext3_fs_type);
}

static inline int ext3_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext3_incompat_features(sb))
		return 0;
	if (!ext4_has_feature_journal(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext3_ro_compat_features(sb))
		return 0;
	return 1;
}

static void ext4_kill_sb(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct file *bdev_file = sbi ? sbi->s_journal_bdev_file : NULL;

	kill_block_super(sb);

	if (bdev_file)
		bdev_fput(bdev_file);
}
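
/*
 * Note: the external journal device file is sampled before
 * kill_block_super() because the superblock teardown frees sbi; it is put
 * only afterwards, once nothing can reference the journal any more.
 */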

static struct file_system_type ext4_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "ext4",
	.init_fs_context	= ext4_init_fs_context,
	.parameters		= ext4_param_specs,
	.kill_sb		= ext4_kill_sb,
	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME |
				  FS_LBS,
};
MODULE_ALIAS_FS("ext4");

static int __init ext4_init_fs(void)
{
	int err;

	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
	ext4_li_info = NULL;

	/* Build-time check for flags consistency */
	ext4_check_flag_values();

	err = ext4_init_es();
	if (err)
		return err;

	err = ext4_init_pending();
	if (err)
		goto out7;

	err = ext4_init_post_read_processing();
	if (err)
		goto out6;

	err = ext4_init_pageio();
	if (err)
		goto out5;

	err = ext4_init_system_zone();
	if (err)
		goto out4;

	err = ext4_init_sysfs();
	if (err)
		goto out3;

	err = ext4_init_mballoc();
	if (err)
		goto out2;
	err = init_inodecache();
	if (err)
		goto out1;

	err = ext4_fc_init_dentry_cache();
	if (err)
		goto out05;

	register_as_ext3();
	register_as_ext2();
	err = register_filesystem(&ext4_fs_type);
	if (err)
		goto out;

	return 0;
out:
	unregister_as_ext2();
	unregister_as_ext3();
	ext4_fc_destroy_dentry_cache();
out05:
	destroy_inodecache();
out1:
	ext4_exit_mballoc();
out2:
	ext4_exit_sysfs();
out3:
	ext4_exit_system_zone();
out4:
	ext4_exit_pageio();
out5:
	ext4_exit_post_read_processing();
out6:
	ext4_exit_pending();
out7:
	ext4_exit_es();

	return err;
}

static void __exit ext4_exit_fs(void)
{
	ext4_destroy_lazyinit_thread();
	unregister_as_ext2();
	unregister_as_ext3();
	unregister_filesystem(&ext4_fs_type);
	ext4_fc_destroy_dentry_cache();
	destroy_inodecache();
	ext4_exit_mballoc();
	ext4_exit_sysfs();
	ext4_exit_system_zone();
	ext4_exit_pageio();
	ext4_exit_post_read_processing();
	ext4_exit_es();
	ext4_exit_pending();
}

MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)