1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * linux/fs/ext4/super.c
4 *
5 * Copyright (C) 1992, 1993, 1994, 1995
6 * Remy Card (card@masi.ibp.fr)
7 * Laboratoire MASI - Institut Blaise Pascal
8 * Universite Pierre et Marie Curie (Paris VI)
9 *
10 * from
11 *
12 * linux/fs/minix/inode.c
13 *
14 * Copyright (C) 1991, 1992 Linus Torvalds
15 *
16 * Big-endian to little-endian byte-swapping/bitmaps by
17 * David S. Miller (davem@caip.rutgers.edu), 1995
18 */
19
20 #include <linux/module.h>
21 #include <linux/string.h>
22 #include <linux/fs.h>
23 #include <linux/time.h>
24 #include <linux/vmalloc.h>
25 #include <linux/slab.h>
26 #include <linux/init.h>
27 #include <linux/blkdev.h>
28 #include <linux/backing-dev.h>
29 #include <linux/parser.h>
30 #include <linux/buffer_head.h>
31 #include <linux/exportfs.h>
32 #include <linux/vfs.h>
33 #include <linux/random.h>
34 #include <linux/mount.h>
35 #include <linux/namei.h>
36 #include <linux/quotaops.h>
37 #include <linux/seq_file.h>
38 #include <linux/ctype.h>
39 #include <linux/log2.h>
40 #include <linux/crc16.h>
41 #include <linux/dax.h>
42 #include <linux/uaccess.h>
43 #include <linux/iversion.h>
44 #include <linux/unicode.h>
45 #include <linux/part_stat.h>
46 #include <linux/kthread.h>
47 #include <linux/freezer.h>
48 #include <linux/fsnotify.h>
49 #include <linux/fs_context.h>
50 #include <linux/fs_parser.h>
51 #include <linux/fserror.h>
52
53 #include "ext4.h"
54 #include "ext4_extents.h" /* Needed for trace points definition */
55 #include "ext4_jbd2.h"
56 #include "xattr.h"
57 #include "acl.h"
58 #include "mballoc.h"
59 #include "fsmap.h"
60
61 #define CREATE_TRACE_POINTS
62 #include <trace/events/ext4.h>
63
64 static struct ext4_lazy_init *ext4_li_info;
65 static DEFINE_MUTEX(ext4_li_mtx);
66 static struct ratelimit_state ext4_mount_msg_ratelimit;
67
68 static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
69 unsigned long journal_devnum);
70 static int ext4_show_options(struct seq_file *seq, struct dentry *root);
71 static void ext4_update_super(struct super_block *sb);
72 static int ext4_commit_super(struct super_block *sb);
73 static int ext4_mark_recovery_complete(struct super_block *sb,
74 struct ext4_super_block *es);
75 static int ext4_clear_journal_err(struct super_block *sb,
76 struct ext4_super_block *es);
77 static int ext4_sync_fs(struct super_block *sb, int wait);
78 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
79 static int ext4_unfreeze(struct super_block *sb);
80 static int ext4_freeze(struct super_block *sb);
81 static inline int ext2_feature_set_ok(struct super_block *sb);
82 static inline int ext3_feature_set_ok(struct super_block *sb);
83 static void ext4_unregister_li_request(struct super_block *sb);
84 static void ext4_clear_request_list(void);
85 static struct inode *ext4_get_journal_inode(struct super_block *sb,
86 unsigned int journal_inum);
87 static int ext4_validate_options(struct fs_context *fc);
88 static int ext4_check_opt_consistency(struct fs_context *fc,
89 struct super_block *sb);
90 static void ext4_apply_options(struct fs_context *fc, struct super_block *sb);
91 static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param);
92 static int ext4_get_tree(struct fs_context *fc);
93 static int ext4_reconfigure(struct fs_context *fc);
94 static void ext4_fc_free(struct fs_context *fc);
95 static int ext4_init_fs_context(struct fs_context *fc);
96 static void ext4_kill_sb(struct super_block *sb);
97 static const struct fs_parameter_spec ext4_param_specs[];
98
99 /*
100 * Lock ordering
101 *
102 * page fault path:
103 * mmap_lock -> sb_start_pagefault -> invalidate_lock (r) -> transaction start
104 * -> page lock -> i_data_sem (rw)
105 *
106 * buffered write path:
107 * sb_start_write -> i_mutex -> mmap_lock
108 * sb_start_write -> i_mutex -> transaction start -> page lock ->
109 * i_data_sem (rw)
110 *
111 * truncate:
112 * sb_start_write -> i_mutex -> invalidate_lock (w) -> i_mmap_rwsem (w) ->
113 * page lock
114 * sb_start_write -> i_mutex -> invalidate_lock (w) -> transaction start ->
115 * i_data_sem (rw)
116 *
117 * direct IO:
118 * sb_start_write -> i_mutex -> mmap_lock
119 * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
120 *
121 * writepages:
122 * transaction start -> page lock(s) -> i_data_sem (rw)
123 */
124
125 static const struct fs_context_operations ext4_context_ops = {
126 .parse_param = ext4_parse_param,
127 .get_tree = ext4_get_tree,
128 .reconfigure = ext4_reconfigure,
129 .free = ext4_fc_free,
130 };
131
132
133 #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
134 static struct file_system_type ext2_fs_type = {
135 .owner = THIS_MODULE,
136 .name = "ext2",
137 .init_fs_context = ext4_init_fs_context,
138 .parameters = ext4_param_specs,
139 .kill_sb = ext4_kill_sb,
140 .fs_flags = FS_REQUIRES_DEV,
141 };
142 MODULE_ALIAS_FS("ext2");
143 MODULE_ALIAS("ext2");
144 #define IS_EXT2_SB(sb) ((sb)->s_type == &ext2_fs_type)
145 #else
146 #define IS_EXT2_SB(sb) (0)
147 #endif
148
149
150 static struct file_system_type ext3_fs_type = {
151 .owner = THIS_MODULE,
152 .name = "ext3",
153 .init_fs_context = ext4_init_fs_context,
154 .parameters = ext4_param_specs,
155 .kill_sb = ext4_kill_sb,
156 .fs_flags = FS_REQUIRES_DEV,
157 };
158 MODULE_ALIAS_FS("ext3");
159 MODULE_ALIAS("ext3");
160 #define IS_EXT3_SB(sb) ((sb)->s_type == &ext3_fs_type)
161
162
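/*
* Read helpers for already-locked buffer heads: __ext4_read_bh() submits
* the read with the chosen end_io callback (end_buffer_read_sync() by
* default); when simu_fail is set it simulates a read failure by leaving
* the buffer !uptodate and simply unlocking it.
*/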
163 static inline void __ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
164 bh_end_io_t *end_io, bool simu_fail)
165 {
166 if (simu_fail) {
167 clear_buffer_uptodate(bh);
168 unlock_buffer(bh);
169 return;
170 }
171
172 /*
173 * buffer's verified bit is no longer valid after reading from
174 * disk again due to write out error, clear it to make sure we
175 * recheck the buffer contents.
176 */
177 clear_buffer_verified(bh);
178
179 bh->b_end_io = end_io ? end_io : end_buffer_read_sync;
180 get_bh(bh);
181 submit_bh(REQ_OP_READ | op_flags, bh);
182 }
183
184 void ext4_read_bh_nowait(struct buffer_head *bh, blk_opf_t op_flags,
185 bh_end_io_t *end_io, bool simu_fail)
186 {
187 BUG_ON(!buffer_locked(bh));
188
189 if (ext4_buffer_uptodate(bh)) {
190 unlock_buffer(bh);
191 return;
192 }
193 __ext4_read_bh(bh, op_flags, end_io, simu_fail);
194 }
195
196 int ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
197 bh_end_io_t *end_io, bool simu_fail)
198 {
199 BUG_ON(!buffer_locked(bh));
200
201 if (ext4_buffer_uptodate(bh)) {
202 unlock_buffer(bh);
203 return 0;
204 }
205
206 __ext4_read_bh(bh, op_flags, end_io, simu_fail);
207
208 wait_on_buffer(bh);
209 if (buffer_uptodate(bh))
210 return 0;
211 return -EIO;
212 }
213
214 int ext4_read_bh_lock(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
215 {
216 lock_buffer(bh);
217 if (!wait) {
218 ext4_read_bh_nowait(bh, op_flags, NULL, false);
219 return 0;
220 }
221 return ext4_read_bh(bh, op_flags, NULL, false);
222 }
223
224 /*
225 * This works like __bread_gfp() except it uses ERR_PTR for error
226 * returns. Currently with sb_bread it's impossible to distinguish
227 * between ENOMEM and EIO situations (since both result in a NULL
228 * return).
229 */
230 static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb,
231 sector_t block,
232 blk_opf_t op_flags, gfp_t gfp)
233 {
234 struct buffer_head *bh;
235 int ret;
236
237 bh = sb_getblk_gfp(sb, block, gfp);
238 if (bh == NULL)
239 return ERR_PTR(-ENOMEM);
240 if (ext4_buffer_uptodate(bh))
241 return bh;
242
243 ret = ext4_read_bh_lock(bh, REQ_META | op_flags, true);
244 if (ret) {
245 put_bh(bh);
246 return ERR_PTR(ret);
247 }
248 return bh;
249 }
250
251 struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block,
252 blk_opf_t op_flags)
253 {
254 gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
255 ~__GFP_FS) | __GFP_MOVABLE;
256
257 return __ext4_sb_bread_gfp(sb, block, op_flags, gfp);
258 }
259
260 struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
261 sector_t block)
262 {
263 gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
264 ~__GFP_FS);
265
266 return __ext4_sb_bread_gfp(sb, block, 0, gfp);
267 }
268
269 struct buffer_head *ext4_sb_bread_nofail(struct super_block *sb,
270 sector_t block)
271 {
272 gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
273 ~__GFP_FS) | __GFP_MOVABLE | __GFP_NOFAIL;
274
275 return __ext4_sb_bread_gfp(sb, block, 0, gfp);
276 }
277
278 void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)
279 {
280 struct buffer_head *bh = bdev_getblk(sb->s_bdev, block,
281 sb->s_blocksize, GFP_NOWAIT);
282
283 if (likely(bh)) {
284 if (trylock_buffer(bh))
285 ext4_read_bh_nowait(bh, REQ_RAHEAD, NULL, false);
286 brelse(bh);
287 }
288 }
289
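/*
* Superblock checksumming: the checksum covers the superblock up to (but
* not including) the s_checksum field and is only generated/verified when
* the metadata_csum feature is enabled; crc32c is the only supported
* checksum type.
*/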
290 static int ext4_verify_csum_type(struct super_block *sb,
291 struct ext4_super_block *es)
292 {
293 if (!ext4_has_feature_metadata_csum(sb))
294 return 1;
295
296 return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
297 }
298
299 __le32 ext4_superblock_csum(struct ext4_super_block *es)
300 {
301 int offset = offsetof(struct ext4_super_block, s_checksum);
302 __u32 csum;
303
304 csum = ext4_chksum(~0, (char *)es, offset);
305
306 return cpu_to_le32(csum);
307 }
308
309 static int ext4_superblock_csum_verify(struct super_block *sb,
310 struct ext4_super_block *es)
311 {
312 if (!ext4_has_feature_metadata_csum(sb))
313 return 1;
314
315 return es->s_checksum == ext4_superblock_csum(es);
316 }
317
318 void ext4_superblock_csum_set(struct super_block *sb)
319 {
320 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
321
322 if (!ext4_has_feature_metadata_csum(sb))
323 return;
324
325 es->s_checksum = ext4_superblock_csum(es);
326 }
327
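/*
* Block group descriptor accessors. On-disk fields are split into _lo/_hi
* halves so that 64-bit values fit the original 32-bit layout; the _hi
* half is only present when the descriptor size is at least
* EXT4_MIN_DESC_SIZE_64BIT (i.e. the 64bit feature is enabled).
*/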
328 ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
329 struct ext4_group_desc *bg)
330 {
331 return le32_to_cpu(bg->bg_block_bitmap_lo) |
332 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
333 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
334 }
335
336 ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
337 struct ext4_group_desc *bg)
338 {
339 return le32_to_cpu(bg->bg_inode_bitmap_lo) |
340 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
341 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
342 }
343
344 ext4_fsblk_t ext4_inode_table(struct super_block *sb,
345 struct ext4_group_desc *bg)
346 {
347 return le32_to_cpu(bg->bg_inode_table_lo) |
348 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
349 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
350 }
351
352 __u32 ext4_free_group_clusters(struct super_block *sb,
353 struct ext4_group_desc *bg)
354 {
355 return le16_to_cpu(bg->bg_free_blocks_count_lo) |
356 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
357 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
358 }
359
360 __u32 ext4_free_inodes_count(struct super_block *sb,
361 struct ext4_group_desc *bg)
362 {
363 return le16_to_cpu(READ_ONCE(bg->bg_free_inodes_count_lo)) |
364 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
365 (__u32)le16_to_cpu(READ_ONCE(bg->bg_free_inodes_count_hi)) << 16 : 0);
366 }
367
368 __u32 ext4_used_dirs_count(struct super_block *sb,
369 struct ext4_group_desc *bg)
370 {
371 return le16_to_cpu(bg->bg_used_dirs_count_lo) |
372 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
373 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
374 }
375
376 __u32 ext4_itable_unused_count(struct super_block *sb,
377 struct ext4_group_desc *bg)
378 {
379 return le16_to_cpu(bg->bg_itable_unused_lo) |
380 (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
381 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
382 }
383
384 void ext4_block_bitmap_set(struct super_block *sb,
385 struct ext4_group_desc *bg, ext4_fsblk_t blk)
386 {
387 bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
388 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
389 bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
390 }
391
392 void ext4_inode_bitmap_set(struct super_block *sb,
393 struct ext4_group_desc *bg, ext4_fsblk_t blk)
394 {
395 bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk);
396 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
397 bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
398 }
399
400 void ext4_inode_table_set(struct super_block *sb,
401 struct ext4_group_desc *bg, ext4_fsblk_t blk)
402 {
403 bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
404 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
405 bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
406 }
407
408 void ext4_free_group_clusters_set(struct super_block *sb,
409 struct ext4_group_desc *bg, __u32 count)
410 {
411 bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
412 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
413 bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
414 }
415
416 void ext4_free_inodes_set(struct super_block *sb,
417 struct ext4_group_desc *bg, __u32 count)
418 {
419 WRITE_ONCE(bg->bg_free_inodes_count_lo, cpu_to_le16((__u16)count));
420 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
421 WRITE_ONCE(bg->bg_free_inodes_count_hi, cpu_to_le16(count >> 16));
422 }
423
424 void ext4_used_dirs_set(struct super_block *sb,
425 struct ext4_group_desc *bg, __u32 count)
426 {
427 bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
428 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
429 bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
430 }
431
432 void ext4_itable_unused_set(struct super_block *sb,
433 struct ext4_group_desc *bg, __u32 count)
434 {
435 bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
436 if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
437 bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
438 }
439
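/*
* Superblock timestamps are stored as a 32-bit low word plus an 8-bit
* high byte, giving a 40-bit seconds-since-epoch value; values are
* clamped to that range when updated.
*/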
440 static void __ext4_update_tstamp(__le32 *lo, __u8 *hi, time64_t now)
441 {
442 now = clamp_val(now, 0, (1ull << 40) - 1);
443
444 *lo = cpu_to_le32(lower_32_bits(now));
445 *hi = upper_32_bits(now);
446 }
447
448 static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
449 {
450 return ((time64_t)(*hi) << 32) + le32_to_cpu(*lo);
451 }
452 #define ext4_update_tstamp(es, tstamp) \
453 __ext4_update_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi, \
454 ktime_get_real_seconds())
455 #define ext4_get_tstamp(es, tstamp) \
456 __ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
457
458 /*
459 * The ext4_maybe_update_superblock() function checks and updates the
460 * superblock if needed.
461 *
462 * This function is designed to update the on-disk superblock only under
463 * certain conditions to prevent excessive disk writes and unnecessary
464 * waking of the disk from sleep. The superblock will be updated if:
465 * 1. More than sbi->s_sb_update_sec (def: 1 hour) has passed since the last
466 * superblock update
467 * 2. More than sbi->s_sb_update_kb (def: 16MB) kbs have been written since the
468 * last superblock update.
469 *
470 * @sb: The superblock
471 */
472 static void ext4_maybe_update_superblock(struct super_block *sb)
473 {
474 struct ext4_sb_info *sbi = EXT4_SB(sb);
475 struct ext4_super_block *es = sbi->s_es;
476 journal_t *journal = sbi->s_journal;
477 time64_t now;
478 __u64 last_update;
479 __u64 lifetime_write_kbytes;
480 __u64 diff_size;
481
482 if (ext4_emergency_state(sb) || sb_rdonly(sb) ||
483 !(sb->s_flags & SB_ACTIVE) || !journal ||
484 journal->j_flags & JBD2_UNMOUNT)
485 return;
486
487 now = ktime_get_real_seconds();
488 last_update = ext4_get_tstamp(es, s_wtime);
489
490 if (likely(now - last_update < sbi->s_sb_update_sec))
491 return;
492
493 lifetime_write_kbytes = sbi->s_kbytes_written +
494 ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
495 sbi->s_sectors_written_start) >> 1);
496
497 /* Get the number of kilobytes not written to disk to account
498 * for statistics and compare with a multiple of 16 MB. This
499 * is used to determine when the next superblock commit should
500 * occur (i.e. not more often than once per 16MB if there was
501 * less written in an hour).
502 */
503 diff_size = lifetime_write_kbytes - le64_to_cpu(es->s_kbytes_written);
504
505 if (diff_size > sbi->s_sb_update_kb)
506 schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
507 }
508
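/*
* Commit callback invoked by jbd2 once a transaction has committed:
* release data blocks freed in that transaction and possibly schedule a
* superblock update.
*/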
509 static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
510 {
511 struct super_block *sb = journal->j_private;
512
513 BUG_ON(txn->t_state == T_FINISHED);
514
515 ext4_process_freed_data(sb, txn->t_tid);
516 ext4_maybe_update_superblock(sb);
517 }
518
519 static bool ext4_journalled_writepage_needs_redirty(struct jbd2_inode *jinode,
520 struct folio *folio)
521 {
522 struct buffer_head *bh, *head;
523 struct journal_head *jh;
524 transaction_t *trans = READ_ONCE(jinode->i_transaction);
525
526 bh = head = folio_buffers(folio);
527 do {
528 /*
529 * We have to redirty a page in these cases:
530 * 1) If buffer is dirty, it means the page was dirty because it
531 * contains a buffer that needs checkpointing. So the dirty bit
532 * needs to be preserved so that checkpointing writes the buffer
533 * properly.
534 * 2) If buffer is not part of the committing transaction
535 * (we may have just accidentally come across this buffer because
536 * inode range tracking is not exact) or if the currently running
537 * transaction already contains this buffer as well, dirty bit
538 * needs to be preserved so that the buffer gets writeprotected
539 * properly on running transaction's commit.
540 */
541 jh = bh2jh(bh);
542 if (buffer_dirty(bh) ||
543 (jh && (jh->b_transaction != trans ||
544 jh->b_next_transaction)))
545 return true;
546 } while ((bh = bh->b_this_page) != head);
547
548 return false;
549 }
550
551 static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode)
552 {
553 struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
554 loff_t range_start, range_end;
555 struct writeback_control wbc = {
556 .sync_mode = WB_SYNC_ALL,
557 .nr_to_write = LONG_MAX,
558 };
559 struct folio *folio = NULL;
560 int error;
561
562 if (!jbd2_jinode_get_dirty_range(jinode, &range_start, &range_end))
563 return 0;
564
565 wbc.range_start = range_start;
566 wbc.range_end = range_end;
567
568 /*
569 * writeback_iter() already checks for dirty pages and calls
570 * folio_clear_dirty_for_io(), which is what we want in order to
571 * write-protect the folios.
572 *
573 * However, we may have to redirty a folio sometimes.
574 */
575 while ((folio = writeback_iter(mapping, &wbc, folio, &error))) {
576 if (ext4_journalled_writepage_needs_redirty(jinode, folio))
577 folio_redirty_for_writepage(&wbc, folio);
578 folio_unlock(folio);
579 }
580
581 return error;
582 }
583
584 static int ext4_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
585 {
586 int ret;
587
588 if (ext4_should_journal_data(jinode->i_vfs_inode))
589 ret = ext4_journalled_submit_inode_data_buffers(jinode);
590 else
591 ret = ext4_normal_submit_inode_data_buffers(jinode);
592 return ret;
593 }
594
595 static int ext4_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
596 {
597 int ret = 0;
598
599 if (!ext4_should_journal_data(jinode->i_vfs_inode))
600 ret = jbd2_journal_finish_inode_data_buffers(jinode);
601
602 return ret;
603 }
604
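/* True while the system is halting, powering off, or rebooting. */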
605 static bool system_going_down(void)
606 {
607 return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
608 || system_state == SYSTEM_RESTART;
609 }
610
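/*
* Translation table from Linux errnos to the EXT4_ERR_* codes recorded in
* the superblock error fields.
*/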
611 struct ext4_err_translation {
612 int code;
613 int errno;
614 };
615
616 #define EXT4_ERR_TRANSLATE(err) { .code = EXT4_ERR_##err, .errno = err }
617
618 static struct ext4_err_translation err_translation[] = {
619 EXT4_ERR_TRANSLATE(EIO),
620 EXT4_ERR_TRANSLATE(ENOMEM),
621 EXT4_ERR_TRANSLATE(EFSBADCRC),
622 EXT4_ERR_TRANSLATE(EFSCORRUPTED),
623 EXT4_ERR_TRANSLATE(ENOSPC),
624 EXT4_ERR_TRANSLATE(ENOKEY),
625 EXT4_ERR_TRANSLATE(EROFS),
626 EXT4_ERR_TRANSLATE(EFBIG),
627 EXT4_ERR_TRANSLATE(EEXIST),
628 EXT4_ERR_TRANSLATE(ERANGE),
629 EXT4_ERR_TRANSLATE(EOVERFLOW),
630 EXT4_ERR_TRANSLATE(EBUSY),
631 EXT4_ERR_TRANSLATE(ENOTDIR),
632 EXT4_ERR_TRANSLATE(ENOTEMPTY),
633 EXT4_ERR_TRANSLATE(ESHUTDOWN),
634 EXT4_ERR_TRANSLATE(EFAULT),
635 };
636
637 static int ext4_errno_to_code(int errno)
638 {
639 int i;
640
641 for (i = 0; i < ARRAY_SIZE(err_translation); i++)
642 if (err_translation[i].errno == errno)
643 return err_translation[i].code;
644 return EXT4_ERR_UNKNOWN;
645 }
646
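/*
* Record the most recent (and, if not yet set, the first) error's code,
* inode, block, function and line in the in-memory sb info under
* s_error_lock, for later transcription into the on-disk superblock.
*/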
647 static void save_error_info(struct super_block *sb, int error,
648 __u32 ino, __u64 block,
649 const char *func, unsigned int line)
650 {
651 struct ext4_sb_info *sbi = EXT4_SB(sb);
652
653 /* We default to EFSCORRUPTED error... */
654 if (error == 0)
655 error = EFSCORRUPTED;
656
657 spin_lock(&sbi->s_error_lock);
658 sbi->s_add_error_count++;
659 sbi->s_last_error_code = error;
660 sbi->s_last_error_line = line;
661 sbi->s_last_error_ino = ino;
662 sbi->s_last_error_block = block;
663 sbi->s_last_error_func = func;
664 sbi->s_last_error_time = ktime_get_real_seconds();
665 if (!sbi->s_first_error_time) {
666 sbi->s_first_error_code = error;
667 sbi->s_first_error_line = line;
668 sbi->s_first_error_ino = ino;
669 sbi->s_first_error_block = block;
670 sbi->s_first_error_func = func;
671 sbi->s_first_error_time = sbi->s_last_error_time;
672 }
673 spin_unlock(&sbi->s_error_lock);
674 }
675
676 /* Deal with the reporting of failure conditions on a filesystem such as
677 * inconsistencies detected or read IO failures.
678 *
679 * On ext2, we can store the error state of the filesystem in the
680 * superblock. That is not possible on ext4, because we may have other
681 * write ordering constraints on the superblock which prevent us from
682 * writing it out straight away; and given that the journal is about to
683 * be aborted, we can't rely on the current, or future, transactions to
684 * write out the superblock safely.
685 *
686 * We'll just use the jbd2_journal_abort() error code to record an error in
687 * the journal instead. On recovery, the journal will complain about
688 * that error until we've noted it down and cleared it.
689 *
690 * If force_ro is set, we unconditionally force the filesystem into an
691 * ABORT|READONLY state, unless the error response on the fs has been set to
692 * panic in which case we take the easy way out and panic immediately. This is
693 * used to deal with unrecoverable failures such as journal IO errors or ENOMEM
694 * at a critical moment in log management.
695 */
696 static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
697 __u32 ino, __u64 block,
698 const char *func, unsigned int line)
699 {
700 journal_t *journal = EXT4_SB(sb)->s_journal;
701 bool continue_fs = !force_ro && test_opt(sb, ERRORS_CONT);
702
703 EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
704 if (test_opt(sb, WARN_ON_ERROR))
705 WARN_ON_ONCE(1);
706
707 if (!continue_fs && !ext4_emergency_ro(sb) && journal)
708 jbd2_journal_abort(journal, -error);
709
710 if (!bdev_read_only(sb->s_bdev)) {
711 save_error_info(sb, error, ino, block, func, line);
712 /*
713 * In case the fs should keep running, we need to write out the
714 * superblock through the journal. Due to lock ordering
715 * constraints, it may not be safe to do it right here so we
716 * defer superblock flushing to a workqueue. We just need to be
717 * careful when the journal is already shutting down. If we get
718 * here in that case, just update the sb directly as the last
719 * transaction won't commit anyway.
720 */
721 if (continue_fs && journal &&
722 !ext4_test_mount_flag(sb, EXT4_MF_JOURNAL_DESTROY))
723 schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
724 else
725 ext4_commit_super(sb);
726 }
727
728 /*
729 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
730 * could panic during 'reboot -f' as the underlying device was already
731 * disabled.
732 */
733 if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) {
734 panic("EXT4-fs (device %s): panic forced after error\n",
735 sb->s_id);
736 }
737
738 if (ext4_emergency_ro(sb) || continue_fs)
739 return;
740
741 ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
742 /*
743 * We don't set SB_RDONLY because that requires sb->s_umount
744 * semaphore and setting it without proper remount procedure is
745 * confusing code such as freeze_super() leading to deadlocks
746 * and other problems.
747 */
748 set_bit(EXT4_FLAGS_EMERGENCY_RO, &EXT4_SB(sb)->s_ext4_flags);
749 }
750
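/*
* Worker for sbi->s_sb_upd_work: write the superblock through the journal
* while it is still running, otherwise fall back to a direct write via
* ext4_commit_super().
*/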
751 static void update_super_work(struct work_struct *work)
752 {
753 struct ext4_sb_info *sbi = container_of(work, struct ext4_sb_info,
754 s_sb_upd_work);
755 journal_t *journal = sbi->s_journal;
756 handle_t *handle;
757
758 /*
759 * If the journal is still running, we have to write out the superblock
760 * through the journal to avoid collisions with other journalled sb
761 * updates.
762 *
763 * We use jbd2 functions directly here to avoid recursing back into
764 * ext4 error handling code during handling of previous errors.
765 */
766 if (!ext4_emergency_state(sbi->s_sb) &&
767 !sb_rdonly(sbi->s_sb) && journal) {
768 struct buffer_head *sbh = sbi->s_sbh;
769 bool call_notify_err = false;
770
771 handle = jbd2_journal_start(journal, 1);
772 if (IS_ERR(handle))
773 goto write_directly;
774 if (jbd2_journal_get_write_access(handle, sbh)) {
775 jbd2_journal_stop(handle);
776 goto write_directly;
777 }
778
779 if (sbi->s_add_error_count > 0)
780 call_notify_err = true;
781
782 ext4_update_super(sbi->s_sb);
783 if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
784 ext4_msg(sbi->s_sb, KERN_ERR, "previous I/O error to "
785 "superblock detected");
786 clear_buffer_write_io_error(sbh);
787 set_buffer_uptodate(sbh);
788 }
789
790 if (jbd2_journal_dirty_metadata(handle, sbh)) {
791 jbd2_journal_stop(handle);
792 goto write_directly;
793 }
794 jbd2_journal_stop(handle);
795
796 if (call_notify_err)
797 ext4_notify_error_sysfs(sbi);
798
799 return;
800 }
801 write_directly:
802 /*
803 * Write through journal failed. Write sb directly to get error info
804 * out and hope for the best.
805 */
806 ext4_commit_super(sbi->s_sb);
807 ext4_notify_error_sysfs(sbi);
808 }
809
810 #define ext4_error_ratelimit(sb) \
811 ___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state), \
812 "EXT4-fs error")
813
814 void __ext4_error(struct super_block *sb, const char *function,
815 unsigned int line, bool force_ro, int error, __u64 block,
816 const char *fmt, ...)
817 {
818 struct va_format vaf;
819 va_list args;
820
821 if (unlikely(ext4_emergency_state(sb)))
822 return;
823
824 trace_ext4_error(sb, function, line);
825 if (ext4_error_ratelimit(sb)) {
826 va_start(args, fmt);
827 vaf.fmt = fmt;
828 vaf.va = &args;
829 printk(KERN_CRIT
830 "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
831 sb->s_id, function, line, current->comm, &vaf);
832 va_end(args);
833 }
834 fserror_report_metadata(sb, error ? -abs(error) : -EFSCORRUPTED,
835 GFP_ATOMIC);
836
837 ext4_handle_error(sb, force_ro, error, 0, block, function, line);
838 }
839
840 void __ext4_error_inode(struct inode *inode, const char *function,
841 unsigned int line, ext4_fsblk_t block, int error,
842 const char *fmt, ...)
843 {
844 va_list args;
845 struct va_format vaf;
846
847 if (unlikely(ext4_emergency_state(inode->i_sb)))
848 return;
849
850 trace_ext4_error(inode->i_sb, function, line);
851 if (ext4_error_ratelimit(inode->i_sb)) {
852 va_start(args, fmt);
853 vaf.fmt = fmt;
854 vaf.va = &args;
855 if (block)
856 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
857 "inode #%llu: block %llu: comm %s: %pV\n",
858 inode->i_sb->s_id, function, line, inode->i_ino,
859 block, current->comm, &vaf);
860 else
861 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
862 "inode #%llu: comm %s: %pV\n",
863 inode->i_sb->s_id, function, line, inode->i_ino,
864 current->comm, &vaf);
865 va_end(args);
866 }
867 fserror_report_file_metadata(inode,
868 error ? -abs(error) : -EFSCORRUPTED,
869 GFP_ATOMIC);
870
871 ext4_handle_error(inode->i_sb, false, error, inode->i_ino, block,
872 function, line);
873 }
874
875 void __ext4_error_file(struct file *file, const char *function,
876 unsigned int line, ext4_fsblk_t block,
877 const char *fmt, ...)
878 {
879 va_list args;
880 struct va_format vaf;
881 struct inode *inode = file_inode(file);
882 char pathname[80], *path;
883
884 if (unlikely(ext4_emergency_state(inode->i_sb)))
885 return;
886
887 trace_ext4_error(inode->i_sb, function, line);
888 if (ext4_error_ratelimit(inode->i_sb)) {
889 path = file_path(file, pathname, sizeof(pathname));
890 if (IS_ERR(path))
891 path = "(unknown)";
892 va_start(args, fmt);
893 vaf.fmt = fmt;
894 vaf.va = &args;
895 if (block)
896 printk(KERN_CRIT
897 "EXT4-fs error (device %s): %s:%d: inode #%llu: "
898 "block %llu: comm %s: path %s: %pV\n",
899 inode->i_sb->s_id, function, line, inode->i_ino,
900 block, current->comm, path, &vaf);
901 else
902 printk(KERN_CRIT
903 "EXT4-fs error (device %s): %s:%d: inode #%llu: "
904 "comm %s: path %s: %pV\n",
905 inode->i_sb->s_id, function, line, inode->i_ino,
906 current->comm, path, &vaf);
907 va_end(args);
908 }
909 fserror_report_file_metadata(inode, -EFSCORRUPTED, GFP_ATOMIC);
910
911 ext4_handle_error(inode->i_sb, false, EFSCORRUPTED, inode->i_ino, block,
912 function, line);
913 }
914
915 const char *ext4_decode_error(struct super_block *sb, int errno,
916 char nbuf[16])
917 {
918 char *errstr = NULL;
919
920 switch (errno) {
921 case -EFSCORRUPTED:
922 errstr = "Corrupt filesystem";
923 break;
924 case -EFSBADCRC:
925 errstr = "Filesystem failed CRC";
926 break;
927 case -EIO:
928 errstr = "IO failure";
929 break;
930 case -ENOMEM:
931 errstr = "Out of memory";
932 break;
933 case -EROFS:
934 if (!sb || (EXT4_SB(sb)->s_journal &&
935 EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
936 errstr = "Journal has aborted";
937 else
938 errstr = "Readonly filesystem";
939 break;
940 default:
941 /* If the caller passed in an extra buffer for unknown
942 * errors, textualise them now. Else we just return
943 * NULL. */
944 if (nbuf) {
945 /* Check for truncated error codes... */
946 if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
947 errstr = nbuf;
948 }
949 break;
950 }
951
952 return errstr;
953 }
954
955 /* __ext4_std_error decodes expected errors from journaling functions
956 * automatically and invokes the appropriate error response. */
957
958 void __ext4_std_error(struct super_block *sb, const char *function,
959 unsigned int line, int errno)
960 {
961 char nbuf[16];
962 const char *errstr;
963
964 if (unlikely(ext4_emergency_state(sb)))
965 return;
966
967 /* Special case: if the error is EROFS, and we're not already
968 * inside a transaction, then there's really no point in logging
969 * an error. */
970 if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
971 return;
972
973 if (ext4_error_ratelimit(sb)) {
974 errstr = ext4_decode_error(sb, errno, nbuf);
975 printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
976 sb->s_id, function, line, errstr);
977 }
978 fserror_report_metadata(sb, errno ? -abs(errno) : -EFSCORRUPTED,
979 GFP_ATOMIC);
980
981 ext4_handle_error(sb, false, -errno, 0, 0, function, line);
982 }
983
984 void __ext4_msg(struct super_block *sb,
985 const char *prefix, const char *fmt, ...)
986 {
987 struct va_format vaf;
988 va_list args;
989
990 if (sb) {
991 atomic_inc(&EXT4_SB(sb)->s_msg_count);
992 if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state),
993 "EXT4-fs"))
994 return;
995 }
996
997 va_start(args, fmt);
998 vaf.fmt = fmt;
999 vaf.va = &args;
1000 if (sb)
1001 printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
1002 else
1003 printk("%sEXT4-fs: %pV\n", prefix, &vaf);
1004 va_end(args);
1005 }
1006
1007 static int ext4_warning_ratelimit(struct super_block *sb)
1008 {
1009 atomic_inc(&EXT4_SB(sb)->s_warning_count);
1010 return ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),
1011 "EXT4-fs warning");
1012 }
1013
1014 void __ext4_warning(struct super_block *sb, const char *function,
1015 unsigned int line, const char *fmt, ...)
1016 {
1017 struct va_format vaf;
1018 va_list args;
1019
1020 if (!ext4_warning_ratelimit(sb))
1021 return;
1022
1023 va_start(args, fmt);
1024 vaf.fmt = fmt;
1025 vaf.va = &args;
1026 printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
1027 sb->s_id, function, line, &vaf);
1028 va_end(args);
1029 }
1030
1031 void __ext4_warning_inode(const struct inode *inode, const char *function,
1032 unsigned int line, const char *fmt, ...)
1033 {
1034 struct va_format vaf;
1035 va_list args;
1036
1037 if (!ext4_warning_ratelimit(inode->i_sb))
1038 return;
1039
1040 va_start(args, fmt);
1041 vaf.fmt = fmt;
1042 vaf.va = &args;
1043 printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
1044 "inode #%llu: comm %s: %pV\n", inode->i_sb->s_id,
1045 function, line, inode->i_ino, current->comm, &vaf);
1046 va_end(args);
1047 }
1048
1049 void __ext4_grp_locked_error(const char *function, unsigned int line,
1050 struct super_block *sb, ext4_group_t grp,
1051 u64 ino, ext4_fsblk_t block,
1052 const char *fmt, ...)
1053 __releases(bitlock)
1054 __acquires(bitlock)
1055 {
1056 struct va_format vaf;
1057 va_list args;
1058
1059 if (unlikely(ext4_emergency_state(sb)))
1060 return;
1061
1062 trace_ext4_error(sb, function, line);
1063 if (ext4_error_ratelimit(sb)) {
1064 va_start(args, fmt);
1065 vaf.fmt = fmt;
1066 vaf.va = &args;
1067 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
1068 sb->s_id, function, line, grp);
1069 if (ino)
1070 printk(KERN_CONT "inode %llu: ", ino);
1071 if (block)
1072 printk(KERN_CONT "block %llu:",
1073 (unsigned long long) block);
1074 printk(KERN_CONT "%pV\n", &vaf);
1075 va_end(args);
1076 }
1077
1078 if (test_opt(sb, ERRORS_CONT)) {
1079 if (test_opt(sb, WARN_ON_ERROR))
1080 WARN_ON_ONCE(1);
1081 EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
1082 if (!bdev_read_only(sb->s_bdev)) {
1083 save_error_info(sb, EFSCORRUPTED, ino, block, function,
1084 line);
1085 schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
1086 }
1087 return;
1088 }
1089 ext4_unlock_group(sb, grp);
1090 ext4_handle_error(sb, false, EFSCORRUPTED, ino, block, function, line);
1091 /*
1092 * We only get here in the ERRORS_RO case; relocking the group
1093 * may be dangerous, but nothing bad will happen since the
1094 * filesystem will have already been marked read/only and the
1095 * journal has been aborted. We return 1 as a hint to callers
1096 * who might want to use the return value from
1097 * ext4_grp_locked_error() to distinguish between the
1098 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
1099 * aggressively from the ext4 function in question, with a
1100 * more appropriate error code.
1101 */
1102 ext4_lock_group(sb, grp);
1103 return;
1104 }
1105
1106 void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
1107 ext4_group_t group,
1108 unsigned int flags)
1109 {
1110 struct ext4_sb_info *sbi = EXT4_SB(sb);
1111 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
1112 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
1113 int ret;
1114
1115 if (!grp || !gdp)
1116 return;
1117 if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
1118 ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
1119 &grp->bb_state);
1120 if (!ret)
1121 percpu_counter_sub(&sbi->s_freeclusters_counter,
1122 grp->bb_free);
1123 }
1124
1125 if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) {
1126 ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
1127 &grp->bb_state);
1128 if (!ret && gdp) {
1129 int count;
1130
1131 count = ext4_free_inodes_count(sb, gdp);
1132 percpu_counter_sub(&sbi->s_freeinodes_counter,
1133 count);
1134 }
1135 }
1136 }
1137
1138 void ext4_update_dynamic_rev(struct super_block *sb)
1139 {
1140 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
1141
1142 if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
1143 return;
1144
1145 ext4_warning(sb,
1146 "updating to rev %d because of new feature flag, "
1147 "running e2fsck is recommended",
1148 EXT4_DYNAMIC_REV);
1149
1150 es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
1151 es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
1152 es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
1153 /* leave es->s_feature_*compat flags alone */
1154 /* es->s_uuid will be set by e2fsck if empty */
1155
1156 /*
1157 * The rest of the superblock fields should be zero, and if not it
1158 * means they are likely already in use, so leave them alone. We
1159 * can leave it up to e2fsck to clean up any inconsistencies there.
1160 */
1161 }
1162
1163 static inline struct inode *orphan_list_entry(struct list_head *l)
1164 {
1165 return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
1166 }
1167
1168 static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
1169 {
1170 struct list_head *l;
1171
1172 ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
1173 le32_to_cpu(sbi->s_es->s_last_orphan));
1174
1175 printk(KERN_ERR "sb_info orphan list:\n");
1176 list_for_each(l, &sbi->s_orphan) {
1177 struct inode *inode = orphan_list_entry(l);
1178 printk(KERN_ERR " "
1179 "inode %s:%llu at %p: mode %o, nlink %d, next %d\n",
1180 inode->i_sb->s_id, inode->i_ino, inode,
1181 inode->i_mode, inode->i_nlink,
1182 NEXT_ORPHAN(inode));
1183 }
1184 }
1185
1186 #ifdef CONFIG_QUOTA
1187 static int ext4_quota_off(struct super_block *sb, int type);
1188
1189 static inline void ext4_quotas_off(struct super_block *sb, int type)
1190 {
1191 BUG_ON(type > EXT4_MAXQUOTAS);
1192
1193 /* Use our quota_off function to clear inode flags etc. */
1194 for (type--; type >= 0; type--)
1195 ext4_quota_off(sb, type);
1196 }
1197
1198 /*
1199 * This is a helper function which is used in the mount/remount
1200 * codepaths (which hold s_umount) to fetch the quota file name.
1201 */
1202 static inline char *get_qf_name(struct super_block *sb,
1203 struct ext4_sb_info *sbi,
1204 int type)
1205 {
1206 return rcu_dereference_protected(sbi->s_qf_names[type],
1207 lockdep_is_held(&sb->s_umount));
1208 }
1209 #else
1210 static inline void ext4_quotas_off(struct super_block *sb, int type)
1211 {
1212 }
1213 #endif
1214
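/*
* Set up the per-cpu state used by the superblock: free cluster/inode and
* directory counters, the dirty-cluster and SRA retry-limit counters, and
* the writepages rw semaphore; on failure an "insufficient memory" error
* is logged.
*/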
1215 static int ext4_percpu_param_init(struct ext4_sb_info *sbi)
1216 {
1217 ext4_fsblk_t block;
1218 int err;
1219
1220 block = ext4_count_free_clusters(sbi->s_sb);
1221 ext4_free_blocks_count_set(sbi->s_es, EXT4_C2B(sbi, block));
1222 err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
1223 GFP_KERNEL);
1224 if (!err) {
1225 unsigned long freei = ext4_count_free_inodes(sbi->s_sb);
1226 sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
1227 err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
1228 GFP_KERNEL);
1229 }
1230 if (!err)
1231 err = percpu_counter_init(&sbi->s_dirs_counter,
1232 ext4_count_dirs(sbi->s_sb), GFP_KERNEL);
1233 if (!err)
1234 err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
1235 GFP_KERNEL);
1236 if (!err)
1237 err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0,
1238 GFP_KERNEL);
1239 if (!err)
1240 err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
1241
1242 if (err)
1243 ext4_msg(sbi->s_sb, KERN_ERR, "insufficient memory");
1244
1245 return err;
1246 }
1247
1248 static void ext4_percpu_param_destroy(struct ext4_sb_info *sbi)
1249 {
1250 percpu_counter_destroy(&sbi->s_freeclusters_counter);
1251 percpu_counter_destroy(&sbi->s_freeinodes_counter);
1252 percpu_counter_destroy(&sbi->s_dirs_counter);
1253 percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
1254 percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
1255 percpu_free_rwsem(&sbi->s_writepages_rwsem);
1256 }
1257
1258 static void ext4_group_desc_free(struct ext4_sb_info *sbi)
1259 {
1260 struct buffer_head **group_desc;
1261 int i;
1262
1263 group_desc = rcu_access_pointer(sbi->s_group_desc);
1264 for (i = 0; i < sbi->s_gdb_count; i++)
1265 brelse(group_desc[i]);
1266 kvfree(group_desc);
1267 }
1268
1269 static void ext4_flex_groups_free(struct ext4_sb_info *sbi)
1270 {
1271 struct flex_groups **flex_groups;
1272 int i;
1273
1274 flex_groups = rcu_access_pointer(sbi->s_flex_groups);
1275 if (flex_groups) {
1276 for (i = 0; i < sbi->s_flex_groups_allocated; i++)
1277 kvfree(flex_groups[i]);
1278 kvfree(flex_groups);
1279 }
1280 }
1281
1282 static void ext4_put_super(struct super_block *sb)
1283 {
1284 struct ext4_sb_info *sbi = EXT4_SB(sb);
1285 struct ext4_super_block *es = sbi->s_es;
1286 int aborted = 0;
1287 int err;
1288
1289 /*
1290 * Unregister sysfs before destroying the jbd2 journal, since we could
1291 * otherwise still access the attr_journal_task attribute via the sysfs
1292 * path after sbi->s_journal->j_task has been set to NULL.
1293 * Also unregister sysfs before flushing sbi->s_sb_upd_work: a user may
1294 * read /proc/fs/ext4/xx/mb_groups during umount, and if a metadata read
1295 * fails verification it will queue error work, in which case
1296 * update_super_work calling start_this_handle may trigger a
1297 * BUG_ON.
1298 */
1299 ext4_unregister_sysfs(sb);
1300
1301 if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs unmount"))
1302 ext4_msg(sb, KERN_INFO, "unmounting filesystem %pU.",
1303 &sb->s_uuid);
1304
1305 ext4_unregister_li_request(sb);
1306 ext4_quotas_off(sb, EXT4_MAXQUOTAS);
1307
1308 destroy_workqueue(sbi->rsv_conversion_wq);
1309 ext4_release_orphan_info(sb);
1310
1311 if (sbi->s_journal) {
1312 aborted = is_journal_aborted(sbi->s_journal);
1313 err = ext4_journal_destroy(sbi, sbi->s_journal);
1314 if ((err < 0) && !aborted) {
1315 ext4_abort(sb, -err, "Couldn't clean up the journal");
1316 }
1317 } else
1318 flush_work(&sbi->s_sb_upd_work);
1319
1320 ext4_es_unregister_shrinker(sbi);
1321 timer_shutdown_sync(&sbi->s_err_report);
1322 ext4_release_system_zone(sb);
1323 ext4_mb_release(sb);
1324 ext4_ext_release(sb);
1325
1326 if (!ext4_emergency_state(sb) && !sb_rdonly(sb)) {
1327 if (!aborted) {
1328 ext4_clear_feature_journal_needs_recovery(sb);
1329 ext4_clear_feature_orphan_present(sb);
1330 es->s_state = cpu_to_le16(sbi->s_mount_state);
1331 }
1332 ext4_commit_super(sb);
1333 }
1334
1335 ext4_group_desc_free(sbi);
1336 ext4_flex_groups_free(sbi);
1337
1338 WARN_ON_ONCE(!(sbi->s_mount_state & EXT4_ERROR_FS) &&
1339 percpu_counter_sum(&sbi->s_dirtyclusters_counter));
1340 ext4_percpu_param_destroy(sbi);
1341 #ifdef CONFIG_QUOTA
1342 for (int i = 0; i < EXT4_MAXQUOTAS; i++)
1343 kfree(get_qf_name(sb, sbi, i));
1344 #endif
1345
1346 /* Debugging code just in case the in-memory inode orphan list
1347 * isn't empty. The on-disk one can be non-empty if we've
1348 * detected an error and taken the fs readonly, but the
1349 * in-memory list had better be clean by this point. */
1350 if (!list_empty(&sbi->s_orphan))
1351 dump_orphan_list(sb, sbi);
1352 ASSERT(list_empty(&sbi->s_orphan));
1353
1354 sync_blockdev(sb->s_bdev);
1355 invalidate_bdev(sb->s_bdev);
1356 if (sbi->s_journal_bdev_file) {
1357 /*
1358 * Invalidate the journal device's buffers. We don't want them
1359 * floating about in memory - the physical journal device may be
1360 * hotswapped, and it breaks the `ro-after' testing code.
1361 */
1362 sync_blockdev(file_bdev(sbi->s_journal_bdev_file));
1363 invalidate_bdev(file_bdev(sbi->s_journal_bdev_file));
1364 }
1365
1366 ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
1367 sbi->s_ea_inode_cache = NULL;
1368
1369 ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
1370 sbi->s_ea_block_cache = NULL;
1371
1372 ext4_stop_mmpd(sbi);
1373
1374 brelse(sbi->s_sbh);
1375 sb->s_fs_info = NULL;
1376 /*
1377 * Now that we are completely done shutting down the
1378 * superblock, we need to actually destroy the kobject.
1379 */
1380 kobject_put(&sbi->s_kobj);
1381 wait_for_completion(&sbi->s_kobj_unregister);
1382 kfree(sbi->s_blockgroup_lock);
1383 fs_put_dax(sbi->s_daxdev, NULL);
1384 fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
1385 #if IS_ENABLED(CONFIG_UNICODE)
1386 utf8_unload(sb->s_encoding);
1387 #endif
1388 kfree(sbi);
1389 }
1390
1391 static struct kmem_cache *ext4_inode_cachep;
1392
1393 /*
1394 * Called inside transaction, so use GFP_NOFS
1395 */
1396 static struct inode *ext4_alloc_inode(struct super_block *sb)
1397 {
1398 struct ext4_inode_info *ei;
1399
1400 ei = alloc_inode_sb(sb, ext4_inode_cachep, GFP_NOFS);
1401 if (!ei)
1402 return NULL;
1403
1404 inode_set_iversion(&ei->vfs_inode, 1);
1405 ei->i_flags = 0;
1406 ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
1407 spin_lock_init(&ei->i_raw_lock);
1408 ei->i_prealloc_node = RB_ROOT;
1409 atomic_set(&ei->i_prealloc_active, 0);
1410 rwlock_init(&ei->i_prealloc_lock);
1411 ext4_es_init_tree(&ei->i_es_tree);
1412 rwlock_init(&ei->i_es_lock);
1413 INIT_LIST_HEAD(&ei->i_es_list);
1414 ei->i_es_all_nr = 0;
1415 ei->i_es_shk_nr = 0;
1416 ei->i_es_shrink_lblk = 0;
1417 ei->i_es_seq = 0;
1418 ei->i_reserved_data_blocks = 0;
1419 spin_lock_init(&(ei->i_block_reservation_lock));
1420 ext4_init_pending_tree(&ei->i_pending_tree);
1421 #ifdef CONFIG_QUOTA
1422 ei->i_reserved_quota = 0;
1423 memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
1424 #endif
1425 ei->jinode = NULL;
1426 INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
1427 spin_lock_init(&ei->i_completed_io_lock);
1428 ei->i_sync_tid = 0;
1429 ei->i_datasync_tid = 0;
1430 INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
1431 ext4_fc_init_inode(&ei->vfs_inode);
1432 spin_lock_init(&ei->i_fc_lock);
1433 mmb_init(&ei->i_metadata_bhs, &ei->vfs_inode.i_data);
1434 return &ei->vfs_inode;
1435 }
1436
1437 static int ext4_drop_inode(struct inode *inode)
1438 {
1439 int drop = inode_generic_drop(inode);
1440
1441 if (!drop)
1442 drop = fscrypt_drop_inode(inode);
1443
1444 trace_ext4_drop_inode(inode, drop);
1445 return drop;
1446 }
1447
1448 static void ext4_free_in_core_inode(struct inode *inode)
1449 {
1450 fscrypt_free_inode(inode);
1451 if (!list_empty(&(EXT4_I(inode)->i_fc_list))) {
1452 pr_warn("%s: inode %llu still in fc list",
1453 __func__, inode->i_ino);
1454 }
1455 kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
1456 }
1457
1458 static void ext4_destroy_inode(struct inode *inode)
1459 {
1460 if (ext4_inode_orphan_tracked(inode)) {
1461 ext4_msg(inode->i_sb, KERN_ERR,
1462 "Inode %llu (%p): inode tracked as orphan!",
1463 inode->i_ino, EXT4_I(inode));
1464 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
1465 EXT4_I(inode), sizeof(struct ext4_inode_info),
1466 true);
1467 dump_stack();
1468 }
1469
1470 if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ERROR_FS) &&
1471 WARN_ON_ONCE(EXT4_I(inode)->i_reserved_data_blocks))
1472 ext4_msg(inode->i_sb, KERN_ERR,
1473 "Inode %llu (%p): i_reserved_data_blocks (%u) not cleared!",
1474 inode->i_ino, EXT4_I(inode),
1475 EXT4_I(inode)->i_reserved_data_blocks);
1476 }
1477
1478 static void ext4_shutdown(struct super_block *sb)
1479 {
1480 ext4_force_shutdown(sb, EXT4_GOING_FLAGS_NOLOGFLUSH);
1481 }
1482
1483 static void init_once(void *foo)
1484 {
1485 struct ext4_inode_info *ei = foo;
1486
1487 INIT_LIST_HEAD(&ei->i_orphan);
1488 init_rwsem(&ei->xattr_sem);
1489 init_rwsem(&ei->i_data_sem);
1490 inode_init_once(&ei->vfs_inode);
1491 ext4_fc_init_inode(&ei->vfs_inode);
1492 #ifdef CONFIG_FS_ENCRYPTION
1493 ei->i_crypt_info = NULL;
1494 #endif
1495 }
1496
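/*
* Create the ext4_inode_info slab cache: the i_data array is whitelisted
* for usercopy, the free-list pointer is stored at the i_flags offset,
* and each new object is initialised once via init_once().
*/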
1497 static int __init init_inodecache(void)
1498 {
1499 struct kmem_cache_args args = {
1500 .useroffset = offsetof(struct ext4_inode_info, i_data),
1501 .usersize = sizeof_field(struct ext4_inode_info, i_data),
1502 .use_freeptr_offset = true,
1503 .freeptr_offset = offsetof(struct ext4_inode_info, i_flags),
1504 .ctor = init_once,
1505 };
1506
1507 ext4_inode_cachep = kmem_cache_create("ext4_inode_cache",
1508 sizeof(struct ext4_inode_info),
1509 &args,
1510 SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT);
1511
1512 if (ext4_inode_cachep == NULL)
1513 return -ENOMEM;
1514 return 0;
1515 }
1516
1517 static void destroy_inodecache(void)
1518 {
1519 /*
1520 * Make sure all delayed rcu free inodes are flushed before we
1521 * destroy cache.
1522 */
1523 rcu_barrier();
1524 kmem_cache_destroy(ext4_inode_cachep);
1525 }
1526
1527 void ext4_clear_inode(struct inode *inode)
1528 {
1529 ext4_fc_del(inode);
1530 if (!EXT4_SB(inode->i_sb)->s_journal)
1531 mmb_invalidate(&EXT4_I(inode)->i_metadata_bhs);
1532 clear_inode(inode);
1533 ext4_discard_preallocations(inode);
1534 /*
1535 * We must remove the inode from the hash before ext4_free_inode()
1536 * clears the bit in inode bitmap as otherwise another process reusing
1537 * the inode will block in insert_inode_hash() waiting for inode
1538 * eviction to complete while holding transaction handle open, but
1539 * ext4_evict_inode() still running for that inode could block waiting
1540 * for transaction commit if the inode is marked as IS_SYNC => deadlock.
1541 *
1542 * Removing the inode from the hash here is safe. There are two cases
1543 * to consider:
1544 * 1) The inode still has references to it (i_nlink > 0). In that case
1545 * we are keeping the inode and once we remove the inode from the hash,
1546 * iget() can create the new inode structure for the same inode number
1547 * and we are fine with that as all IO on behalf of the inode is
1548 * finished.
1549 * 2) We are deleting the inode (i_nlink == 0). In that case inode
1550 * number cannot be reused until ext4_free_inode() clears the bit in
1551 * the inode bitmap, at which point all IO is done and reuse is fine
1552 * again.
1553 */
1554 remove_inode_hash(inode);
1555 ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
1556 dquot_drop(inode);
1557 if (EXT4_I(inode)->jinode) {
1558 jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
1559 EXT4_I(inode)->jinode);
1560 jbd2_free_inode(EXT4_I(inode)->jinode);
1561 EXT4_I(inode)->jinode = NULL;
1562 }
1563 fscrypt_put_encryption_info(inode);
1564 }
1565
1566 static struct inode *ext4_nfs_get_inode(struct super_block *sb,
1567 u64 ino, u32 generation)
1568 {
1569 struct inode *inode;
1570
1571 /*
1572 * Currently we don't know the generation for parent directory, so
1573 * a generation of 0 means "accept any"
1574 */
1575 inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
1576 if (IS_ERR(inode))
1577 return ERR_CAST(inode);
1578 if (generation && inode->i_generation != generation) {
1579 iput(inode);
1580 return ERR_PTR(-ESTALE);
1581 }
1582
1583 return inode;
1584 }
1585
1586 static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
1587 int fh_len, int fh_type)
1588 {
1589 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
1590 ext4_nfs_get_inode);
1591 }
1592
1593 static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
1594 int fh_len, int fh_type)
1595 {
1596 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
1597 ext4_nfs_get_inode);
1598 }
1599
1600 static int ext4_nfs_commit_metadata(struct inode *inode)
1601 {
1602 struct writeback_control wbc = {
1603 .sync_mode = WB_SYNC_ALL
1604 };
1605
1606 trace_ext4_nfs_commit_metadata(inode);
1607 return ext4_write_inode(inode, &wbc);
1608 }
1609
1610 #ifdef CONFIG_QUOTA
1611 static const char * const quotatypes[] = INITQFNAMES;
1612 #define QTYPE2NAME(t) (quotatypes[t])
1613
1614 static int ext4_write_dquot(struct dquot *dquot);
1615 static int ext4_acquire_dquot(struct dquot *dquot);
1616 static int ext4_release_dquot(struct dquot *dquot);
1617 static int ext4_mark_dquot_dirty(struct dquot *dquot);
1618 static int ext4_write_info(struct super_block *sb, int type);
1619 static int ext4_quota_on(struct super_block *sb, int type, int format_id,
1620 const struct path *path);
1621 static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
1622 size_t len, loff_t off);
1623 static ssize_t ext4_quota_write(struct super_block *sb, int type,
1624 const char *data, size_t len, loff_t off);
1625 static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
1626 unsigned int flags);
1627
1628 static struct dquot __rcu **ext4_get_dquots(struct inode *inode)
1629 {
1630 return EXT4_I(inode)->i_dquot;
1631 }
1632
1633 static const struct dquot_operations ext4_quota_operations = {
1634 .get_reserved_space = ext4_get_reserved_space,
1635 .write_dquot = ext4_write_dquot,
1636 .acquire_dquot = ext4_acquire_dquot,
1637 .release_dquot = ext4_release_dquot,
1638 .mark_dirty = ext4_mark_dquot_dirty,
1639 .write_info = ext4_write_info,
1640 .alloc_dquot = dquot_alloc,
1641 .destroy_dquot = dquot_destroy,
1642 .get_projid = ext4_get_projid,
1643 .get_inode_usage = ext4_get_inode_usage,
1644 .get_next_id = dquot_get_next_id,
1645 };
1646
1647 static const struct quotactl_ops ext4_qctl_operations = {
1648 .quota_on = ext4_quota_on,
1649 .quota_off = ext4_quota_off,
1650 .quota_sync = dquot_quota_sync,
1651 .get_state = dquot_get_state,
1652 .set_info = dquot_set_dqinfo,
1653 .get_dqblk = dquot_get_dqblk,
1654 .set_dqblk = dquot_set_dqblk,
1655 .get_nextdqblk = dquot_get_next_dqblk,
1656 };
1657 #endif
1658
1659 static const struct super_operations ext4_sops = {
1660 .alloc_inode = ext4_alloc_inode,
1661 .free_inode = ext4_free_in_core_inode,
1662 .destroy_inode = ext4_destroy_inode,
1663 .write_inode = ext4_write_inode,
1664 .dirty_inode = ext4_dirty_inode,
1665 .drop_inode = ext4_drop_inode,
1666 .evict_inode = ext4_evict_inode,
1667 .put_super = ext4_put_super,
1668 .sync_fs = ext4_sync_fs,
1669 .freeze_fs = ext4_freeze,
1670 .unfreeze_fs = ext4_unfreeze,
1671 .statfs = ext4_statfs,
1672 .show_options = ext4_show_options,
1673 .shutdown = ext4_shutdown,
1674 #ifdef CONFIG_QUOTA
1675 .quota_read = ext4_quota_read,
1676 .quota_write = ext4_quota_write,
1677 .get_dquots = ext4_get_dquots,
1678 #endif
1679 };
1680
1681 static const struct export_operations ext4_export_ops = {
1682 .encode_fh = generic_encode_ino32_fh,
1683 .fh_to_dentry = ext4_fh_to_dentry,
1684 .fh_to_parent = ext4_fh_to_parent,
1685 .get_parent = ext4_get_parent,
1686 .commit_metadata = ext4_nfs_commit_metadata,
1687 };
1688
1689 enum {
1690 Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
1691 Opt_resgid, Opt_resuid, Opt_sb,
1692 Opt_nouid32, Opt_debug, Opt_removed,
1693 Opt_user_xattr, Opt_acl,
1694 Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
1695 Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
1696 Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
1697 Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
1698 Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
1699 Opt_inlinecrypt,
1700 Opt_usrjquota, Opt_grpjquota, Opt_quota,
1701 Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
1702 Opt_usrquota, Opt_grpquota, Opt_prjquota,
1703 Opt_dax, Opt_dax_always, Opt_dax_inode, Opt_dax_never,
1704 Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
1705 Opt_nowarn_on_error, Opt_mblk_io_submit, Opt_debug_want_extra_isize,
1706 Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
1707 Opt_inode_readahead_blks, Opt_journal_ioprio,
1708 Opt_dioread_nolock, Opt_dioread_lock,
1709 Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
1710 Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
1711 Opt_no_prefetch_block_bitmaps, Opt_mb_optimize_scan,
1712 Opt_errors, Opt_data, Opt_data_err, Opt_jqfmt, Opt_dax_type,
1713 #ifdef CONFIG_EXT4_DEBUG
1714 Opt_fc_debug_max_replay, Opt_fc_debug_force
1715 #endif
1716 };
1717
1718 static const struct constant_table ext4_param_errors[] = {
1719 {"continue", EXT4_MOUNT_ERRORS_CONT},
1720 {"panic", EXT4_MOUNT_ERRORS_PANIC},
1721 {"remount-ro", EXT4_MOUNT_ERRORS_RO},
1722 {}
1723 };
1724
1725 static const struct constant_table ext4_param_data[] = {
1726 {"journal", EXT4_MOUNT_JOURNAL_DATA},
1727 {"ordered", EXT4_MOUNT_ORDERED_DATA},
1728 {"writeback", EXT4_MOUNT_WRITEBACK_DATA},
1729 {}
1730 };
1731
1732 static const struct constant_table ext4_param_data_err[] = {
1733 {"abort", Opt_data_err_abort},
1734 {"ignore", Opt_data_err_ignore},
1735 {}
1736 };
1737
1738 static const struct constant_table ext4_param_jqfmt[] = {
1739 {"vfsold", QFMT_VFS_OLD},
1740 {"vfsv0", QFMT_VFS_V0},
1741 {"vfsv1", QFMT_VFS_V1},
1742 {}
1743 };
1744
1745 static const struct constant_table ext4_param_dax[] = {
1746 {"always", Opt_dax_always},
1747 {"inode", Opt_dax_inode},
1748 {"never", Opt_dax_never},
1749 {}
1750 };
1751
1752 /*
1753 * Mount option specification
1754 * We don't use fsparam_flag_no because of the way we set the
1755 * options and the way we show them in _ext4_show_options(). To
1756 * keep the changes to a minimum, let's keep the negative options
1757 * separate for now.
1758 */
1759 static const struct fs_parameter_spec ext4_param_specs[] = {
1760 fsparam_flag ("bsddf", Opt_bsd_df),
1761 fsparam_flag ("minixdf", Opt_minix_df),
1762 fsparam_flag ("grpid", Opt_grpid),
1763 fsparam_flag ("bsdgroups", Opt_grpid),
1764 fsparam_flag ("nogrpid", Opt_nogrpid),
1765 fsparam_flag ("sysvgroups", Opt_nogrpid),
1766 fsparam_gid ("resgid", Opt_resgid),
1767 fsparam_uid ("resuid", Opt_resuid),
1768 fsparam_u32 ("sb", Opt_sb),
1769 fsparam_enum ("errors", Opt_errors, ext4_param_errors),
1770 fsparam_flag ("nouid32", Opt_nouid32),
1771 fsparam_flag ("debug", Opt_debug),
1772 fsparam_flag ("oldalloc", Opt_removed),
1773 fsparam_flag ("orlov", Opt_removed),
1774 fsparam_flag ("user_xattr", Opt_user_xattr),
1775 fsparam_flag ("acl", Opt_acl),
1776 fsparam_flag ("norecovery", Opt_noload),
1777 fsparam_flag ("noload", Opt_noload),
1778 fsparam_flag ("bh", Opt_removed),
1779 fsparam_flag ("nobh", Opt_removed),
1780 fsparam_u32 ("commit", Opt_commit),
1781 fsparam_u32 ("min_batch_time", Opt_min_batch_time),
1782 fsparam_u32 ("max_batch_time", Opt_max_batch_time),
1783 fsparam_u32 ("journal_dev", Opt_journal_dev),
1784 fsparam_bdev ("journal_path", Opt_journal_path),
1785 fsparam_flag ("journal_checksum", Opt_journal_checksum),
1786 fsparam_flag ("nojournal_checksum", Opt_nojournal_checksum),
1787 fsparam_flag ("journal_async_commit",Opt_journal_async_commit),
1788 fsparam_flag ("abort", Opt_abort),
1789 fsparam_enum ("data", Opt_data, ext4_param_data),
1790 fsparam_enum ("data_err", Opt_data_err,
1791 ext4_param_data_err),
1792 fsparam_string_empty
1793 ("usrjquota", Opt_usrjquota),
1794 fsparam_string_empty
1795 ("grpjquota", Opt_grpjquota),
1796 fsparam_enum ("jqfmt", Opt_jqfmt, ext4_param_jqfmt),
1797 fsparam_flag ("grpquota", Opt_grpquota),
1798 fsparam_flag ("quota", Opt_quota),
1799 fsparam_flag ("noquota", Opt_noquota),
1800 fsparam_flag ("usrquota", Opt_usrquota),
1801 fsparam_flag ("prjquota", Opt_prjquota),
1802 fsparam_flag ("barrier", Opt_barrier),
1803 fsparam_u32 ("barrier", Opt_barrier),
1804 fsparam_flag ("nobarrier", Opt_nobarrier),
1805 fsparam_flag ("i_version", Opt_removed),
1806 fsparam_flag ("dax", Opt_dax),
1807 fsparam_enum ("dax", Opt_dax_type, ext4_param_dax),
1808 fsparam_u32 ("stripe", Opt_stripe),
1809 fsparam_flag ("delalloc", Opt_delalloc),
1810 fsparam_flag ("nodelalloc", Opt_nodelalloc),
1811 fsparam_flag ("warn_on_error", Opt_warn_on_error),
1812 fsparam_flag ("nowarn_on_error", Opt_nowarn_on_error),
1813 fsparam_u32 ("debug_want_extra_isize",
1814 Opt_debug_want_extra_isize),
1815 fsparam_flag ("mblk_io_submit", Opt_removed),
1816 fsparam_flag ("nomblk_io_submit", Opt_removed),
1817 fsparam_flag ("block_validity", Opt_block_validity),
1818 fsparam_flag ("noblock_validity", Opt_noblock_validity),
1819 fsparam_u32 ("inode_readahead_blks",
1820 Opt_inode_readahead_blks),
1821 fsparam_u32 ("journal_ioprio", Opt_journal_ioprio),
1822 fsparam_u32 ("auto_da_alloc", Opt_auto_da_alloc),
1823 fsparam_flag ("auto_da_alloc", Opt_auto_da_alloc),
1824 fsparam_flag ("noauto_da_alloc", Opt_noauto_da_alloc),
1825 fsparam_flag ("dioread_nolock", Opt_dioread_nolock),
1826 fsparam_flag ("nodioread_nolock", Opt_dioread_lock),
1827 fsparam_flag ("dioread_lock", Opt_dioread_lock),
1828 fsparam_flag ("discard", Opt_discard),
1829 fsparam_flag ("nodiscard", Opt_nodiscard),
1830 fsparam_u32 ("init_itable", Opt_init_itable),
1831 fsparam_flag ("init_itable", Opt_init_itable),
1832 fsparam_flag ("noinit_itable", Opt_noinit_itable),
1833 #ifdef CONFIG_EXT4_DEBUG
1834 fsparam_flag ("fc_debug_force", Opt_fc_debug_force),
1835 fsparam_u32 ("fc_debug_max_replay", Opt_fc_debug_max_replay),
1836 #endif
1837 fsparam_u32 ("max_dir_size_kb", Opt_max_dir_size_kb),
1838 fsparam_flag ("test_dummy_encryption",
1839 Opt_test_dummy_encryption),
1840 fsparam_string ("test_dummy_encryption",
1841 Opt_test_dummy_encryption),
1842 fsparam_flag ("inlinecrypt", Opt_inlinecrypt),
1843 fsparam_flag ("nombcache", Opt_nombcache),
1844 fsparam_flag ("no_mbcache", Opt_nombcache), /* for backward compatibility */
1845 fsparam_flag ("prefetch_block_bitmaps",
1846 Opt_removed),
1847 fsparam_flag ("no_prefetch_block_bitmaps",
1848 Opt_no_prefetch_block_bitmaps),
1849 fsparam_s32 ("mb_optimize_scan", Opt_mb_optimize_scan),
1850 fsparam_string ("check", Opt_removed), /* mount option from ext2/3 */
1851 fsparam_flag ("nocheck", Opt_removed), /* mount option from ext2/3 */
1852 fsparam_flag ("reservation", Opt_removed), /* mount option from ext2/3 */
1853 fsparam_flag ("noreservation", Opt_removed), /* mount option from ext2/3 */
1854 fsparam_u32 ("journal", Opt_removed), /* mount option from ext2/3 */
1855 {}
1856 };
1857
1858
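/*
 * Flags describing how the entries in ext4_mount_opts[] below are applied:
 * MOPT_SET/MOPT_CLEAR set or clear the listed mount_opt bits, MOPT_NOSUPPORT
 * rejects the option, MOPT_EXPLICIT records that the user asked for it,
 * MOPT_Q/MOPT_QFMT are only honoured with CONFIG_QUOTA, MOPT_NO_EXT2 and
 * MOPT_NO_EXT3 mark options invalid for ext2/ext3 mounts, MOPT_SKIP hides
 * the option from show_options, and MOPT_2 means the bit lives in
 * s_mount_opt2 instead of s_mount_opt.
 */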
1859 #define MOPT_SET 0x0001
1860 #define MOPT_CLEAR 0x0002
1861 #define MOPT_NOSUPPORT 0x0004
1862 #define MOPT_EXPLICIT 0x0008
1863 #ifdef CONFIG_QUOTA
1864 #define MOPT_Q 0
1865 #define MOPT_QFMT 0x0010
1866 #else
1867 #define MOPT_Q MOPT_NOSUPPORT
1868 #define MOPT_QFMT MOPT_NOSUPPORT
1869 #endif
1870 #define MOPT_NO_EXT2 0x0020
1871 #define MOPT_NO_EXT3 0x0040
1872 #define MOPT_EXT4_ONLY (MOPT_NO_EXT2 | MOPT_NO_EXT3)
1873 #define MOPT_SKIP 0x0080
1874 #define MOPT_2 0x0100
1875
1876 static const struct mount_opts {
1877 int token;
1878 int mount_opt;
1879 int flags;
1880 } ext4_mount_opts[] = {
1881 {Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
1882 {Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
1883 {Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
1884 {Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
1885 {Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
1886 {Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
1887 {Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
1888 MOPT_EXT4_ONLY | MOPT_SET},
1889 {Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
1890 MOPT_EXT4_ONLY | MOPT_CLEAR},
1891 {Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
1892 {Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
1893 {Opt_delalloc, EXT4_MOUNT_DELALLOC,
1894 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
1895 {Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
1896 MOPT_EXT4_ONLY | MOPT_CLEAR},
1897 {Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
1898 {Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
1899 {Opt_commit, 0, MOPT_NO_EXT2},
1900 {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
1901 MOPT_EXT4_ONLY | MOPT_CLEAR},
1902 {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
1903 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
1904 {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
1905 EXT4_MOUNT_JOURNAL_CHECKSUM),
1906 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
1907 {Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
1908 {Opt_data_err, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_NO_EXT2},
1909 {Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
1910 {Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
1911 {Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
1912 {Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
1913 {Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
1914 {Opt_dax_type, 0, MOPT_EXT4_ONLY},
1915 {Opt_journal_dev, 0, MOPT_NO_EXT2},
1916 {Opt_journal_path, 0, MOPT_NO_EXT2},
1917 {Opt_journal_ioprio, 0, MOPT_NO_EXT2},
1918 {Opt_data, 0, MOPT_NO_EXT2},
1919 {Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
1920 #ifdef CONFIG_EXT4_FS_POSIX_ACL
1921 {Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
1922 #else
1923 {Opt_acl, 0, MOPT_NOSUPPORT},
1924 #endif
1925 {Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
1926 {Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
1927 {Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
1928 {Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
1929 MOPT_SET | MOPT_Q},
1930 {Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
1931 MOPT_SET | MOPT_Q},
1932 {Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
1933 MOPT_SET | MOPT_Q},
1934 {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
1935 EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
1936 MOPT_CLEAR | MOPT_Q},
1937 {Opt_usrjquota, 0, MOPT_Q},
1938 {Opt_grpjquota, 0, MOPT_Q},
1939 {Opt_jqfmt, 0, MOPT_QFMT},
1940 {Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
1941 {Opt_no_prefetch_block_bitmaps, EXT4_MOUNT_NO_PREFETCH_BLOCK_BITMAPS,
1942 MOPT_SET},
1943 #ifdef CONFIG_EXT4_DEBUG
1944 {Opt_fc_debug_force, EXT4_MOUNT2_JOURNAL_FAST_COMMIT,
1945 MOPT_SET | MOPT_2 | MOPT_EXT4_ONLY},
1946 #endif
1947 {Opt_abort, EXT4_MOUNT2_ABORT, MOPT_SET | MOPT_2},
1948 {Opt_err, 0, 0}
1949 };
1950
1951 #if IS_ENABLED(CONFIG_UNICODE)
1952 static const struct ext4_sb_encodings {
1953 __u16 magic;
1954 char *name;
1955 unsigned int version;
1956 } ext4_sb_encoding_map[] = {
1957 {EXT4_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)},
1958 };
1959
1960 static const struct ext4_sb_encodings *
1961 ext4_sb_read_encoding(const struct ext4_super_block *es)
1962 {
1963 __u16 magic = le16_to_cpu(es->s_encoding);
1964 int i;
1965
1966 for (i = 0; i < ARRAY_SIZE(ext4_sb_encoding_map); i++)
1967 if (magic == ext4_sb_encoding_map[i].magic)
1968 return &ext4_sb_encoding_map[i];
1969
1970 return NULL;
1971 }
1972 #endif
1973
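/*
 * EXT4_SPEC_* bits record which values were explicitly given during parsing
 * so that ext4_apply_options() only copies the fields the user actually set.
 */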
1974 #define EXT4_SPEC_JQUOTA (1 << 0)
1975 #define EXT4_SPEC_JQFMT (1 << 1)
1976 #define EXT4_SPEC_DATAJ (1 << 2)
1977 #define EXT4_SPEC_SB_BLOCK (1 << 3)
1978 #define EXT4_SPEC_JOURNAL_DEV (1 << 4)
1979 #define EXT4_SPEC_JOURNAL_IOPRIO (1 << 5)
1980 #define EXT4_SPEC_s_want_extra_isize (1 << 7)
1981 #define EXT4_SPEC_s_max_batch_time (1 << 8)
1982 #define EXT4_SPEC_s_min_batch_time (1 << 9)
1983 #define EXT4_SPEC_s_inode_readahead_blks (1 << 10)
1984 #define EXT4_SPEC_s_li_wait_mult (1 << 11)
1985 #define EXT4_SPEC_s_max_dir_size_kb (1 << 12)
1986 #define EXT4_SPEC_s_stripe (1 << 13)
1987 #define EXT4_SPEC_s_resuid (1 << 14)
1988 #define EXT4_SPEC_s_resgid (1 << 15)
1989 #define EXT4_SPEC_s_commit_interval (1 << 16)
1990 #define EXT4_SPEC_s_fc_debug_max_replay (1 << 17)
1991 #define EXT4_SPEC_s_sb_block (1 << 18)
1992 #define EXT4_SPEC_mb_optimize_scan (1 << 19)
1993
1994 struct ext4_fs_context {
1995 char *s_qf_names[EXT4_MAXQUOTAS];
1996 struct fscrypt_dummy_policy dummy_enc_policy;
1997 int s_jquota_fmt; /* Format of quota to use */
1998 #ifdef CONFIG_EXT4_DEBUG
1999 int s_fc_debug_max_replay;
2000 #endif
2001 unsigned short qname_spec;
2002 unsigned long vals_s_flags; /* Bits to set in s_flags */
2003 unsigned long mask_s_flags; /* Bits changed in s_flags */
2004 unsigned long journal_devnum;
2005 unsigned long s_commit_interval;
2006 unsigned long s_stripe;
2007 unsigned int s_inode_readahead_blks;
2008 unsigned int s_want_extra_isize;
2009 unsigned int s_li_wait_mult;
2010 unsigned int s_max_dir_size_kb;
2011 unsigned int journal_ioprio;
2012 unsigned int vals_s_mount_opt;
2013 unsigned int mask_s_mount_opt;
2014 unsigned int vals_s_mount_opt2;
2015 unsigned int mask_s_mount_opt2;
2016 unsigned int opt_flags; /* MOPT flags */
2017 unsigned int spec;
2018 u32 s_max_batch_time;
2019 u32 s_min_batch_time;
2020 kuid_t s_resuid;
2021 kgid_t s_resgid;
2022 ext4_fsblk_t s_sb_block;
2023 };
2024
2025 static void ext4_fc_free(struct fs_context *fc)
2026 {
2027 struct ext4_fs_context *ctx = fc->fs_private;
2028 int i;
2029
2030 if (!ctx)
2031 return;
2032
2033 for (i = 0; i < EXT4_MAXQUOTAS; i++)
2034 kfree(ctx->s_qf_names[i]);
2035
2036 fscrypt_free_dummy_policy(&ctx->dummy_enc_policy);
2037 kfree(ctx);
2038 }
2039
2040 static int ext4_init_fs_context(struct fs_context *fc)
2041 {
2042 struct ext4_fs_context *ctx;
2043
2044 ctx = kzalloc_obj(struct ext4_fs_context);
2045 if (!ctx)
2046 return -ENOMEM;
2047
2048 fc->fs_private = ctx;
2049 fc->ops = &ext4_context_ops;
2050
2051 /* i_version is always enabled now */
2052 fc->sb_flags |= SB_I_VERSION;
2053
2054 return 0;
2055 }
2056
2057 #ifdef CONFIG_QUOTA
2058 /*
2059 * Note the name of the specified quota file.
2060 */
2061 static int note_qf_name(struct fs_context *fc, int qtype,
2062 struct fs_parameter *param)
2063 {
2064 struct ext4_fs_context *ctx = fc->fs_private;
2065 char *qname;
2066
2067 if (param->size < 1) {
2068 ext4_msg(NULL, KERN_ERR, "Missing quota name");
2069 return -EINVAL;
2070 }
2071 if (strchr(param->string, '/')) {
2072 ext4_msg(NULL, KERN_ERR,
2073 "quotafile must be on filesystem root");
2074 return -EINVAL;
2075 }
2076 if (ctx->s_qf_names[qtype]) {
2077 if (strcmp(ctx->s_qf_names[qtype], param->string) != 0) {
2078 ext4_msg(NULL, KERN_ERR,
2079 "%s quota file already specified",
2080 QTYPE2NAME(qtype));
2081 return -EINVAL;
2082 }
2083 return 0;
2084 }
2085
2086 qname = kmemdup_nul(param->string, param->size, GFP_KERNEL);
2087 if (!qname) {
2088 ext4_msg(NULL, KERN_ERR,
2089 "Not enough memory for storing quotafile name");
2090 return -ENOMEM;
2091 }
2092 ctx->s_qf_names[qtype] = qname;
2093 ctx->qname_spec |= 1 << qtype;
2094 ctx->spec |= EXT4_SPEC_JQUOTA;
2095 return 0;
2096 }
2097
2098 /*
2099 * Clear the name of the specified quota file.
2100 */
2101 static int unnote_qf_name(struct fs_context *fc, int qtype)
2102 {
2103 struct ext4_fs_context *ctx = fc->fs_private;
2104
2105 kfree(ctx->s_qf_names[qtype]);
2106
2107 ctx->s_qf_names[qtype] = NULL;
2108 ctx->qname_spec |= 1 << qtype;
2109 ctx->spec |= EXT4_SPEC_JQUOTA;
2110 return 0;
2111 }
2112 #endif
2113
2114 static int ext4_parse_test_dummy_encryption(const struct fs_parameter *param,
2115 struct ext4_fs_context *ctx)
2116 {
2117 int err;
2118
2119 if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) {
2120 ext4_msg(NULL, KERN_WARNING,
2121 "test_dummy_encryption option not supported");
2122 return -EINVAL;
2123 }
2124 err = fscrypt_parse_test_dummy_encryption(param,
2125 &ctx->dummy_enc_policy);
2126 if (err == -EINVAL) {
2127 ext4_msg(NULL, KERN_WARNING,
2128 "Value of option \"%s\" is unrecognized", param->key);
2129 } else if (err == -EEXIST) {
2130 ext4_msg(NULL, KERN_WARNING,
2131 "Conflicting test_dummy_encryption options");
2132 return -EINVAL;
2133 }
2134 return err;
2135 }
2136
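/*
 * Generate ctx_{set,clear,test}_<field>() helpers. Setting or clearing a bit
 * also records it in the matching mask_s_* field, so ext4_apply_options()
 * later touches only the bits the parser modified.
 */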
2137 #define EXT4_SET_CTX(name) \
2138 static inline __maybe_unused \
2139 void ctx_set_##name(struct ext4_fs_context *ctx, unsigned long flag) \
2140 { \
2141 ctx->mask_s_##name |= flag; \
2142 ctx->vals_s_##name |= flag; \
2143 }
2144
2145 #define EXT4_CLEAR_CTX(name) \
2146 static inline __maybe_unused \
2147 void ctx_clear_##name(struct ext4_fs_context *ctx, unsigned long flag) \
2148 { \
2149 ctx->mask_s_##name |= flag; \
2150 ctx->vals_s_##name &= ~flag; \
2151 }
2152
2153 #define EXT4_TEST_CTX(name) \
2154 static inline unsigned long \
2155 ctx_test_##name(struct ext4_fs_context *ctx, unsigned long flag) \
2156 { \
2157 return (ctx->vals_s_##name & flag); \
2158 }
2159
2160 EXT4_SET_CTX(flags); /* set only */
2161 EXT4_SET_CTX(mount_opt);
2162 EXT4_CLEAR_CTX(mount_opt);
2163 EXT4_TEST_CTX(mount_opt);
2164 EXT4_SET_CTX(mount_opt2);
2165 EXT4_CLEAR_CTX(mount_opt2);
2166 EXT4_TEST_CTX(mount_opt2);
2167
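/*
 * Handle a single mount parameter. Options needing special treatment are
 * dealt with in the switch below; everything else falls through to the
 * generic MOPT_SET/MOPT_CLEAR handling driven by ext4_mount_opts[].
 */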
2168 static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param)
2169 {
2170 struct ext4_fs_context *ctx = fc->fs_private;
2171 struct fs_parse_result result;
2172 const struct mount_opts *m;
2173 int is_remount;
2174 int token;
2175
2176 token = fs_parse(fc, ext4_param_specs, param, &result);
2177 if (token < 0)
2178 return token;
2179 is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE;
2180
2181 for (m = ext4_mount_opts; m->token != Opt_err; m++)
2182 if (token == m->token)
2183 break;
2184
2185 ctx->opt_flags |= m->flags;
2186
2187 if (m->flags & MOPT_EXPLICIT) {
2188 if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
2189 ctx_set_mount_opt2(ctx, EXT4_MOUNT2_EXPLICIT_DELALLOC);
2190 } else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
2191 ctx_set_mount_opt2(ctx,
2192 EXT4_MOUNT2_EXPLICIT_JOURNAL_CHECKSUM);
2193 } else
2194 return -EINVAL;
2195 }
2196
2197 if (m->flags & MOPT_NOSUPPORT) {
2198 ext4_msg(NULL, KERN_ERR, "%s option not supported",
2199 param->key);
2200 return 0;
2201 }
2202
2203 switch (token) {
2204 #ifdef CONFIG_QUOTA
2205 case Opt_usrjquota:
2206 if (!*param->string)
2207 return unnote_qf_name(fc, USRQUOTA);
2208 else
2209 return note_qf_name(fc, USRQUOTA, param);
2210 case Opt_grpjquota:
2211 if (!*param->string)
2212 return unnote_qf_name(fc, GRPQUOTA);
2213 else
2214 return note_qf_name(fc, GRPQUOTA, param);
2215 #endif
2216 case Opt_sb:
2217 if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
2218 ext4_msg(NULL, KERN_WARNING,
2219 "Ignoring %s option on remount", param->key);
2220 } else {
2221 ctx->s_sb_block = result.uint_32;
2222 ctx->spec |= EXT4_SPEC_s_sb_block;
2223 }
2224 return 0;
2225 case Opt_removed:
2226 ext4_msg(NULL, KERN_WARNING, "Ignoring removed %s option",
2227 param->key);
2228 return 0;
2229 case Opt_inlinecrypt:
2230 #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
2231 ctx_set_flags(ctx, SB_INLINECRYPT);
2232 #else
2233 ext4_msg(NULL, KERN_ERR, "inline encryption not supported");
2234 #endif
2235 return 0;
2236 case Opt_errors:
2237 ctx_clear_mount_opt(ctx, EXT4_MOUNT_ERRORS_MASK);
2238 ctx_set_mount_opt(ctx, result.uint_32);
2239 return 0;
2240 #ifdef CONFIG_QUOTA
2241 case Opt_jqfmt:
2242 ctx->s_jquota_fmt = result.uint_32;
2243 ctx->spec |= EXT4_SPEC_JQFMT;
2244 return 0;
2245 #endif
2246 case Opt_data:
2247 ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS);
2248 ctx_set_mount_opt(ctx, result.uint_32);
2249 ctx->spec |= EXT4_SPEC_DATAJ;
2250 return 0;
2251 case Opt_commit:
2252 if (result.uint_32 == 0)
2253 result.uint_32 = JBD2_DEFAULT_MAX_COMMIT_AGE;
2254 else if (result.uint_32 > INT_MAX / HZ) {
2255 ext4_msg(NULL, KERN_ERR,
2256 "Invalid commit interval %d, "
2257 "must be smaller than %d",
2258 result.uint_32, INT_MAX / HZ);
2259 return -EINVAL;
2260 }
2261 ctx->s_commit_interval = HZ * result.uint_32;
2262 ctx->spec |= EXT4_SPEC_s_commit_interval;
2263 return 0;
2264 case Opt_debug_want_extra_isize:
2265 if ((result.uint_32 & 1) || (result.uint_32 < 4)) {
2266 ext4_msg(NULL, KERN_ERR,
2267 "Invalid want_extra_isize %d", result.uint_32);
2268 return -EINVAL;
2269 }
2270 ctx->s_want_extra_isize = result.uint_32;
2271 ctx->spec |= EXT4_SPEC_s_want_extra_isize;
2272 return 0;
2273 case Opt_max_batch_time:
2274 ctx->s_max_batch_time = result.uint_32;
2275 ctx->spec |= EXT4_SPEC_s_max_batch_time;
2276 return 0;
2277 case Opt_min_batch_time:
2278 ctx->s_min_batch_time = result.uint_32;
2279 ctx->spec |= EXT4_SPEC_s_min_batch_time;
2280 return 0;
2281 case Opt_inode_readahead_blks:
2282 if (result.uint_32 &&
2283 (result.uint_32 > (1 << 30) ||
2284 !is_power_of_2(result.uint_32))) {
2285 ext4_msg(NULL, KERN_ERR,
2286 "EXT4-fs: inode_readahead_blks must be "
2287 "0 or a power of 2 smaller than 2^31");
2288 return -EINVAL;
2289 }
2290 ctx->s_inode_readahead_blks = result.uint_32;
2291 ctx->spec |= EXT4_SPEC_s_inode_readahead_blks;
2292 return 0;
2293 case Opt_init_itable:
2294 ctx_set_mount_opt(ctx, EXT4_MOUNT_INIT_INODE_TABLE);
2295 ctx->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
2296 if (param->type == fs_value_is_string)
2297 ctx->s_li_wait_mult = result.uint_32;
2298 ctx->spec |= EXT4_SPEC_s_li_wait_mult;
2299 return 0;
2300 case Opt_max_dir_size_kb:
2301 ctx->s_max_dir_size_kb = result.uint_32;
2302 ctx->spec |= EXT4_SPEC_s_max_dir_size_kb;
2303 return 0;
2304 #ifdef CONFIG_EXT4_DEBUG
2305 case Opt_fc_debug_max_replay:
2306 ctx->s_fc_debug_max_replay = result.uint_32;
2307 ctx->spec |= EXT4_SPEC_s_fc_debug_max_replay;
2308 return 0;
2309 #endif
2310 case Opt_stripe:
2311 ctx->s_stripe = result.uint_32;
2312 ctx->spec |= EXT4_SPEC_s_stripe;
2313 return 0;
2314 case Opt_resuid:
2315 ctx->s_resuid = result.uid;
2316 ctx->spec |= EXT4_SPEC_s_resuid;
2317 return 0;
2318 case Opt_resgid:
2319 ctx->s_resgid = result.gid;
2320 ctx->spec |= EXT4_SPEC_s_resgid;
2321 return 0;
2322 case Opt_journal_dev:
2323 if (is_remount) {
2324 ext4_msg(NULL, KERN_ERR,
2325 "Cannot specify journal on remount");
2326 return -EINVAL;
2327 }
2328 ctx->journal_devnum = result.uint_32;
2329 ctx->spec |= EXT4_SPEC_JOURNAL_DEV;
2330 return 0;
2331 case Opt_journal_path:
2332 {
2333 struct inode *journal_inode;
2334 struct path path;
2335 int error;
2336
2337 if (is_remount) {
2338 ext4_msg(NULL, KERN_ERR,
2339 "Cannot specify journal on remount");
2340 return -EINVAL;
2341 }
2342
2343 error = fs_lookup_param(fc, param, 1, LOOKUP_FOLLOW, &path);
2344 if (error) {
2345 ext4_msg(NULL, KERN_ERR, "error: could not find "
2346 "journal device path");
2347 return -EINVAL;
2348 }
2349
2350 journal_inode = d_inode(path.dentry);
2351 ctx->journal_devnum = new_encode_dev(journal_inode->i_rdev);
2352 ctx->spec |= EXT4_SPEC_JOURNAL_DEV;
2353 path_put(&path);
2354 return 0;
2355 }
2356 case Opt_journal_ioprio:
2357 if (result.uint_32 > 7) {
2358 ext4_msg(NULL, KERN_ERR, "Invalid journal IO priority"
2359 " (must be 0-7)");
2360 return -EINVAL;
2361 }
2362 ctx->journal_ioprio =
2363 IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, result.uint_32);
2364 ctx->spec |= EXT4_SPEC_JOURNAL_IOPRIO;
2365 return 0;
2366 case Opt_test_dummy_encryption:
2367 return ext4_parse_test_dummy_encryption(param, ctx);
2368 case Opt_dax:
2369 case Opt_dax_type:
2370 #ifdef CONFIG_FS_DAX
2371 {
2372 int type = (token == Opt_dax) ?
2373 Opt_dax : result.uint_32;
2374
2375 switch (type) {
2376 case Opt_dax:
2377 case Opt_dax_always:
2378 ctx_set_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS);
2379 ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER);
2380 break;
2381 case Opt_dax_never:
2382 ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER);
2383 ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS);
2384 break;
2385 case Opt_dax_inode:
2386 ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS);
2387 ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER);
2388 /* Strictly for printing options */
2389 ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE);
2390 break;
2391 }
2392 return 0;
2393 }
2394 #else
2395 ext4_msg(NULL, KERN_INFO, "dax option not supported");
2396 return -EINVAL;
2397 #endif
2398 case Opt_data_err:
2399 if (result.uint_32 == Opt_data_err_abort)
2400 ctx_set_mount_opt(ctx, m->mount_opt);
2401 else if (result.uint_32 == Opt_data_err_ignore)
2402 ctx_clear_mount_opt(ctx, m->mount_opt);
2403 return 0;
2404 case Opt_mb_optimize_scan:
2405 if (result.int_32 == 1) {
2406 ctx_set_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN);
2407 ctx->spec |= EXT4_SPEC_mb_optimize_scan;
2408 } else if (result.int_32 == 0) {
2409 ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN);
2410 ctx->spec |= EXT4_SPEC_mb_optimize_scan;
2411 } else {
2412 ext4_msg(NULL, KERN_WARNING,
2413 "mb_optimize_scan should be set to 0 or 1.");
2414 return -EINVAL;
2415 }
2416 return 0;
2417 }
2418
2419 /*
2420 * At this point we should only be getting options requiring MOPT_SET,
2421 * or MOPT_CLEAR. Anything else is a bug
2422 */
2423 if (m->token == Opt_err) {
2424 ext4_msg(NULL, KERN_WARNING, "buggy handling of option %s",
2425 param->key);
2426 WARN_ON(1);
2427 return -EINVAL;
2428 }
2429
2430 else {
2431 unsigned int set = 0;
2432
2433 if ((param->type == fs_value_is_flag) ||
2434 result.uint_32 > 0)
2435 set = 1;
2436
2437 if (m->flags & MOPT_CLEAR)
2438 set = !set;
2439 else if (unlikely(!(m->flags & MOPT_SET))) {
2440 ext4_msg(NULL, KERN_WARNING,
2441 "buggy handling of option %s",
2442 param->key);
2443 WARN_ON(1);
2444 return -EINVAL;
2445 }
2446 if (m->flags & MOPT_2) {
2447 if (set != 0)
2448 ctx_set_mount_opt2(ctx, m->mount_opt);
2449 else
2450 ctx_clear_mount_opt2(ctx, m->mount_opt);
2451 } else {
2452 if (set != 0)
2453 ctx_set_mount_opt(ctx, m->mount_opt);
2454 else
2455 ctx_clear_mount_opt(ctx, m->mount_opt);
2456 }
2457 }
2458
2459 return 0;
2460 }
2461
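/*
 * Parse a comma-separated option string by splitting it into key[=value]
 * pairs and passing each one to ext4_parse_param(). Used for the options
 * stored in the on-disk superblock (s_mount_opts).
 */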
2462 static int parse_options(struct fs_context *fc, char *options)
2463 {
2464 struct fs_parameter param;
2465 int ret;
2466 char *key;
2467
2468 if (!options)
2469 return 0;
2470
2471 while ((key = strsep(&options, ",")) != NULL) {
2472 if (*key) {
2473 size_t v_len = 0;
2474 char *value = strchr(key, '=');
2475
2476 param.type = fs_value_is_flag;
2477 param.string = NULL;
2478
2479 if (value) {
2480 if (value == key)
2481 continue;
2482
2483 *value++ = 0;
2484 v_len = strlen(value);
2485 param.string = kmemdup_nul(value, v_len,
2486 GFP_KERNEL);
2487 if (!param.string)
2488 return -ENOMEM;
2489 param.type = fs_value_is_string;
2490 }
2491
2492 param.key = key;
2493 param.size = v_len;
2494
2495 ret = ext4_parse_param(fc, &param);
2496 kfree(param.string);
2497 if (ret < 0)
2498 return ret;
2499 }
2500 }
2501
2502 ret = ext4_validate_options(fc);
2503 if (ret < 0)
2504 return ret;
2505
2506 return 0;
2507 }
2508
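/*
 * Parse and apply the mount options recorded in the superblock's
 * s_mount_opts field. Parse failures are only warned about, since these
 * options come from the filesystem rather than from the user.
 */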
2509 static int parse_apply_sb_mount_options(struct super_block *sb,
2510 struct ext4_fs_context *m_ctx)
2511 {
2512 struct ext4_sb_info *sbi = EXT4_SB(sb);
2513 char s_mount_opts[64];
2514 struct ext4_fs_context *s_ctx = NULL;
2515 struct fs_context *fc = NULL;
2516 int ret = -ENOMEM;
2517
2518 if (!sbi->s_es->s_mount_opts[0])
2519 return 0;
2520
2521 if (strscpy_pad(s_mount_opts, sbi->s_es->s_mount_opts) < 0)
2522 return -E2BIG;
2523
2524 fc = kzalloc_obj(struct fs_context);
2525 if (!fc)
2526 return -ENOMEM;
2527
2528 s_ctx = kzalloc_obj(struct ext4_fs_context);
2529 if (!s_ctx)
2530 goto out_free;
2531
2532 fc->fs_private = s_ctx;
2533 fc->s_fs_info = sbi;
2534
2535 ret = parse_options(fc, s_mount_opts);
2536 if (ret < 0)
2537 goto parse_failed;
2538
2539 ret = ext4_check_opt_consistency(fc, sb);
2540 if (ret < 0) {
2541 parse_failed:
2542 ext4_msg(sb, KERN_WARNING,
2543 "failed to parse options in superblock: %s",
2544 s_mount_opts);
2545 ret = 0;
2546 goto out_free;
2547 }
2548
2549 if (s_ctx->spec & EXT4_SPEC_JOURNAL_DEV)
2550 m_ctx->journal_devnum = s_ctx->journal_devnum;
2551 if (s_ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)
2552 m_ctx->journal_ioprio = s_ctx->journal_ioprio;
2553
2554 ext4_apply_options(fc, sb);
2555 ret = 0;
2556
2557 out_free:
2558 ext4_fc_free(fc);
2559 kfree(fc);
2560 return ret;
2561 }
2562
2563 static void ext4_apply_quota_options(struct fs_context *fc,
2564 struct super_block *sb)
2565 {
2566 #ifdef CONFIG_QUOTA
2567 bool quota_feature = ext4_has_feature_quota(sb);
2568 struct ext4_fs_context *ctx = fc->fs_private;
2569 struct ext4_sb_info *sbi = EXT4_SB(sb);
2570 char *qname;
2571 int i;
2572
2573 if (quota_feature)
2574 return;
2575
2576 if (ctx->spec & EXT4_SPEC_JQUOTA) {
2577 for (i = 0; i < EXT4_MAXQUOTAS; i++) {
2578 if (!(ctx->qname_spec & (1 << i)))
2579 continue;
2580
2581 qname = ctx->s_qf_names[i]; /* May be NULL */
2582 if (qname)
2583 set_opt(sb, QUOTA);
2584 ctx->s_qf_names[i] = NULL;
2585 qname = rcu_replace_pointer(sbi->s_qf_names[i], qname,
2586 lockdep_is_held(&sb->s_umount));
2587 if (qname)
2588 kfree_rcu_mightsleep(qname);
2589 }
2590 }
2591
2592 if (ctx->spec & EXT4_SPEC_JQFMT)
2593 sbi->s_jquota_fmt = ctx->s_jquota_fmt;
2594 #endif
2595 }
2596
2597 /*
2598 * Check quota settings consistency.
2599 */
2600 static int ext4_check_quota_consistency(struct fs_context *fc,
2601 struct super_block *sb)
2602 {
2603 #ifdef CONFIG_QUOTA
2604 struct ext4_fs_context *ctx = fc->fs_private;
2605 struct ext4_sb_info *sbi = EXT4_SB(sb);
2606 bool quota_feature = ext4_has_feature_quota(sb);
2607 bool quota_loaded = sb_any_quota_loaded(sb);
2608 bool usr_qf_name, grp_qf_name, usrquota, grpquota;
2609 int quota_flags, i;
2610
2611 /*
2612 * We do the test below only for project quotas. 'usrquota' and
2613 * 'grpquota' mount options are allowed even without quota feature
2614 * to support legacy quotas in quota files.
2615 */
2616 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_PRJQUOTA) &&
2617 !ext4_has_feature_project(sb)) {
2618 ext4_msg(NULL, KERN_ERR, "Project quota feature not enabled. "
2619 "Cannot enable project quota enforcement.");
2620 return -EINVAL;
2621 }
2622
2623 quota_flags = EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
2624 EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA;
2625 if (quota_loaded &&
2626 ctx->mask_s_mount_opt & quota_flags &&
2627 !ctx_test_mount_opt(ctx, quota_flags))
2628 goto err_quota_change;
2629
2630 if (ctx->spec & EXT4_SPEC_JQUOTA) {
2631
2632 for (i = 0; i < EXT4_MAXQUOTAS; i++) {
2633 if (!(ctx->qname_spec & (1 << i)))
2634 continue;
2635
2636 if (quota_loaded &&
2637 !!sbi->s_qf_names[i] != !!ctx->s_qf_names[i])
2638 goto err_jquota_change;
2639
2640 if (sbi->s_qf_names[i] && ctx->s_qf_names[i] &&
2641 strcmp(get_qf_name(sb, sbi, i),
2642 ctx->s_qf_names[i]) != 0)
2643 goto err_jquota_specified;
2644 }
2645
2646 if (quota_feature) {
2647 ext4_msg(NULL, KERN_INFO,
2648 "Journaled quota options ignored when "
2649 "QUOTA feature is enabled");
2650 return 0;
2651 }
2652 }
2653
2654 if (ctx->spec & EXT4_SPEC_JQFMT) {
2655 if (sbi->s_jquota_fmt != ctx->s_jquota_fmt && quota_loaded)
2656 goto err_jquota_change;
2657 if (quota_feature) {
2658 ext4_msg(NULL, KERN_INFO, "Quota format mount options "
2659 "ignored when QUOTA feature is enabled");
2660 return 0;
2661 }
2662 }
2663
2664 /* Make sure we don't mix old and new quota format */
2665 usr_qf_name = (get_qf_name(sb, sbi, USRQUOTA) ||
2666 ctx->s_qf_names[USRQUOTA]);
2667 grp_qf_name = (get_qf_name(sb, sbi, GRPQUOTA) ||
2668 ctx->s_qf_names[GRPQUOTA]);
2669
2670 usrquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) ||
2671 test_opt(sb, USRQUOTA));
2672
2673 grpquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) ||
2674 test_opt(sb, GRPQUOTA));
2675
2676 if (usr_qf_name) {
2677 ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA);
2678 usrquota = false;
2679 }
2680 if (grp_qf_name) {
2681 ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA);
2682 grpquota = false;
2683 }
2684
2685 if (usr_qf_name || grp_qf_name) {
2686 if (usrquota || grpquota) {
2687 ext4_msg(NULL, KERN_ERR, "old and new quota "
2688 "format mixing");
2689 return -EINVAL;
2690 }
2691
2692 if (!(ctx->spec & EXT4_SPEC_JQFMT || sbi->s_jquota_fmt)) {
2693 ext4_msg(NULL, KERN_ERR, "journaled quota format "
2694 "not specified");
2695 return -EINVAL;
2696 }
2697 }
2698
2699 return 0;
2700
2701 err_quota_change:
2702 ext4_msg(NULL, KERN_ERR,
2703 "Cannot change quota options when quota turned on");
2704 return -EINVAL;
2705 err_jquota_change:
2706 ext4_msg(NULL, KERN_ERR, "Cannot change journaled quota "
2707 "options when quota turned on");
2708 return -EINVAL;
2709 err_jquota_specified:
2710 ext4_msg(NULL, KERN_ERR, "%s quota file already specified",
2711 QTYPE2NAME(i));
2712 return -EINVAL;
2713 #else
2714 return 0;
2715 #endif
2716 }
2717
2718 static int ext4_check_test_dummy_encryption(const struct fs_context *fc,
2719 struct super_block *sb)
2720 {
2721 const struct ext4_fs_context *ctx = fc->fs_private;
2722 const struct ext4_sb_info *sbi = EXT4_SB(sb);
2723
2724 if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy))
2725 return 0;
2726
2727 if (!ext4_has_feature_encrypt(sb)) {
2728 ext4_msg(NULL, KERN_WARNING,
2729 "test_dummy_encryption requires encrypt feature");
2730 return -EINVAL;
2731 }
2732 /*
2733 * This mount option is just for testing, and it's not worthwhile to
2734 * implement the extra complexity (e.g. RCU protection) that would be
2735 * needed to allow it to be set or changed during remount. We do allow
2736 * it to be specified during remount, but only if there is no change.
2737 */
2738 if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
2739 if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy,
2740 &ctx->dummy_enc_policy))
2741 return 0;
2742 ext4_msg(NULL, KERN_WARNING,
2743 "Can't set or change test_dummy_encryption on remount");
2744 return -EINVAL;
2745 }
2746 /* Also make sure s_mount_opts didn't contain a conflicting value. */
2747 if (fscrypt_is_dummy_policy_set(&sbi->s_dummy_enc_policy)) {
2748 if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy,
2749 &ctx->dummy_enc_policy))
2750 return 0;
2751 ext4_msg(NULL, KERN_WARNING,
2752 "Conflicting test_dummy_encryption options");
2753 return -EINVAL;
2754 }
2755 return 0;
2756 }
2757
2758 static void ext4_apply_test_dummy_encryption(struct ext4_fs_context *ctx,
2759 struct super_block *sb)
2760 {
2761 if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy) ||
2762 /* if already set, it was already verified to be the same */
2763 fscrypt_is_dummy_policy_set(&EXT4_SB(sb)->s_dummy_enc_policy))
2764 return;
2765 EXT4_SB(sb)->s_dummy_enc_policy = ctx->dummy_enc_policy;
2766 memset(&ctx->dummy_enc_policy, 0, sizeof(ctx->dummy_enc_policy));
2767 ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled");
2768 }
2769
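/*
 * Check that the freshly parsed options are consistent with the filesystem
 * features and, on remount, with the options currently in effect.
 */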
2770 static int ext4_check_opt_consistency(struct fs_context *fc,
2771 struct super_block *sb)
2772 {
2773 struct ext4_fs_context *ctx = fc->fs_private;
2774 struct ext4_sb_info *sbi = fc->s_fs_info;
2775 int is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE;
2776 int err;
2777
2778 if ((ctx->opt_flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
2779 ext4_msg(NULL, KERN_ERR,
2780 "Mount option(s) incompatible with ext2");
2781 return -EINVAL;
2782 }
2783 if ((ctx->opt_flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
2784 ext4_msg(NULL, KERN_ERR,
2785 "Mount option(s) incompatible with ext3");
2786 return -EINVAL;
2787 }
2788
2789 if (ctx->s_want_extra_isize >
2790 (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE)) {
2791 ext4_msg(NULL, KERN_ERR,
2792 "Invalid want_extra_isize %d",
2793 ctx->s_want_extra_isize);
2794 return -EINVAL;
2795 }
2796
2797 err = ext4_check_test_dummy_encryption(fc, sb);
2798 if (err)
2799 return err;
2800
2801 if ((ctx->spec & EXT4_SPEC_DATAJ) && is_remount) {
2802 if (!sbi->s_journal) {
2803 ext4_msg(NULL, KERN_WARNING,
2804 "Remounting file system with no journal "
2805 "so ignoring journalled data option");
2806 ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS);
2807 } else if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS) !=
2808 test_opt(sb, DATA_FLAGS)) {
2809 ext4_msg(NULL, KERN_ERR, "Cannot change data mode "
2810 "on remount");
2811 return -EINVAL;
2812 }
2813 }
2814
2815 if (is_remount) {
2816 if (!sbi->s_journal &&
2817 ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_ERR_ABORT)) {
2818 ext4_msg(NULL, KERN_WARNING,
2819 "Remounting fs w/o journal so ignoring data_err option");
2820 ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_ERR_ABORT);
2821 }
2822
2823 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) &&
2824 (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) {
2825 ext4_msg(NULL, KERN_ERR, "can't mount with "
2826 "both data=journal and dax");
2827 return -EINVAL;
2828 }
2829
2830 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) &&
2831 (!(sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
2832 (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER))) {
2833 fail_dax_change_remount:
2834 ext4_msg(NULL, KERN_ERR, "can't change "
2835 "dax mount option while remounting");
2836 return -EINVAL;
2837 } else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER) &&
2838 (!(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
2839 (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS))) {
2840 goto fail_dax_change_remount;
2841 } else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE) &&
2842 ((sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
2843 (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
2844 !(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_INODE))) {
2845 goto fail_dax_change_remount;
2846 }
2847 }
2848
2849 return ext4_check_quota_consistency(fc, sb);
2850 }
2851
2852 static void ext4_apply_options(struct fs_context *fc, struct super_block *sb)
2853 {
2854 struct ext4_fs_context *ctx = fc->fs_private;
2855 struct ext4_sb_info *sbi = fc->s_fs_info;
2856
2857 sbi->s_mount_opt &= ~ctx->mask_s_mount_opt;
2858 sbi->s_mount_opt |= ctx->vals_s_mount_opt;
2859 sbi->s_mount_opt2 &= ~ctx->mask_s_mount_opt2;
2860 sbi->s_mount_opt2 |= ctx->vals_s_mount_opt2;
2861 sb->s_flags &= ~ctx->mask_s_flags;
2862 sb->s_flags |= ctx->vals_s_flags;
2863
2864 #define APPLY(X) ({ if (ctx->spec & EXT4_SPEC_##X) sbi->X = ctx->X; })
2865 APPLY(s_commit_interval);
2866 APPLY(s_stripe);
2867 APPLY(s_max_batch_time);
2868 APPLY(s_min_batch_time);
2869 APPLY(s_want_extra_isize);
2870 APPLY(s_inode_readahead_blks);
2871 APPLY(s_max_dir_size_kb);
2872 APPLY(s_li_wait_mult);
2873 APPLY(s_resgid);
2874 APPLY(s_resuid);
2875
2876 #ifdef CONFIG_EXT4_DEBUG
2877 APPLY(s_fc_debug_max_replay);
2878 #endif
2879
2880 ext4_apply_quota_options(fc, sb);
2881 ext4_apply_test_dummy_encryption(ctx, sb);
2882 }
2883
2884
2885 static int ext4_validate_options(struct fs_context *fc)
2886 {
2887 #ifdef CONFIG_QUOTA
2888 struct ext4_fs_context *ctx = fc->fs_private;
2889 char *usr_qf_name, *grp_qf_name;
2890
2891 usr_qf_name = ctx->s_qf_names[USRQUOTA];
2892 grp_qf_name = ctx->s_qf_names[GRPQUOTA];
2893
2894 if (usr_qf_name || grp_qf_name) {
2895 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) && usr_qf_name)
2896 ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA);
2897
2898 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) && grp_qf_name)
2899 ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA);
2900
2901 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) ||
2902 ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA)) {
2903 ext4_msg(NULL, KERN_ERR, "old and new quota "
2904 "format mixing");
2905 return -EINVAL;
2906 }
2907 }
2908 #endif
2909 return 1;
2910 }
2911
2912 static inline void ext4_show_quota_options(struct seq_file *seq,
2913 struct super_block *sb)
2914 {
2915 #if defined(CONFIG_QUOTA)
2916 struct ext4_sb_info *sbi = EXT4_SB(sb);
2917 char *usr_qf_name, *grp_qf_name;
2918
2919 if (sbi->s_jquota_fmt) {
2920 char *fmtname = "";
2921
2922 switch (sbi->s_jquota_fmt) {
2923 case QFMT_VFS_OLD:
2924 fmtname = "vfsold";
2925 break;
2926 case QFMT_VFS_V0:
2927 fmtname = "vfsv0";
2928 break;
2929 case QFMT_VFS_V1:
2930 fmtname = "vfsv1";
2931 break;
2932 }
2933 seq_printf(seq, ",jqfmt=%s", fmtname);
2934 }
2935
2936 rcu_read_lock();
2937 usr_qf_name = rcu_dereference(sbi->s_qf_names[USRQUOTA]);
2938 grp_qf_name = rcu_dereference(sbi->s_qf_names[GRPQUOTA]);
2939 if (usr_qf_name)
2940 seq_show_option(seq, "usrjquota", usr_qf_name);
2941 if (grp_qf_name)
2942 seq_show_option(seq, "grpjquota", grp_qf_name);
2943 rcu_read_unlock();
2944 #endif
2945 }
2946
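/* Map a parser token back to the flag-form option name for show_options. */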
2947 static const char *token2str(int token)
2948 {
2949 const struct fs_parameter_spec *spec;
2950
2951 for (spec = ext4_param_specs; spec->name != NULL; spec++)
2952 if (spec->opt == token && !spec->type)
2953 break;
2954 return spec->name;
2955 }
2956
2957 /*
2958 * Show an option if
2959 * - it's set to a non-default value OR
2960 * - if the per-sb default is different from the global default
2961 */
2962 static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
2963 int nodefs)
2964 {
2965 struct ext4_sb_info *sbi = EXT4_SB(sb);
2966 struct ext4_super_block *es = sbi->s_es;
2967 int def_errors;
2968 const struct mount_opts *m;
2969 char sep = nodefs ? '\n' : ',';
2970
2971 #define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
2972 #define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)
2973
2974 if (sbi->s_sb_block != 1)
2975 SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);
2976
2977 for (m = ext4_mount_opts; m->token != Opt_err; m++) {
2978 int want_set = m->flags & MOPT_SET;
2979 int opt_2 = m->flags & MOPT_2;
2980 unsigned int mount_opt, def_mount_opt;
2981
2982 if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
2983 m->flags & MOPT_SKIP)
2984 continue;
2985
2986 if (opt_2) {
2987 mount_opt = sbi->s_mount_opt2;
2988 def_mount_opt = sbi->s_def_mount_opt2;
2989 } else {
2990 mount_opt = sbi->s_mount_opt;
2991 def_mount_opt = sbi->s_def_mount_opt;
2992 }
2993 /* skip if same as the default */
2994 if (!nodefs && !(m->mount_opt & (mount_opt ^ def_mount_opt)))
2995 continue;
2996 /* select Opt_noFoo vs Opt_Foo */
2997 if ((want_set &&
2998 (mount_opt & m->mount_opt) != m->mount_opt) ||
2999 (!want_set && (mount_opt & m->mount_opt)))
3000 continue;
3001 SEQ_OPTS_PRINT("%s", token2str(m->token));
3002 }
3003
3004 if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
3005 ext4_get_resuid(es) != EXT4_DEF_RESUID)
3006 SEQ_OPTS_PRINT("resuid=%u",
3007 from_kuid_munged(&init_user_ns, sbi->s_resuid));
3008 if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
3009 ext4_get_resgid(es) != EXT4_DEF_RESGID)
3010 SEQ_OPTS_PRINT("resgid=%u",
3011 from_kgid_munged(&init_user_ns, sbi->s_resgid));
3012 def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
3013 if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
3014 SEQ_OPTS_PUTS("errors=remount-ro");
3015 if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
3016 SEQ_OPTS_PUTS("errors=continue");
3017 if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
3018 SEQ_OPTS_PUTS("errors=panic");
3019 if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
3020 SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
3021 if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
3022 SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
3023 if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
3024 SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
3025 if (nodefs && sb->s_flags & SB_I_VERSION)
3026 SEQ_OPTS_PUTS("i_version");
3027 if (nodefs || sbi->s_stripe)
3028 SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
3029 if (nodefs || EXT4_MOUNT_DATA_FLAGS &
3030 (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
3031 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
3032 SEQ_OPTS_PUTS("data=journal");
3033 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
3034 SEQ_OPTS_PUTS("data=ordered");
3035 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
3036 SEQ_OPTS_PUTS("data=writeback");
3037 }
3038 if (nodefs ||
3039 sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
3040 SEQ_OPTS_PRINT("inode_readahead_blks=%u",
3041 sbi->s_inode_readahead_blks);
3042
3043 if (test_opt(sb, INIT_INODE_TABLE) && (nodefs ||
3044 (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
3045 SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
3046 if (nodefs || sbi->s_max_dir_size_kb)
3047 SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
3048 if (test_opt(sb, DATA_ERR_ABORT))
3049 SEQ_OPTS_PUTS("data_err=abort");
3050
3051 fscrypt_show_test_dummy_encryption(seq, sep, sb);
3052
3053 if (sb->s_flags & SB_INLINECRYPT)
3054 SEQ_OPTS_PUTS("inlinecrypt");
3055
3056 if (test_opt(sb, DAX_ALWAYS)) {
3057 if (IS_EXT2_SB(sb))
3058 SEQ_OPTS_PUTS("dax");
3059 else
3060 SEQ_OPTS_PUTS("dax=always");
3061 } else if (test_opt2(sb, DAX_NEVER)) {
3062 SEQ_OPTS_PUTS("dax=never");
3063 } else if (test_opt2(sb, DAX_INODE)) {
3064 SEQ_OPTS_PUTS("dax=inode");
3065 }
3066
3067 if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD &&
3068 !test_opt2(sb, MB_OPTIMIZE_SCAN)) {
3069 SEQ_OPTS_PUTS("mb_optimize_scan=0");
3070 } else if (sbi->s_groups_count < MB_DEFAULT_LINEAR_SCAN_THRESHOLD &&
3071 test_opt2(sb, MB_OPTIMIZE_SCAN)) {
3072 SEQ_OPTS_PUTS("mb_optimize_scan=1");
3073 }
3074
3075 if (nodefs && !test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS))
3076 SEQ_OPTS_PUTS("prefetch_block_bitmaps");
3077
3078 if (ext4_emergency_ro(sb))
3079 SEQ_OPTS_PUTS("emergency_ro");
3080
3081 if (ext4_forced_shutdown(sb))
3082 SEQ_OPTS_PUTS("shutdown");
3083
3084 ext4_show_quota_options(seq, sb);
3085 return 0;
3086 }
3087
3088 static int ext4_show_options(struct seq_file *seq, struct dentry *root)
3089 {
3090 return _ext4_show_options(seq, root->d_sb, 0);
3091 }
3092
3093 int ext4_seq_options_show(struct seq_file *seq, void *offset)
3094 {
3095 struct super_block *sb = seq->private;
3096 int rc;
3097
3098 seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
3099 rc = _ext4_show_options(seq, sb, 1);
3100 seq_putc(seq, '\n');
3101 return rc;
3102 }
3103
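/*
 * Final superblock setup at mount time: warn about unchecked or errored
 * filesystems, bump the mount count, set the recovery feature flags when a
 * journal is present, and write the superblock back out.
 */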
3104 static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
3105 int read_only)
3106 {
3107 struct ext4_sb_info *sbi = EXT4_SB(sb);
3108 int err = 0;
3109
3110 if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
3111 ext4_msg(sb, KERN_ERR, "revision level too high, "
3112 "forcing read-only mode");
3113 err = -EROFS;
3114 goto done;
3115 }
3116 if (read_only)
3117 goto done;
3118 if (!(sbi->s_mount_state & EXT4_VALID_FS))
3119 ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
3120 "running e2fsck is recommended");
3121 else if (sbi->s_mount_state & EXT4_ERROR_FS)
3122 ext4_msg(sb, KERN_WARNING,
3123 "warning: mounting fs with errors, "
3124 "running e2fsck is recommended");
3125 else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
3126 le16_to_cpu(es->s_mnt_count) >=
3127 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
3128 ext4_msg(sb, KERN_WARNING,
3129 "warning: maximal mount count reached, "
3130 "running e2fsck is recommended");
3131 else if (le32_to_cpu(es->s_checkinterval) &&
3132 (ext4_get_tstamp(es, s_lastcheck) +
3133 le32_to_cpu(es->s_checkinterval) <= ktime_get_real_seconds()))
3134 ext4_msg(sb, KERN_WARNING,
3135 "warning: checktime reached, "
3136 "running e2fsck is recommended");
3137 if (!sbi->s_journal)
3138 es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
3139 if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
3140 es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
3141 le16_add_cpu(&es->s_mnt_count, 1);
3142 ext4_update_tstamp(es, s_mtime);
3143 if (sbi->s_journal) {
3144 ext4_set_feature_journal_needs_recovery(sb);
3145 if (ext4_has_feature_orphan_file(sb))
3146 ext4_set_feature_orphan_present(sb);
3147 }
3148
3149 err = ext4_commit_super(sb);
3150 done:
3151 if (test_opt(sb, DEBUG))
3152 printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
3153 "bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
3154 sb->s_blocksize,
3155 sbi->s_groups_count,
3156 EXT4_BLOCKS_PER_GROUP(sb),
3157 EXT4_INODES_PER_GROUP(sb),
3158 sbi->s_mount_opt, sbi->s_mount_opt2);
3159 return err;
3160 }
3161
3162 int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
3163 {
3164 struct ext4_sb_info *sbi = EXT4_SB(sb);
3165 struct flex_groups **old_groups, **new_groups;
3166 int size, i, j;
3167
3168 if (!sbi->s_log_groups_per_flex)
3169 return 0;
3170
3171 size = ext4_flex_group(sbi, ngroup - 1) + 1;
3172 if (size <= sbi->s_flex_groups_allocated)
3173 return 0;
3174
3175 new_groups = kvzalloc(roundup_pow_of_two(size *
3176 sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
3177 if (!new_groups) {
3178 ext4_msg(sb, KERN_ERR,
3179 "not enough memory for %d flex group pointers", size);
3180 return -ENOMEM;
3181 }
3182 for (i = sbi->s_flex_groups_allocated; i < size; i++) {
3183 new_groups[i] = kvzalloc(roundup_pow_of_two(
3184 sizeof(struct flex_groups)),
3185 GFP_KERNEL);
3186 if (!new_groups[i]) {
3187 for (j = sbi->s_flex_groups_allocated; j < i; j++)
3188 kvfree(new_groups[j]);
3189 kvfree(new_groups);
3190 ext4_msg(sb, KERN_ERR,
3191 "not enough memory for %d flex groups", size);
3192 return -ENOMEM;
3193 }
3194 }
3195 rcu_read_lock();
3196 old_groups = rcu_dereference(sbi->s_flex_groups);
3197 if (old_groups)
3198 memcpy(new_groups, old_groups,
3199 (sbi->s_flex_groups_allocated *
3200 sizeof(struct flex_groups *)));
3201 rcu_read_unlock();
3202 rcu_assign_pointer(sbi->s_flex_groups, new_groups);
3203 sbi->s_flex_groups_allocated = size;
3204 if (old_groups)
3205 ext4_kvfree_array_rcu(old_groups);
3206 return 0;
3207 }
3208
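/*
 * Populate the per-flexgroup free inode/cluster and used directory counters
 * from the group descriptors. Returns 1 on success (or when flex_bg is not
 * in use) and 0 on allocation failure.
 */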
3209 static int ext4_fill_flex_info(struct super_block *sb)
3210 {
3211 struct ext4_sb_info *sbi = EXT4_SB(sb);
3212 struct ext4_group_desc *gdp = NULL;
3213 struct flex_groups *fg;
3214 ext4_group_t flex_group;
3215 int i, err;
3216
3217 sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
3218 if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
3219 sbi->s_log_groups_per_flex = 0;
3220 return 1;
3221 }
3222
3223 err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
3224 if (err)
3225 goto failed;
3226
3227 for (i = 0; i < sbi->s_groups_count; i++) {
3228 gdp = ext4_get_group_desc(sb, i, NULL);
3229
3230 flex_group = ext4_flex_group(sbi, i);
3231 fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
3232 atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
3233 atomic64_add(ext4_free_group_clusters(sb, gdp),
3234 &fg->free_clusters);
3235 atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
3236 }
3237
3238 return 1;
3239 failed:
3240 return 0;
3241 }
3242
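/*
 * Compute the group descriptor checksum: with metadata_csum this is the
 * filesystem's crc32c folded to 16 bits; otherwise, when the gdt_csum
 * feature is enabled, the legacy crc16 over the UUID, group number and
 * descriptor is used.
 */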
3243 static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
3244 struct ext4_group_desc *gdp)
3245 {
3246 int offset = offsetof(struct ext4_group_desc, bg_checksum);
3247 __u16 crc = 0;
3248 __le32 le_group = cpu_to_le32(block_group);
3249 struct ext4_sb_info *sbi = EXT4_SB(sb);
3250
3251 if (ext4_has_feature_metadata_csum(sbi->s_sb)) {
3252 /* Use new metadata_csum algorithm */
3253 __u32 csum32;
3254 __u16 dummy_csum = 0;
3255
3256 csum32 = ext4_chksum(sbi->s_csum_seed, (__u8 *)&le_group,
3257 sizeof(le_group));
3258 csum32 = ext4_chksum(csum32, (__u8 *)gdp, offset);
3259 csum32 = ext4_chksum(csum32, (__u8 *)&dummy_csum,
3260 sizeof(dummy_csum));
3261 offset += sizeof(dummy_csum);
3262 if (offset < sbi->s_desc_size)
3263 csum32 = ext4_chksum(csum32, (__u8 *)gdp + offset,
3264 sbi->s_desc_size - offset);
3265
3266 crc = csum32 & 0xFFFF;
3267 goto out;
3268 }
3269
3270 /* old crc16 code */
3271 if (!ext4_has_feature_gdt_csum(sb))
3272 return 0;
3273
3274 crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
3275 crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
3276 crc = crc16(crc, (__u8 *)gdp, offset);
3277 offset += sizeof(gdp->bg_checksum); /* skip checksum */
3278 /* for checksum of struct ext4_group_desc do the rest...*/
3279 if (ext4_has_feature_64bit(sb) && offset < sbi->s_desc_size)
3280 crc = crc16(crc, (__u8 *)gdp + offset,
3281 sbi->s_desc_size - offset);
3282
3283 out:
3284 return cpu_to_le16(crc);
3285 }
3286
3287 int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
3288 struct ext4_group_desc *gdp)
3289 {
3290 if (ext4_has_group_desc_csum(sb) &&
3291 (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
3292 return 0;
3293
3294 return 1;
3295 }
3296
3297 void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
3298 struct ext4_group_desc *gdp)
3299 {
3300 if (!ext4_has_group_desc_csum(sb))
3301 return;
3302 gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
3303 }
3304
3305 /* Called at mount-time, super-block is locked */
3306 static int ext4_check_descriptors(struct super_block *sb,
3307 ext4_fsblk_t sb_block,
3308 ext4_group_t *first_not_zeroed)
3309 {
3310 struct ext4_sb_info *sbi = EXT4_SB(sb);
3311 ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
3312 ext4_fsblk_t last_block;
3313 ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
3314 ext4_fsblk_t block_bitmap;
3315 ext4_fsblk_t inode_bitmap;
3316 ext4_fsblk_t inode_table;
3317 int flexbg_flag = 0;
3318 ext4_group_t i, grp = sbi->s_groups_count;
3319
3320 if (ext4_has_feature_flex_bg(sb))
3321 flexbg_flag = 1;
3322
3323 ext4_debug("Checking group descriptors");
3324
3325 for (i = 0; i < sbi->s_groups_count; i++) {
3326 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
3327
3328 if (i == sbi->s_groups_count - 1 || flexbg_flag)
3329 last_block = ext4_blocks_count(sbi->s_es) - 1;
3330 else
3331 last_block = first_block +
3332 (EXT4_BLOCKS_PER_GROUP(sb) - 1);
3333
3334 if ((grp == sbi->s_groups_count) &&
3335 !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
3336 grp = i;
3337
3338 block_bitmap = ext4_block_bitmap(sb, gdp);
3339 if (block_bitmap == sb_block) {
3340 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3341 "Block bitmap for group %u overlaps "
3342 "superblock", i);
3343 if (!sb_rdonly(sb))
3344 return 0;
3345 }
3346 if (block_bitmap >= sb_block + 1 &&
3347 block_bitmap <= last_bg_block) {
3348 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3349 "Block bitmap for group %u overlaps "
3350 "block group descriptors", i);
3351 if (!sb_rdonly(sb))
3352 return 0;
3353 }
3354 if (block_bitmap < first_block || block_bitmap > last_block) {
3355 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3356 "Block bitmap for group %u not in group "
3357 "(block %llu)!", i, block_bitmap);
3358 return 0;
3359 }
3360 inode_bitmap = ext4_inode_bitmap(sb, gdp);
3361 if (inode_bitmap == sb_block) {
3362 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3363 "Inode bitmap for group %u overlaps "
3364 "superblock", i);
3365 if (!sb_rdonly(sb))
3366 return 0;
3367 }
3368 if (inode_bitmap >= sb_block + 1 &&
3369 inode_bitmap <= last_bg_block) {
3370 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3371 "Inode bitmap for group %u overlaps "
3372 "block group descriptors", i);
3373 if (!sb_rdonly(sb))
3374 return 0;
3375 }
3376 if (inode_bitmap < first_block || inode_bitmap > last_block) {
3377 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3378 "Inode bitmap for group %u not in group "
3379 "(block %llu)!", i, inode_bitmap);
3380 return 0;
3381 }
3382 inode_table = ext4_inode_table(sb, gdp);
3383 if (inode_table == sb_block) {
3384 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3385 "Inode table for group %u overlaps "
3386 "superblock", i);
3387 if (!sb_rdonly(sb))
3388 return 0;
3389 }
3390 if (inode_table >= sb_block + 1 &&
3391 inode_table <= last_bg_block) {
3392 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3393 "Inode table for group %u overlaps "
3394 "block group descriptors", i);
3395 if (!sb_rdonly(sb))
3396 return 0;
3397 }
3398 if (inode_table < first_block ||
3399 inode_table + sbi->s_itb_per_group - 1 > last_block) {
3400 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3401 "Inode table for group %u not in group "
3402 "(block %llu)!", i, inode_table);
3403 return 0;
3404 }
3405 ext4_lock_group(sb, i);
3406 if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
3407 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
3408 "Checksum for group %u failed (%u!=%u)",
3409 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
3410 gdp)), le16_to_cpu(gdp->bg_checksum));
3411 if (!sb_rdonly(sb)) {
3412 ext4_unlock_group(sb, i);
3413 return 0;
3414 }
3415 }
3416 ext4_unlock_group(sb, i);
3417 if (!flexbg_flag)
3418 first_block += EXT4_BLOCKS_PER_GROUP(sb);
3419 }
3420 if (NULL != first_not_zeroed)
3421 *first_not_zeroed = grp;
3422 return 1;
3423 }
3424
3425 /*
3426 * Maximal extent format file size.
3427 * Resulting logical blkno at s_maxbytes must fit in our on-disk
3428 * extent format containers, within a sector_t, and within i_blocks
3429 * in the vfs. ext4 inode has 48 bits of i_block in fsblock units,
3430 * so that won't be a limiting factor.
3431 *
3432 * However, there is another limiting factor. We store extents in the form
3433 * of a starting block and a length, hence the resulting length of the extent
3434 * covering the maximum file size must fit into on-disk format containers as
3435 * well. Given that the length is always one unit bigger than the largest
3436 * block offset (because we count 0 as well), we have to lower s_maxbytes by one fs block.
3437 *
3438 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
3439 */
3440 static loff_t ext4_max_size(int blkbits, int has_huge_files)
3441 {
3442 loff_t res;
3443 loff_t upper_limit = MAX_LFS_FILESIZE;
3444
3445 BUILD_BUG_ON(sizeof(blkcnt_t) < sizeof(u64));
3446
3447 if (!has_huge_files) {
3448 upper_limit = (1LL << 32) - 1;
3449
3450 /* total blocks in file system block size */
3451 upper_limit >>= (blkbits - 9);
3452 upper_limit <<= blkbits;
3453 }
3454
3455 /*
3456 * 32-bit extent-start container, ee_block. We lower the maxbytes
3457 * by one fs block, so ee_len can cover the extent of maximum file
3458 * size
3459 */
3460 res = (1LL << 32) - 1;
3461 res <<= blkbits;
3462
3463 /* Sanity check against vm- & vfs- imposed limits */
3464 if (res > upper_limit)
3465 res = upper_limit;
3466
3467 return res;
3468 }
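/*
 * Worked example (illustrative): for 4KiB blocks (blkbits == 12) with the
 * huge_file feature, res == ((1LL << 32) - 1) << 12, i.e. 16 TiB minus one
 * block. Without huge_file the i_blocks limit dominates and the result is
 * 2 TiB minus one block.
 */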
3469
3470 /*
3471 * Maximal bitmap file size. There is a direct, and {,double-,triple-}indirect
3472 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
3473 * We need to be 1 filesystem block less than the 2^48 sector limit.
3474 */
3475 static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
3476 {
3477 loff_t upper_limit, res = EXT4_NDIR_BLOCKS;
3478 int meta_blocks;
3479 unsigned int ppb = 1 << (bits - 2);
3480
3481 /*
3482 * This is calculated to be the largest file size for a dense, block
3483 * mapped file such that the file's total number of 512-byte sectors,
3484 * including data and all indirect blocks, does not exceed (2^48 - 1).
3485 *
3486 * __u32 i_blocks_lo and __u16 i_blocks_high represent the total
3487 * number of 512-byte sectors of the file.
3488 */
3489 if (!has_huge_files) {
3490 /*
3491 * !has_huge_files implies that the inode i_block field
3492 * represents total file blocks in 2^32 512-byte sectors ==
3493 * size of vfs inode i_blocks * 8
3494 */
3495 upper_limit = (1LL << 32) - 1;
3496
3497 /* total blocks in file system block size */
3498 upper_limit >>= (bits - 9);
3499
3500 } else {
3501 /*
3502 * We use 48 bit ext4_inode i_blocks
3503 * With EXT4_HUGE_FILE_FL set the i_blocks
3504 * represent total number of blocks in
3505 * file system block size
3506 */
3507 upper_limit = (1LL << 48) - 1;
3508
3509 }
3510
3511 /* Compute how many blocks we can address by block tree */
3512 res += ppb;
3513 res += ppb * ppb;
3514 res += ((loff_t)ppb) * ppb * ppb;
3515 /* Compute how many metadata blocks are needed */
3516 meta_blocks = 1;
3517 meta_blocks += 1 + ppb;
3518 meta_blocks += 1 + ppb + ppb * ppb;
3519 /* Does block tree limit file size? */
3520 if (res + meta_blocks <= upper_limit)
3521 goto check_lfs;
3522
3523 res = upper_limit;
3524 /* How many metadata blocks are needed for addressing upper_limit? */
3525 upper_limit -= EXT4_NDIR_BLOCKS;
3526 /* indirect blocks */
3527 meta_blocks = 1;
3528 upper_limit -= ppb;
3529 /* double indirect blocks */
3530 if (upper_limit < ppb * ppb) {
3531 meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb);
3532 res -= meta_blocks;
3533 goto check_lfs;
3534 }
3535 meta_blocks += 1 + ppb;
3536 upper_limit -= ppb * ppb;
3537 /* triple indirect blocks for the rest */
3538 meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb) +
3539 DIV_ROUND_UP_ULL(upper_limit, ppb*ppb);
3540 res -= meta_blocks;
3541 check_lfs:
3542 res <<= bits;
3543 if (res > MAX_LFS_FILESIZE)
3544 res = MAX_LFS_FILESIZE;
3545
3546 return res;
3547 }
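/*
 * Worked example (illustrative): with 4KiB blocks (bits == 12) ppb == 1024,
 * so the block tree addresses 12 + 1024 + 1024^2 + 1024^3 data blocks,
 * roughly 4 TiB; with huge_file that is far below the i_blocks limit, so
 * the indirect-block geometry itself is what bounds block-mapped files.
 */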
3548
3549 static ext4_fsblk_t descriptor_loc(struct super_block *sb,
3550 ext4_fsblk_t logical_sb_block, int nr)
3551 {
3552 struct ext4_sb_info *sbi = EXT4_SB(sb);
3553 ext4_group_t bg, first_meta_bg;
3554 int has_super = 0;
3555
3556 first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
3557
3558 if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
3559 return logical_sb_block + nr + 1;
3560 bg = sbi->s_desc_per_block * nr;
3561 if (ext4_bg_has_super(sb, bg))
3562 has_super = 1;
3563
3564 /*
3565 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
3566 * block 2, not 1. If s_first_data_block == 0 (bigalloc is enabled
3567 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
3568 * compensate.
3569 */
3570 if (sb->s_blocksize == 1024 && nr == 0 &&
3571 le32_to_cpu(sbi->s_es->s_first_data_block) == 0)
3572 has_super++;
3573
3574 return (has_super + ext4_group_first_block_no(sb, bg));
3575 }
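/*
 * Example (illustrative): without meta_bg, descriptor block nr simply
 * follows the superblock at logical_sb_block + nr + 1. With meta_bg and,
 * say, 64 descriptors per block, descriptor block 1 describes groups
 * 64..127 and is placed at the first block of group 64 (after that
 * group's backup superblock, if it has one).
 */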
3576
3577 /**
3578 * ext4_get_stripe_size: Get the stripe size.
3579 * @sbi: In memory super block info
3580 *
3581 * If the stripe size was specified via a mount option and does not
3582 * exceed the blocks per group, use that value. Otherwise fall back to
3583 * the superblock values (stripe width, then stride), each used only if
3584 * it does not exceed the blocks per group; if nothing qualifies, return 0.
3585 * The allocator needs the stripe size to be less than the blocks per group.
3586 *
3587 */
3588 static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
3589 {
3590 unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
3591 unsigned long stripe_width =
3592 le32_to_cpu(sbi->s_es->s_raid_stripe_width);
3593 int ret;
3594
3595 if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
3596 ret = sbi->s_stripe;
3597 else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
3598 ret = stripe_width;
3599 else if (stride && stride <= sbi->s_blocks_per_group)
3600 ret = stride;
3601 else
3602 ret = 0;
3603
3604 /*
3605 * If the stripe width is 1, this makes no sense and
3606 * we set it to 0 to turn off stripe handling code.
3607 */
3608 if (ret <= 1)
3609 ret = 0;
3610
3611 return ret;
3612 }
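/*
 * Example (illustrative): with 32768 blocks per group, no stripe= mount
 * option, s_raid_stripe_width == 32 and s_raid_stride == 8, this returns
 * 32; a value of 1, or anything exceeding the blocks per group, degrades
 * to 0 so the stripe handling code stays off.
 */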
3613
3614 /*
3615 * Check whether this filesystem can be mounted based on
3616 * the features present and the RDONLY/RDWR mount requested.
3617 * Returns 1 if this filesystem can be mounted as requested,
3618 * 0 if it cannot be.
3619 */
3620 int ext4_feature_set_ok(struct super_block *sb, int readonly)
3621 {
3622 if (ext4_has_unknown_ext4_incompat_features(sb)) {
3623 ext4_msg(sb, KERN_ERR,
3624 "Couldn't mount because of "
3625 "unsupported optional features (%x)",
3626 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
3627 ~EXT4_FEATURE_INCOMPAT_SUPP));
3628 return 0;
3629 }
3630
3631 if (!IS_ENABLED(CONFIG_UNICODE) && ext4_has_feature_casefold(sb)) {
3632 ext4_msg(sb, KERN_ERR,
3633 "Filesystem with casefold feature cannot be "
3634 "mounted without CONFIG_UNICODE");
3635 return 0;
3636 }
3637
3638 if (readonly)
3639 return 1;
3640
3641 if (ext4_has_feature_readonly(sb)) {
3642 ext4_msg(sb, KERN_INFO, "filesystem is read-only");
3643 sb->s_flags |= SB_RDONLY;
3644 return 1;
3645 }
3646
3647 /* Check that feature set is OK for a read-write mount */
3648 if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
3649 ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
3650 "unsupported optional features (%x)",
3651 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
3652 ~EXT4_FEATURE_RO_COMPAT_SUPP));
3653 return 0;
3654 }
3655 if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
3656 ext4_msg(sb, KERN_ERR,
3657 "Can't support bigalloc feature without "
3658 "extents feature\n");
3659 return 0;
3660 }
3661 if (ext4_has_feature_bigalloc(sb) &&
3662 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
3663 ext4_msg(sb, KERN_WARNING,
3664 "bad geometry: bigalloc file system with non-zero "
3665 "first_data_block\n");
3666 return 0;
3667 }
3668
3669 #if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
3670 if (!readonly && (ext4_has_feature_quota(sb) ||
3671 ext4_has_feature_project(sb))) {
3672 ext4_msg(sb, KERN_ERR,
3673 "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
3674 return 0;
3675 }
3676 #endif /* CONFIG_QUOTA */
3677 return 1;
3678 }
3679
3680 /*
3681 * This function is called once a day by default if we have errors logged
3682 * on the file system.
3683 * Use the err_report_sec sysfs attribute to disable or adjust its call
3684 * frequency.
3685 */
3686 void print_daily_error_info(struct timer_list *t)
3687 {
3688 struct ext4_sb_info *sbi = timer_container_of(sbi, t, s_err_report);
3689 struct super_block *sb = sbi->s_sb;
3690 struct ext4_super_block *es = sbi->s_es;
3691
3692 if (es->s_error_count)
3693 /* fsck newer than v1.41.13 is needed to clean this condition. */
3694 ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
3695 le32_to_cpu(es->s_error_count));
3696 if (es->s_first_error_time) {
3697 printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %llu: %.*s:%d",
3698 sb->s_id,
3699 ext4_get_tstamp(es, s_first_error_time),
3700 (int) sizeof(es->s_first_error_func),
3701 es->s_first_error_func,
3702 le32_to_cpu(es->s_first_error_line));
3703 if (es->s_first_error_ino)
3704 printk(KERN_CONT ": inode %u",
3705 le32_to_cpu(es->s_first_error_ino));
3706 if (es->s_first_error_block)
3707 printk(KERN_CONT ": block %llu", (unsigned long long)
3708 le64_to_cpu(es->s_first_error_block));
3709 printk(KERN_CONT "\n");
3710 }
3711 if (es->s_last_error_time) {
3712 printk(KERN_NOTICE "EXT4-fs (%s): last error at time %llu: %.*s:%d",
3713 sb->s_id,
3714 ext4_get_tstamp(es, s_last_error_time),
3715 (int) sizeof(es->s_last_error_func),
3716 es->s_last_error_func,
3717 le32_to_cpu(es->s_last_error_line));
3718 if (es->s_last_error_ino)
3719 printk(KERN_CONT ": inode %u",
3720 le32_to_cpu(es->s_last_error_ino));
3721 if (es->s_last_error_block)
3722 printk(KERN_CONT ": block %llu", (unsigned long long)
3723 le64_to_cpu(es->s_last_error_block));
3724 printk(KERN_CONT "\n");
3725 }
3726
3727 if (sbi->s_err_report_sec)
3728 mod_timer(&sbi->s_err_report, jiffies + secs_to_jiffies(sbi->s_err_report_sec));
3729 }
3730
3731 /* Find next suitable group and run ext4_init_inode_table */
3732 static int ext4_run_li_request(struct ext4_li_request *elr)
3733 {
3734 struct ext4_group_desc *gdp = NULL;
3735 struct super_block *sb = elr->lr_super;
3736 ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
3737 ext4_group_t group = elr->lr_next_group;
3738 unsigned int prefetch_ios = 0;
3739 int ret = 0;
3740 int nr = EXT4_SB(sb)->s_mb_prefetch;
3741 u64 start_time;
3742
3743 if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) {
3744 elr->lr_next_group = ext4_mb_prefetch(sb, group, nr, &prefetch_ios);
3745 ext4_mb_prefetch_fini(sb, elr->lr_next_group, nr);
3746 trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group, nr);
3747 if (group >= elr->lr_next_group) {
3748 ret = 1;
3749 if (elr->lr_first_not_zeroed != ngroups &&
3750 !ext4_emergency_state(sb) && !sb_rdonly(sb) &&
3751 test_opt(sb, INIT_INODE_TABLE)) {
3752 elr->lr_next_group = elr->lr_first_not_zeroed;
3753 elr->lr_mode = EXT4_LI_MODE_ITABLE;
3754 ret = 0;
3755 }
3756 }
3757 return ret;
3758 }
3759
3760 for (; group < ngroups; group++) {
3761 gdp = ext4_get_group_desc(sb, group, NULL);
3762 if (!gdp) {
3763 ret = 1;
3764 break;
3765 }
3766
3767 if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
3768 break;
3769 }
3770
3771 if (group >= ngroups)
3772 ret = 1;
3773
3774 if (!ret) {
3775 start_time = ktime_get_ns();
3776 ret = ext4_init_inode_table(sb, group,
3777 elr->lr_timeout ? 0 : 1);
3778 trace_ext4_lazy_itable_init(sb, group);
3779 if (elr->lr_timeout == 0) {
3780 elr->lr_timeout = nsecs_to_jiffies((ktime_get_ns() - start_time) *
3781 EXT4_SB(elr->lr_super)->s_li_wait_mult);
3782 }
3783 elr->lr_next_sched = jiffies + elr->lr_timeout;
3784 elr->lr_next_group = group + 1;
3785 }
3786 return ret;
3787 }
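/*
 * Note (illustrative): the first successful itable init measures its own
 * elapsed time and multiplies it by s_li_wait_mult (EXT4_DEF_LI_WAIT_MULT
 * by default) to derive lr_timeout, so subsequent groups are paced
 * relative to how long one group takes on this particular device.
 */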
3788
3789 /*
3790 * Remove lr_request from the request list and free the
3791 * request structure. Should be called with li_list_mtx held
3792 */
3793 static void ext4_remove_li_request(struct ext4_li_request *elr)
3794 {
3795 if (!elr)
3796 return;
3797
3798 list_del(&elr->lr_request);
3799 EXT4_SB(elr->lr_super)->s_li_request = NULL;
3800 kfree(elr);
3801 }
3802
3803 static void ext4_unregister_li_request(struct super_block *sb)
3804 {
3805 mutex_lock(&ext4_li_mtx);
3806 if (!ext4_li_info) {
3807 mutex_unlock(&ext4_li_mtx);
3808 return;
3809 }
3810
3811 mutex_lock(&ext4_li_info->li_list_mtx);
3812 ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
3813 mutex_unlock(&ext4_li_info->li_list_mtx);
3814 mutex_unlock(&ext4_li_mtx);
3815 }
3816
3817 static struct task_struct *ext4_lazyinit_task;
3818
3819 /*
3820 * This is the function where ext4lazyinit thread lives. It walks
3821 * through the request list searching for next scheduled filesystem.
3822 * When such a fs is found, run the lazy initialization request
3823 * (ext4_run_li_request) and keep track of the time spent in this
3824 * function. Based on that time we compute next schedule time of
3825 * the request. When walking through the list is complete, compute
3826 * next wakeup time and put itself to sleep.
3827 */
3828 static int ext4_lazyinit_thread(void *arg)
3829 {
3830 struct ext4_lazy_init *eli = arg;
3831 struct list_head *pos, *n;
3832 struct ext4_li_request *elr;
3833 unsigned long next_wakeup, cur;
3834
3835 BUG_ON(NULL == eli);
3836 set_freezable();
3837
3838 cont_thread:
3839 while (true) {
3840 bool next_wakeup_initialized = false;
3841
3842 next_wakeup = 0;
3843 mutex_lock(&eli->li_list_mtx);
3844 if (list_empty(&eli->li_request_list)) {
3845 mutex_unlock(&eli->li_list_mtx);
3846 goto exit_thread;
3847 }
3848 list_for_each_safe(pos, n, &eli->li_request_list) {
3849 int err = 0;
3850 int progress = 0;
3851 elr = list_entry(pos, struct ext4_li_request,
3852 lr_request);
3853
3854 if (time_before(jiffies, elr->lr_next_sched)) {
3855 if (!next_wakeup_initialized ||
3856 time_before(elr->lr_next_sched, next_wakeup)) {
3857 next_wakeup = elr->lr_next_sched;
3858 next_wakeup_initialized = true;
3859 }
3860 continue;
3861 }
3862 if (down_read_trylock(&elr->lr_super->s_umount)) {
3863 if (sb_start_write_trylock(elr->lr_super)) {
3864 progress = 1;
3865 /*
3866 * We hold sb->s_umount, sb can not
3867 * be removed from the list, it is
3868 * now safe to drop li_list_mtx
3869 */
3870 mutex_unlock(&eli->li_list_mtx);
3871 err = ext4_run_li_request(elr);
3872 sb_end_write(elr->lr_super);
3873 mutex_lock(&eli->li_list_mtx);
3874 n = pos->next;
3875 }
3876 up_read((&elr->lr_super->s_umount));
3877 }
3878 /* error, remove the lazy_init job */
3879 if (err) {
3880 ext4_remove_li_request(elr);
3881 continue;
3882 }
3883 if (!progress) {
3884 elr->lr_next_sched = jiffies +
3885 get_random_u32_below(EXT4_DEF_LI_MAX_START_DELAY * HZ);
3886 }
3887 if (!next_wakeup_initialized ||
3888 time_before(elr->lr_next_sched, next_wakeup)) {
3889 next_wakeup = elr->lr_next_sched;
3890 next_wakeup_initialized = true;
3891 }
3892 }
3893 mutex_unlock(&eli->li_list_mtx);
3894
3895 try_to_freeze();
3896
3897 cur = jiffies;
3898 if (!next_wakeup_initialized || time_after_eq(cur, next_wakeup)) {
3899 cond_resched();
3900 continue;
3901 }
3902
3903 schedule_timeout_interruptible(next_wakeup - cur);
3904
3905 if (kthread_should_stop()) {
3906 ext4_clear_request_list();
3907 goto exit_thread;
3908 }
3909 }
3910
3911 exit_thread:
3912 /*
3913 * It looks like the request list is empty, but we need
3914 * to check it under the li_list_mtx lock, to prevent any
3915 * additions into it, and of course we should lock ext4_li_mtx
3916 * to atomically free the list and ext4_li_info, because at
3917 * this point another ext4 filesystem could be registering
3918 * a new one.
3919 */
3920 mutex_lock(&ext4_li_mtx);
3921 mutex_lock(&eli->li_list_mtx);
3922 if (!list_empty(&eli->li_request_list)) {
3923 mutex_unlock(&eli->li_list_mtx);
3924 mutex_unlock(&ext4_li_mtx);
3925 goto cont_thread;
3926 }
3927 mutex_unlock(&eli->li_list_mtx);
3928 kfree(ext4_li_info);
3929 ext4_li_info = NULL;
3930 mutex_unlock(&ext4_li_mtx);
3931
3932 return 0;
3933 }
3934
3935 static void ext4_clear_request_list(void)
3936 {
3937 struct list_head *pos, *n;
3938 struct ext4_li_request *elr;
3939
3940 mutex_lock(&ext4_li_info->li_list_mtx);
3941 list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
3942 elr = list_entry(pos, struct ext4_li_request,
3943 lr_request);
3944 ext4_remove_li_request(elr);
3945 }
3946 mutex_unlock(&ext4_li_info->li_list_mtx);
3947 }
3948
3949 static int ext4_run_lazyinit_thread(void)
3950 {
3951 ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
3952 ext4_li_info, "ext4lazyinit");
3953 if (IS_ERR(ext4_lazyinit_task)) {
3954 int err = PTR_ERR(ext4_lazyinit_task);
3955 ext4_clear_request_list();
3956 kfree(ext4_li_info);
3957 ext4_li_info = NULL;
3958 printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
3959 "initialization thread\n",
3960 err);
3961 return err;
3962 }
3963 ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
3964 return 0;
3965 }
3966
3967 /*
3968 * Check whether it makes sense to run the itable init thread or not.
3969 * If there is at least one uninitialized inode table, return the
3970 * corresponding group number; otherwise the loop goes through all
3971 * groups and returns the total number of groups.
3972 */
3973 static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
3974 {
3975 ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
3976 struct ext4_group_desc *gdp = NULL;
3977
3978 if (!ext4_has_group_desc_csum(sb))
3979 return ngroups;
3980
3981 for (group = 0; group < ngroups; group++) {
3982 gdp = ext4_get_group_desc(sb, group, NULL);
3983 if (!gdp)
3984 continue;
3985
3986 if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
3987 break;
3988 }
3989
3990 return group;
3991 }
3992
3993 static int ext4_li_info_new(void)
3994 {
3995 struct ext4_lazy_init *eli = NULL;
3996
3997 eli = kzalloc_obj(*eli);
3998 if (!eli)
3999 return -ENOMEM;
4000
4001 INIT_LIST_HEAD(&eli->li_request_list);
4002 mutex_init(&eli->li_list_mtx);
4003
4004 eli->li_state |= EXT4_LAZYINIT_QUIT;
4005
4006 ext4_li_info = eli;
4007
4008 return 0;
4009 }
4010
4011 static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
4012 ext4_group_t start)
4013 {
4014 struct ext4_li_request *elr;
4015
4016 elr = kzalloc_obj(*elr);
4017 if (!elr)
4018 return NULL;
4019
4020 elr->lr_super = sb;
4021 elr->lr_first_not_zeroed = start;
4022 if (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS)) {
4023 elr->lr_mode = EXT4_LI_MODE_ITABLE;
4024 elr->lr_next_group = start;
4025 } else {
4026 elr->lr_mode = EXT4_LI_MODE_PREFETCH_BBITMAP;
4027 }
4028
4029 /*
4030 * Randomize first schedule time of the request to
4031 * spread the inode table initialization requests
4032 * better.
4033 */
4034 elr->lr_next_sched = jiffies + get_random_u32_below(EXT4_DEF_LI_MAX_START_DELAY * HZ);
4035 return elr;
4036 }
4037
4038 int ext4_register_li_request(struct super_block *sb,
4039 ext4_group_t first_not_zeroed)
4040 {
4041 struct ext4_sb_info *sbi = EXT4_SB(sb);
4042 struct ext4_li_request *elr = NULL;
4043 ext4_group_t ngroups = sbi->s_groups_count;
4044 int ret = 0;
4045
4046 mutex_lock(&ext4_li_mtx);
4047 if (sbi->s_li_request != NULL) {
4048 /*
4049 * Reset timeout so it can be computed again, because
4050 * s_li_wait_mult might have changed.
4051 */
4052 sbi->s_li_request->lr_timeout = 0;
4053 goto out;
4054 }
4055
4056 if (ext4_emergency_state(sb) || sb_rdonly(sb) ||
4057 (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS) &&
4058 (first_not_zeroed == ngroups || !test_opt(sb, INIT_INODE_TABLE))))
4059 goto out;
4060
4061 elr = ext4_li_request_new(sb, first_not_zeroed);
4062 if (!elr) {
4063 ret = -ENOMEM;
4064 goto out;
4065 }
4066
4067 if (NULL == ext4_li_info) {
4068 ret = ext4_li_info_new();
4069 if (ret)
4070 goto out;
4071 }
4072
4073 mutex_lock(&ext4_li_info->li_list_mtx);
4074 list_add(&elr->lr_request, &ext4_li_info->li_request_list);
4075 mutex_unlock(&ext4_li_info->li_list_mtx);
4076
4077 sbi->s_li_request = elr;
4078 /*
4079 * set elr to NULL here since it has been inserted into
4080 * the request_list; its removal and freeing are
4081 * handled by ext4_clear_request_list from now on.
4082 */
4083 elr = NULL;
4084
4085 if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
4086 ret = ext4_run_lazyinit_thread();
4087 if (ret)
4088 goto out;
4089 }
4090 out:
4091 mutex_unlock(&ext4_li_mtx);
4092 if (ret)
4093 kfree(elr);
4094 return ret;
4095 }
4096
4097 /*
4098 * We do not need to lock anything since this is called on
4099 * module unload.
4100 */
4101 static void ext4_destroy_lazyinit_thread(void)
4102 {
4103 /*
4104 * If thread exited earlier
4105 * there's nothing to be done.
4106 */
4107 if (!ext4_li_info || !ext4_lazyinit_task)
4108 return;
4109
4110 kthread_stop(ext4_lazyinit_task);
4111 }
4112
4113 static int set_journal_csum_feature_set(struct super_block *sb)
4114 {
4115 int ret = 1;
4116 int compat, incompat;
4117 struct ext4_sb_info *sbi = EXT4_SB(sb);
4118
4119 if (ext4_has_feature_metadata_csum(sb)) {
4120 /* journal checksum v3 */
4121 compat = 0;
4122 incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
4123 } else {
4124 /* journal checksum v1 */
4125 compat = JBD2_FEATURE_COMPAT_CHECKSUM;
4126 incompat = 0;
4127 }
4128
4129 jbd2_journal_clear_features(sbi->s_journal,
4130 JBD2_FEATURE_COMPAT_CHECKSUM, 0,
4131 JBD2_FEATURE_INCOMPAT_CSUM_V3 |
4132 JBD2_FEATURE_INCOMPAT_CSUM_V2);
4133 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
4134 ret = jbd2_journal_set_features(sbi->s_journal,
4135 compat, 0,
4136 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
4137 incompat);
4138 } else if (test_opt(sb, JOURNAL_CHECKSUM)) {
4139 ret = jbd2_journal_set_features(sbi->s_journal,
4140 compat, 0,
4141 incompat);
4142 jbd2_journal_clear_features(sbi->s_journal, 0, 0,
4143 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
4144 } else {
4145 jbd2_journal_clear_features(sbi->s_journal, 0, 0,
4146 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
4147 }
4148
4149 return ret;
4150 }
4151
4152 /*
4153 * Note: calculating the overhead so we can be compatible with
4154 * historical BSD practice is quite difficult in the face of
4155 * clusters/bigalloc. This is because multiple metadata blocks from
4156 * different block group can end up in the same allocation cluster.
4157 * Calculating the exact overhead in the face of clustered allocation
4158 * requires either O(all block bitmaps) in memory or O(number of block
4159 * groups**2) in time. We will still calculate the overhead for
4160 * older file systems --- and if we come across a bigalloc file
4161 * system with zero in s_overhead_clusters the estimate will be close to
4162 * correct, especially for very large cluster sizes --- but for newer
4163 * file systems, it's better to calculate this figure once at mkfs
4164 * time, and store it in the superblock. If the superblock value is
4165 * present (even for non-bigalloc file systems), we will use it.
4166 */
4167 static int count_overhead(struct super_block *sb, ext4_group_t grp,
4168 char *buf)
4169 {
4170 struct ext4_sb_info *sbi = EXT4_SB(sb);
4171 struct ext4_group_desc *gdp;
4172 ext4_fsblk_t first_block, last_block, b;
4173 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
4174 int s, j, count = 0;
4175 int has_super = ext4_bg_has_super(sb, grp);
4176
4177 if (!ext4_has_feature_bigalloc(sb))
4178 return (has_super + ext4_bg_num_gdb(sb, grp) +
4179 (has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
4180 sbi->s_itb_per_group + 2);
4181
4182 first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
4183 (grp * EXT4_BLOCKS_PER_GROUP(sb));
4184 last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
4185 for (i = 0; i < ngroups; i++) {
4186 gdp = ext4_get_group_desc(sb, i, NULL);
4187 b = ext4_block_bitmap(sb, gdp);
4188 if (b >= first_block && b <= last_block) {
4189 ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
4190 count++;
4191 }
4192 b = ext4_inode_bitmap(sb, gdp);
4193 if (b >= first_block && b <= last_block) {
4194 ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
4195 count++;
4196 }
4197 b = ext4_inode_table(sb, gdp);
4198 if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
4199 for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
4200 int c = EXT4_B2C(sbi, b - first_block);
4201 ext4_set_bit(c, buf);
4202 count++;
4203 }
4204 if (i != grp)
4205 continue;
4206 s = 0;
4207 if (ext4_bg_has_super(sb, grp)) {
4208 ext4_set_bit(s++, buf);
4209 count++;
4210 }
4211 j = ext4_bg_num_gdb(sb, grp);
4212 if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
4213 ext4_error(sb, "Invalid number of block group "
4214 "descriptor blocks: %d", j);
4215 j = EXT4_BLOCKS_PER_GROUP(sb) - s;
4216 }
4217 count += j;
4218 for (; j > 0; j--)
4219 ext4_set_bit(EXT4_B2C(sbi, s++), buf);
4220 }
4221 if (!count)
4222 return 0;
4223 return EXT4_CLUSTERS_PER_GROUP(sb) -
4224 ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
4225 }
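/*
 * Note (illustrative): for the common non-bigalloc case the per-group
 * overhead is just the fixed layout: the optional backup superblock, the
 * group descriptor blocks (plus reserved GDT blocks when a backup
 * superblock is present), the inode table, and the two bitmap blocks,
 * which is exactly what the early return above adds up.
 */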
4226
4227 /*
4228 * Compute the overhead and stash it in sbi->s_overhead
4229 */
4230 int ext4_calculate_overhead(struct super_block *sb)
4231 {
4232 struct ext4_sb_info *sbi = EXT4_SB(sb);
4233 struct ext4_super_block *es = sbi->s_es;
4234 struct inode *j_inode;
4235 unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
4236 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
4237 ext4_fsblk_t overhead = 0;
4238 char *buf = kvmalloc(sb->s_blocksize, GFP_NOFS | __GFP_ZERO);
4239
4240 if (!buf)
4241 return -ENOMEM;
4242
4243 /*
4244 * Compute the overhead (FS structures). This is constant
4245 * for a given filesystem unless the number of block groups
4246 * changes so we cache the previous value until it does.
4247 */
4248
4249 /*
4250 * All of the blocks before first_data_block are overhead
4251 */
4252 overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
4253
4254 /*
4255 * Add the overhead found in each block group
4256 */
4257 for (i = 0; i < ngroups; i++) {
4258 int blks;
4259
4260 blks = count_overhead(sb, i, buf);
4261 overhead += blks;
4262 if (blks)
4263 memset(buf, 0, sb->s_blocksize);
4264 cond_resched();
4265 }
4266
4267 /*
4268 * Add the internal journal blocks whether the journal has been
4269 * loaded or not
4270 */
4271 if (sbi->s_journal && !sbi->s_journal_bdev_file)
4272 overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_total_len);
4273 else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
4274 /* j_inum for internal journal is non-zero */
4275 j_inode = ext4_get_journal_inode(sb, j_inum);
4276 if (!IS_ERR(j_inode)) {
4277 j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
4278 overhead += EXT4_NUM_B2C(sbi, j_blocks);
4279 iput(j_inode);
4280 } else {
4281 ext4_msg(sb, KERN_ERR, "can't get journal size");
4282 }
4283 }
4284 sbi->s_overhead = overhead;
4285 smp_wmb();
4286 kvfree(buf);
4287 return 0;
4288 }
4289
4290 static void ext4_set_resv_clusters(struct super_block *sb)
4291 {
4292 ext4_fsblk_t resv_clusters;
4293 struct ext4_sb_info *sbi = EXT4_SB(sb);
4294
4295 /*
4296 * There's no need to reserve anything when we aren't using extents.
4297 * The space estimates are exact, there are no unwritten extents,
4298 * hole punching doesn't need new metadata... This is needed especially
4299 * to keep ext2/3 backward compatibility.
4300 */
4301 if (!ext4_has_feature_extents(sb))
4302 return;
4303 /*
4304 * By default we reserve 2% or 4096 clusters, whichever is smaller.
4305 * This should cover the situations where we cannot afford to run
4306 * out of space, such as punching a hole or converting
4307 * unwritten extents in the delalloc path. In most cases such an
4308 * allocation would require 1 or 2 blocks; higher numbers are
4309 * very rare.
4310 */
4311 resv_clusters = (ext4_blocks_count(sbi->s_es) >>
4312 sbi->s_cluster_bits);
4313
4314 do_div(resv_clusters, 50);
4315 resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
4316
4317 atomic64_set(&sbi->s_resv_clusters, resv_clusters);
4318 }
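/*
 * Worked example (illustrative): a 1 TiB filesystem with 4KiB blocks and
 * no bigalloc has 268435456 clusters; 2% of that is about 5.3 million, so
 * the cap of 4096 clusters (16 MiB) is what actually gets reserved.
 */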
4319
4320 static const char *ext4_quota_mode(struct super_block *sb)
4321 {
4322 #ifdef CONFIG_QUOTA
4323 if (!ext4_quota_capable(sb))
4324 return "none";
4325
4326 if (EXT4_SB(sb)->s_journal && ext4_is_quota_journalled(sb))
4327 return "journalled";
4328 else
4329 return "writeback";
4330 #else
4331 return "disabled";
4332 #endif
4333 }
4334
4335 static void ext4_setup_csum_trigger(struct super_block *sb,
4336 enum ext4_journal_trigger_type type,
4337 void (*trigger)(
4338 struct jbd2_buffer_trigger_type *type,
4339 struct buffer_head *bh,
4340 void *mapped_data,
4341 size_t size))
4342 {
4343 struct ext4_sb_info *sbi = EXT4_SB(sb);
4344
4345 sbi->s_journal_triggers[type].sb = sb;
4346 sbi->s_journal_triggers[type].tr_triggers.t_frozen = trigger;
4347 }
4348
4349 static void ext4_free_sbi(struct ext4_sb_info *sbi)
4350 {
4351 if (!sbi)
4352 return;
4353
4354 kfree(sbi->s_blockgroup_lock);
4355 fs_put_dax(sbi->s_daxdev, NULL);
4356 kfree(sbi);
4357 }
4358
4359 static struct ext4_sb_info *ext4_alloc_sbi(struct super_block *sb)
4360 {
4361 struct ext4_sb_info *sbi;
4362
4363 sbi = kzalloc_obj(*sbi);
4364 if (!sbi)
4365 return NULL;
4366
4367 sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off,
4368 NULL, NULL);
4369
4370 sbi->s_blockgroup_lock =
4371 kzalloc_obj(struct blockgroup_lock);
4372
4373 if (!sbi->s_blockgroup_lock)
4374 goto err_out;
4375
4376 sb->s_fs_info = sbi;
4377 sbi->s_sb = sb;
4378 return sbi;
4379 err_out:
4380 fs_put_dax(sbi->s_daxdev, NULL);
4381 kfree(sbi);
4382 return NULL;
4383 }
4384
4385 static void ext4_set_def_opts(struct super_block *sb,
4386 struct ext4_super_block *es)
4387 {
4388 unsigned long def_mount_opts;
4389
4390 /* Set defaults before we parse the mount options */
4391 def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
4392 set_opt(sb, INIT_INODE_TABLE);
4393 if (def_mount_opts & EXT4_DEFM_DEBUG)
4394 set_opt(sb, DEBUG);
4395 if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
4396 set_opt(sb, GRPID);
4397 if (def_mount_opts & EXT4_DEFM_UID16)
4398 set_opt(sb, NO_UID32);
4399 /* xattr user namespace & acls are now defaulted on */
4400 set_opt(sb, XATTR_USER);
4401 #ifdef CONFIG_EXT4_FS_POSIX_ACL
4402 set_opt(sb, POSIX_ACL);
4403 #endif
4404 if (ext4_has_feature_fast_commit(sb))
4405 set_opt2(sb, JOURNAL_FAST_COMMIT);
4406 /* don't forget to enable journal_csum when metadata_csum is enabled. */
4407 if (ext4_has_feature_metadata_csum(sb))
4408 set_opt(sb, JOURNAL_CHECKSUM);
4409
4410 if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
4411 set_opt(sb, JOURNAL_DATA);
4412 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
4413 set_opt(sb, ORDERED_DATA);
4414 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
4415 set_opt(sb, WRITEBACK_DATA);
4416
4417 if (le16_to_cpu(es->s_errors) == EXT4_ERRORS_PANIC)
4418 set_opt(sb, ERRORS_PANIC);
4419 else if (le16_to_cpu(es->s_errors) == EXT4_ERRORS_CONTINUE)
4420 set_opt(sb, ERRORS_CONT);
4421 else
4422 set_opt(sb, ERRORS_RO);
4423 /* block_validity enabled by default; disable with noblock_validity */
4424 set_opt(sb, BLOCK_VALIDITY);
4425 if (def_mount_opts & EXT4_DEFM_DISCARD)
4426 set_opt(sb, DISCARD);
4427
4428 if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
4429 set_opt(sb, BARRIER);
4430
4431 /*
4432 * enable delayed allocation by default
4433 * Use -o nodelalloc to turn it off
4434 */
4435 if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
4436 ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
4437 set_opt(sb, DELALLOC);
4438
4439 set_opt(sb, DIOREAD_NOLOCK);
4440 }
4441
4442 static int ext4_handle_clustersize(struct super_block *sb)
4443 {
4444 struct ext4_sb_info *sbi = EXT4_SB(sb);
4445 struct ext4_super_block *es = sbi->s_es;
4446 int clustersize;
4447
4448 /* Handle clustersize */
4449 clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
4450 if (ext4_has_feature_bigalloc(sb)) {
4451 if (clustersize < sb->s_blocksize) {
4452 ext4_msg(sb, KERN_ERR,
4453 "cluster size (%d) smaller than "
4454 "block size (%lu)", clustersize, sb->s_blocksize);
4455 return -EINVAL;
4456 }
4457 sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
4458 le32_to_cpu(es->s_log_block_size);
4459 } else {
4460 if (clustersize != sb->s_blocksize) {
4461 ext4_msg(sb, KERN_ERR,
4462 "fragment/cluster size (%d) != "
4463 "block size (%lu)", clustersize, sb->s_blocksize);
4464 return -EINVAL;
4465 }
4466 if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
4467 ext4_msg(sb, KERN_ERR,
4468 "#blocks per group too big: %lu",
4469 sbi->s_blocks_per_group);
4470 return -EINVAL;
4471 }
4472 sbi->s_cluster_bits = 0;
4473 }
4474 sbi->s_clusters_per_group = le32_to_cpu(es->s_clusters_per_group);
4475 if (sbi->s_clusters_per_group > sb->s_blocksize * 8) {
4476 ext4_msg(sb, KERN_ERR, "#clusters per group too big: %lu",
4477 sbi->s_clusters_per_group);
4478 return -EINVAL;
4479 }
4480 if (sbi->s_blocks_per_group !=
4481 (sbi->s_clusters_per_group * (clustersize / sb->s_blocksize))) {
4482 ext4_msg(sb, KERN_ERR,
4483 "blocks per group (%lu) and clusters per group (%lu) inconsistent",
4484 sbi->s_blocks_per_group, sbi->s_clusters_per_group);
4485 return -EINVAL;
4486 }
4487 sbi->s_cluster_ratio = clustersize / sb->s_blocksize;
4488
4489 /* Do we have standard group size of clustersize * 8 blocks ? */
4490 if (sbi->s_blocks_per_group == clustersize << 3)
4491 set_opt2(sb, STD_GROUP_SIZE);
4492
4493 return 0;
4494 }
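/*
 * Worked example (illustrative): a bigalloc filesystem with 4KiB blocks
 * (s_log_block_size == 2) and 64KiB clusters (s_log_cluster_size == 6)
 * ends up with s_cluster_bits == 4 and s_cluster_ratio == 16, i.e. each
 * allocation cluster covers 16 blocks.
 */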
4495
4496 /*
4497 * ext4_atomic_write_init: Initializes filesystem min & max atomic write units.
4498 * With a non-bigalloc filesystem the awu will be based upon the filesystem
4499 * blocksize & bdev awu units.
4500 * With bigalloc it will be based upon the bigalloc cluster size & bdev awu units.
4501 * @sb: super block
4502 */
4503 static void ext4_atomic_write_init(struct super_block *sb)
4504 {
4505 struct ext4_sb_info *sbi = EXT4_SB(sb);
4506 struct block_device *bdev = sb->s_bdev;
4507 unsigned int clustersize = EXT4_CLUSTER_SIZE(sb);
4508
4509 if (!bdev_can_atomic_write(bdev))
4510 return;
4511
4512 if (!ext4_has_feature_extents(sb))
4513 return;
4514
4515 sbi->s_awu_min = max(sb->s_blocksize,
4516 bdev_atomic_write_unit_min_bytes(bdev));
4517 sbi->s_awu_max = min(clustersize,
4518 bdev_atomic_write_unit_max_bytes(bdev));
4519 if (sbi->s_awu_min && sbi->s_awu_max &&
4520 sbi->s_awu_min <= sbi->s_awu_max) {
4521 ext4_msg(sb, KERN_NOTICE, "Supports (experimental) DIO atomic writes awu_min: %u, awu_max: %u",
4522 sbi->s_awu_min, sbi->s_awu_max);
4523 } else {
4524 sbi->s_awu_min = 0;
4525 sbi->s_awu_max = 0;
4526 }
4527 }
4528
4529 static void ext4_fast_commit_init(struct super_block *sb)
4530 {
4531 struct ext4_sb_info *sbi = EXT4_SB(sb);
4532
4533 /* Initialize fast commit stuff */
4534 atomic_set(&sbi->s_fc_subtid, 0);
4535 INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_MAIN]);
4536 INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_STAGING]);
4537 INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_MAIN]);
4538 INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]);
4539 sbi->s_fc_bytes = 0;
4540 ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
4541 sbi->s_fc_ineligible_tid = 0;
4542 mutex_init(&sbi->s_fc_lock);
4543 memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats));
4544 sbi->s_fc_replay_state.fc_regions = NULL;
4545 sbi->s_fc_replay_state.fc_regions_size = 0;
4546 sbi->s_fc_replay_state.fc_regions_used = 0;
4547 sbi->s_fc_replay_state.fc_regions_valid = 0;
4548 sbi->s_fc_replay_state.fc_modified_inodes = NULL;
4549 sbi->s_fc_replay_state.fc_modified_inodes_size = 0;
4550 sbi->s_fc_replay_state.fc_modified_inodes_used = 0;
4551 }
4552
4553 static int ext4_inode_info_init(struct super_block *sb,
4554 struct ext4_super_block *es)
4555 {
4556 struct ext4_sb_info *sbi = EXT4_SB(sb);
4557
4558 if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
4559 sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
4560 sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
4561 } else {
4562 sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
4563 sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
4564 if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
4565 ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
4566 sbi->s_first_ino);
4567 return -EINVAL;
4568 }
4569 if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
4570 (!is_power_of_2(sbi->s_inode_size)) ||
4571 (sbi->s_inode_size > sb->s_blocksize)) {
4572 ext4_msg(sb, KERN_ERR,
4573 "unsupported inode size: %d",
4574 sbi->s_inode_size);
4575 ext4_msg(sb, KERN_ERR, "blocksize: %lu", sb->s_blocksize);
4576 return -EINVAL;
4577 }
4578 /*
4579 * i_atime_extra is the last extra field available for
4580 * [acm]times in struct ext4_inode. Checking for that
4581 * field should suffice to ensure we have extra space
4582 * for all three.
4583 */
4584 if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) +
4585 sizeof(((struct ext4_inode *)0)->i_atime_extra)) {
4586 sb->s_time_gran = 1;
4587 sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX;
4588 } else {
4589 sb->s_time_gran = NSEC_PER_SEC;
4590 sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX;
4591 }
4592 sb->s_time_min = EXT4_TIMESTAMP_MIN;
4593 }
4594
4595 if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
4596 sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
4597 EXT4_GOOD_OLD_INODE_SIZE;
4598 if (ext4_has_feature_extra_isize(sb)) {
4599 unsigned v, max = (sbi->s_inode_size -
4600 EXT4_GOOD_OLD_INODE_SIZE);
4601
4602 v = le16_to_cpu(es->s_want_extra_isize);
4603 if (v > max) {
4604 ext4_msg(sb, KERN_ERR,
4605 "bad s_want_extra_isize: %d", v);
4606 return -EINVAL;
4607 }
4608 if (sbi->s_want_extra_isize < v)
4609 sbi->s_want_extra_isize = v;
4610
4611 v = le16_to_cpu(es->s_min_extra_isize);
4612 if (v > max) {
4613 ext4_msg(sb, KERN_ERR,
4614 "bad s_min_extra_isize: %d", v);
4615 return -EINVAL;
4616 }
4617 if (sbi->s_want_extra_isize < v)
4618 sbi->s_want_extra_isize = v;
4619 }
4620 }
4621
4622 return 0;
4623 }
4624
4625 #if IS_ENABLED(CONFIG_UNICODE)
4626 static int ext4_encoding_init(struct super_block *sb, struct ext4_super_block *es)
4627 {
4628 const struct ext4_sb_encodings *encoding_info;
4629 struct unicode_map *encoding;
4630 __u16 encoding_flags = le16_to_cpu(es->s_encoding_flags);
4631
4632 if (!ext4_has_feature_casefold(sb) || sb->s_encoding)
4633 return 0;
4634
4635 encoding_info = ext4_sb_read_encoding(es);
4636 if (!encoding_info) {
4637 ext4_msg(sb, KERN_ERR,
4638 "Encoding requested by superblock is unknown");
4639 return -EINVAL;
4640 }
4641
4642 encoding = utf8_load(encoding_info->version);
4643 if (IS_ERR(encoding)) {
4644 ext4_msg(sb, KERN_ERR,
4645 "can't mount with superblock charset: %s-%u.%u.%u "
4646 "not supported by the kernel. flags: 0x%x.",
4647 encoding_info->name,
4648 unicode_major(encoding_info->version),
4649 unicode_minor(encoding_info->version),
4650 unicode_rev(encoding_info->version),
4651 encoding_flags);
4652 return -EINVAL;
4653 }
4654 ext4_msg(sb, KERN_INFO,"Using encoding defined by superblock: "
4655 "%s-%u.%u.%u with flags 0x%hx", encoding_info->name,
4656 unicode_major(encoding_info->version),
4657 unicode_minor(encoding_info->version),
4658 unicode_rev(encoding_info->version),
4659 encoding_flags);
4660
4661 sb->s_encoding = encoding;
4662 sb->s_encoding_flags = encoding_flags;
4663
4664 return 0;
4665 }
4666 #else
4667 static inline int ext4_encoding_init(struct super_block *sb, struct ext4_super_block *es)
4668 {
4669 return 0;
4670 }
4671 #endif
4672
4673 static int ext4_init_metadata_csum(struct super_block *sb, struct ext4_super_block *es)
4674 {
4675 struct ext4_sb_info *sbi = EXT4_SB(sb);
4676
4677 /* Warn if metadata_csum and gdt_csum are both set. */
4678 if (ext4_has_feature_metadata_csum(sb) &&
4679 ext4_has_feature_gdt_csum(sb))
4680 ext4_warning(sb, "metadata_csum and uninit_bg are "
4681 "redundant flags; please run fsck.");
4682
4683 /* Check for a known checksum algorithm */
4684 if (!ext4_verify_csum_type(sb, es)) {
4685 ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
4686 "unknown checksum algorithm.");
4687 return -EINVAL;
4688 }
4689 ext4_setup_csum_trigger(sb, EXT4_JTR_ORPHAN_FILE,
4690 ext4_orphan_file_block_trigger);
4691
4692 /* Check superblock checksum */
4693 if (!ext4_superblock_csum_verify(sb, es)) {
4694 ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
4695 "invalid superblock checksum. Run e2fsck?");
4696 return -EFSBADCRC;
4697 }
4698
4699 /* Precompute checksum seed for all metadata */
4700 if (ext4_has_feature_csum_seed(sb))
4701 sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
4702 else if (ext4_has_feature_metadata_csum(sb) ||
4703 ext4_has_feature_ea_inode(sb))
4704 sbi->s_csum_seed = ext4_chksum(~0, es->s_uuid,
4705 sizeof(es->s_uuid));
4706 return 0;
4707 }
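/*
 * Note (illustrative): without the csum_seed feature the seed is simply
 * crc32c(~0, s_uuid), so every metadata checksum is effectively keyed by
 * the filesystem UUID; the csum_seed feature stores the seed explicitly so
 * the UUID can later be changed without rewriting all checksummed metadata.
 */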
4708
4709 static int ext4_check_feature_compatibility(struct super_block *sb,
4710 struct ext4_super_block *es,
4711 int silent)
4712 {
4713 struct ext4_sb_info *sbi = EXT4_SB(sb);
4714
4715 if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
4716 (ext4_has_compat_features(sb) ||
4717 ext4_has_ro_compat_features(sb) ||
4718 ext4_has_incompat_features(sb)))
4719 ext4_msg(sb, KERN_WARNING,
4720 "feature flags set on rev 0 fs, "
4721 "running e2fsck is recommended");
4722
4723 if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
4724 set_opt2(sb, HURD_COMPAT);
4725 if (ext4_has_feature_64bit(sb)) {
4726 ext4_msg(sb, KERN_ERR,
4727 "The Hurd can't support 64-bit file systems");
4728 return -EINVAL;
4729 }
4730
4731 /*
4732 * ea_inode feature uses l_i_version field which is not
4733 * available in HURD_COMPAT mode.
4734 */
4735 if (ext4_has_feature_ea_inode(sb)) {
4736 ext4_msg(sb, KERN_ERR,
4737 "ea_inode feature is not supported for Hurd");
4738 return -EINVAL;
4739 }
4740 }
4741
4742 if (IS_EXT2_SB(sb)) {
4743 if (ext2_feature_set_ok(sb))
4744 ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
4745 "using the ext4 subsystem");
4746 else {
4747 /*
4748 * If we're probing, be silent if this looks like
4749 * it's actually an ext[34] filesystem.
4750 */
4751 if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
4752 return -EINVAL;
4753 ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
4754 "to feature incompatibilities");
4755 return -EINVAL;
4756 }
4757 }
4758
4759 if (IS_EXT3_SB(sb)) {
4760 if (ext3_feature_set_ok(sb))
4761 ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
4762 "using the ext4 subsystem");
4763 else {
4764 /*
4765 * If we're probing, be silent if this looks like
4766 * it's actually an ext4 filesystem.
4767 */
4768 if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
4769 return -EINVAL;
4770 ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
4771 "to feature incompatibilities");
4772 return -EINVAL;
4773 }
4774 }
4775
4776 /*
4777 * Check feature flags regardless of the revision level, since we
4778 * previously didn't change the revision level when setting the flags,
4779 * so there is a chance incompat flags are set on a rev 0 filesystem.
4780 */
4781 if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
4782 return -EINVAL;
4783
4784 if (sbi->s_daxdev) {
4785 if (sb->s_blocksize == PAGE_SIZE)
4786 set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags);
4787 else
4788 ext4_msg(sb, KERN_ERR, "unsupported blocksize for DAX\n");
4789 }
4790
4791 if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) {
4792 if (ext4_has_feature_inline_data(sb)) {
4793 ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
4794 " that may contain inline data");
4795 return -EINVAL;
4796 }
4797 if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) {
4798 ext4_msg(sb, KERN_ERR,
4799 "DAX unsupported by block device.");
4800 return -EINVAL;
4801 }
4802 }
4803
4804 if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
4805 ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
4806 es->s_encryption_level);
4807 return -EINVAL;
4808 }
4809
4810 return 0;
4811 }
4812
4813 static int ext4_check_geometry(struct super_block *sb,
4814 struct ext4_super_block *es)
4815 {
4816 struct ext4_sb_info *sbi = EXT4_SB(sb);
4817 __u64 blocks_count;
4818 int err;
4819
4820 if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (sb->s_blocksize / 4)) {
4821 ext4_msg(sb, KERN_ERR,
4822 "Number of reserved GDT blocks insanely large: %d",
4823 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
4824 return -EINVAL;
4825 }
4826 /*
4827 * Test whether we have more sectors than will fit in sector_t,
4828 * and whether the max offset is addressable by the page cache.
4829 */
4830 err = generic_check_addressable(sb->s_blocksize_bits,
4831 ext4_blocks_count(es));
4832 if (err) {
4833 ext4_msg(sb, KERN_ERR, "filesystem"
4834 " too large to mount safely on this system");
4835 return err;
4836 }
4837
4838 /* check blocks count against device size */
4839 blocks_count = sb_bdev_nr_blocks(sb);
4840 if (blocks_count && ext4_blocks_count(es) > blocks_count) {
4841 ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
4842 "exceeds size of device (%llu blocks)",
4843 ext4_blocks_count(es), blocks_count);
4844 return -EINVAL;
4845 }
4846
4847 /*
4848 * It makes no sense for the first data block to be beyond the end
4849 * of the filesystem.
4850 */
4851 if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
4852 ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
4853 "block %u is beyond end of filesystem (%llu)",
4854 le32_to_cpu(es->s_first_data_block),
4855 ext4_blocks_count(es));
4856 return -EINVAL;
4857 }
4858 if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
4859 (sbi->s_cluster_ratio == 1)) {
4860 ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
4861 "block is 0 with a 1k block and cluster size");
4862 return -EINVAL;
4863 }
4864
4865 blocks_count = (ext4_blocks_count(es) -
4866 le32_to_cpu(es->s_first_data_block) +
4867 EXT4_BLOCKS_PER_GROUP(sb) - 1);
4868 do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
4869 if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
4870 ext4_msg(sb, KERN_WARNING, "groups count too large: %llu "
4871 "(block count %llu, first data block %u, "
4872 "blocks per group %lu)", blocks_count,
4873 ext4_blocks_count(es),
4874 le32_to_cpu(es->s_first_data_block),
4875 EXT4_BLOCKS_PER_GROUP(sb));
4876 return -EINVAL;
4877 }
4878 sbi->s_groups_count = blocks_count;
4879 sbi->s_blockfile_groups = min(sbi->s_groups_count,
4880 (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
4881 if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
4882 le32_to_cpu(es->s_inodes_count)) {
4883 ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
4884 le32_to_cpu(es->s_inodes_count),
4885 ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
4886 return -EINVAL;
4887 }
4888
4889 return 0;
4890 }
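/*
 * Worked example (illustrative): a 1 TiB filesystem with 4KiB blocks,
 * s_first_data_block == 0 and 32768 blocks per group ends up with
 * (268435456 + 32767) / 32768 == 8192 block groups, far below the
 * 2^32 - EXT4_DESC_PER_BLOCK(sb) ceiling checked above.
 */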
4891
4892 static int ext4_group_desc_init(struct super_block *sb,
4893 struct ext4_super_block *es,
4894 ext4_fsblk_t logical_sb_block,
4895 ext4_group_t *first_not_zeroed)
4896 {
4897 struct ext4_sb_info *sbi = EXT4_SB(sb);
4898 unsigned int db_count;
4899 ext4_fsblk_t block;
4900 int i;
4901
4902 db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
4903 EXT4_DESC_PER_BLOCK(sb);
4904 if (ext4_has_feature_meta_bg(sb)) {
4905 if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
4906 ext4_msg(sb, KERN_WARNING,
4907 "first meta block group too large: %u "
4908 "(group descriptor block count %u)",
4909 le32_to_cpu(es->s_first_meta_bg), db_count);
4910 return -EINVAL;
4911 }
4912 }
4913 rcu_assign_pointer(sbi->s_group_desc,
4914 kvmalloc_objs(struct buffer_head *, db_count));
4915 if (sbi->s_group_desc == NULL) {
4916 ext4_msg(sb, KERN_ERR, "not enough memory");
4917 return -ENOMEM;
4918 }
4919
4920 bgl_lock_init(sbi->s_blockgroup_lock);
4921
4922 /* Pre-read the descriptors into the buffer cache */
4923 for (i = 0; i < db_count; i++) {
4924 block = descriptor_loc(sb, logical_sb_block, i);
4925 ext4_sb_breadahead_unmovable(sb, block);
4926 }
4927
4928 for (i = 0; i < db_count; i++) {
4929 struct buffer_head *bh;
4930
4931 block = descriptor_loc(sb, logical_sb_block, i);
4932 bh = ext4_sb_bread_unmovable(sb, block);
4933 if (IS_ERR(bh)) {
4934 ext4_msg(sb, KERN_ERR,
4935 "can't read group descriptor %d", i);
4936 sbi->s_gdb_count = i;
4937 return PTR_ERR(bh);
4938 }
4939 rcu_read_lock();
4940 rcu_dereference(sbi->s_group_desc)[i] = bh;
4941 rcu_read_unlock();
4942 }
4943 sbi->s_gdb_count = db_count;
4944 if (!ext4_check_descriptors(sb, logical_sb_block, first_not_zeroed)) {
4945 ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
4946 return -EFSCORRUPTED;
4947 }
4948
4949 return 0;
4950 }
4951
4952 static int ext4_load_and_init_journal(struct super_block *sb,
4953 struct ext4_super_block *es,
4954 struct ext4_fs_context *ctx)
4955 {
4956 struct ext4_sb_info *sbi = EXT4_SB(sb);
4957 int err;
4958
4959 err = ext4_load_journal(sb, es, ctx->journal_devnum);
4960 if (err)
4961 return err;
4962
4963 if (ext4_has_feature_64bit(sb) &&
4964 !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
4965 JBD2_FEATURE_INCOMPAT_64BIT)) {
4966 ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
4967 goto out;
4968 }
4969
4970 if (!set_journal_csum_feature_set(sb)) {
4971 ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
4972 "feature set");
4973 goto out;
4974 }
4975
4976 if (test_opt2(sb, JOURNAL_FAST_COMMIT) &&
4977 !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
4978 JBD2_FEATURE_INCOMPAT_FAST_COMMIT)) {
4979 ext4_msg(sb, KERN_ERR,
4980 "Failed to set fast commit journal feature");
4981 goto out;
4982 }
4983
4984 /* We have now updated the journal if required, so we can
4985 * validate the data journaling mode. */
4986 switch (test_opt(sb, DATA_FLAGS)) {
4987 case 0:
4988 /* No mode set, assume a default based on the journal
4989 * capabilities: ORDERED_DATA if the journal can
4990 * cope, else JOURNAL_DATA
4991 */
4992 if (jbd2_journal_check_available_features
4993 (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
4994 set_opt(sb, ORDERED_DATA);
4995 sbi->s_def_mount_opt |= EXT4_MOUNT_ORDERED_DATA;
4996 } else {
4997 set_opt(sb, JOURNAL_DATA);
4998 sbi->s_def_mount_opt |= EXT4_MOUNT_JOURNAL_DATA;
4999 }
5000 break;
5001
5002 case EXT4_MOUNT_ORDERED_DATA:
5003 case EXT4_MOUNT_WRITEBACK_DATA:
5004 if (!jbd2_journal_check_available_features
5005 (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
5006 ext4_msg(sb, KERN_ERR, "Journal does not support "
5007 "requested data journaling mode");
5008 goto out;
5009 }
5010 break;
5011 default:
5012 break;
5013 }
5014
5015 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
5016 test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
5017 ext4_msg(sb, KERN_ERR, "can't mount with "
5018 "journal_async_commit in data=ordered mode");
5019 goto out;
5020 }
5021
5022 set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio);
5023
5024 sbi->s_journal->j_submit_inode_data_buffers =
5025 ext4_journal_submit_inode_data_buffers;
5026 sbi->s_journal->j_finish_inode_data_buffers =
5027 ext4_journal_finish_inode_data_buffers;
5028
5029 return 0;
5030
5031 out:
5032 ext4_journal_destroy(sbi, sbi->s_journal);
5033 return -EINVAL;
5034 }
5035
5036 static int ext4_check_journal_data_mode(struct super_block *sb)
5037 {
5038 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
5039 printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with "
5040 "data=journal disables delayed allocation, "
5041 "dioread_nolock, O_DIRECT and fast_commit support!\n");
5042 /* can't mount with both data=journal and dioread_nolock. */
5043 clear_opt(sb, DIOREAD_NOLOCK);
5044 clear_opt2(sb, JOURNAL_FAST_COMMIT);
5045 if (test_opt2(sb, EXPLICIT_DELALLOC)) {
5046 ext4_msg(sb, KERN_ERR, "can't mount with "
5047 "both data=journal and delalloc");
5048 return -EINVAL;
5049 }
5050 if (test_opt(sb, DAX_ALWAYS)) {
5051 ext4_msg(sb, KERN_ERR, "can't mount with "
5052 "both data=journal and dax");
5053 return -EINVAL;
5054 }
5055 if (ext4_has_feature_encrypt(sb)) {
5056 ext4_msg(sb, KERN_WARNING,
5057 "encrypted files will use data=ordered "
5058 "instead of data journaling mode");
5059 }
5060 if (test_opt(sb, DELALLOC))
5061 clear_opt(sb, DELALLOC);
5062 } else {
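		/* Outside data=journal mode, enable cgroup-aware writeback. */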
5063 sb->s_iflags |= SB_I_CGROUPWB;
5064 }
5065
5066 return 0;
5067 }
5068
5069 static const char *ext4_has_journal_option(struct super_block *sb)
5070 {
5071 struct ext4_sb_info *sbi = EXT4_SB(sb);
5072
5073 if (test_opt(sb, JOURNAL_ASYNC_COMMIT))
5074 return "journal_async_commit";
5075 if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM))
5076 return "journal_checksum";
5077 if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
5078 return "commit=";
5079 if (EXT4_MOUNT_DATA_FLAGS &
5080 (sbi->s_mount_opt ^ sbi->s_def_mount_opt))
5081 return "data=";
5082 if (test_opt(sb, DATA_ERR_ABORT))
5083 return "data_err=abort";
5084 return NULL;
5085 }
5086
5087 /*
5088 * Limit the maximum folio order to 2048 blocks to prevent overestimation
5089 * of reserve handle credits during the folio writeback in environments
5090 * where the PAGE_SIZE exceeds 4KB.
5091 */
5092 #define EXT4_MAX_PAGECACHE_ORDER(sb) \
5093 umin(MAX_PAGECACHE_ORDER, (11 + (sb)->s_blocksize_bits - PAGE_SHIFT))
5094 static void ext4_set_max_mapping_order(struct super_block *sb)
5095 {
5096 struct ext4_sb_info *sbi = EXT4_SB(sb);
5097
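	/*
	 * In data=journal mode keep the maximum folio order at the minimum
	 * supported order (no large folios); otherwise allow orders up to
	 * EXT4_MAX_PAGECACHE_ORDER.
	 */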
5098 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
5099 sbi->s_max_folio_order = sbi->s_min_folio_order;
5100 else
5101 sbi->s_max_folio_order = EXT4_MAX_PAGECACHE_ORDER(sb);
5102 }
5103
5104 static int ext4_check_large_folio(struct super_block *sb)
5105 {
5106 const char *err_str = NULL;
5107
5108 if (ext4_has_feature_encrypt(sb))
5109 err_str = "encrypt";
5110
5111 if (!err_str) {
5112 ext4_set_max_mapping_order(sb);
5113 } else if (sb->s_blocksize > PAGE_SIZE) {
5114 ext4_msg(sb, KERN_ERR, "bs(%lu) > ps(%lu) unsupported for %s",
5115 sb->s_blocksize, PAGE_SIZE, err_str);
5116 return -EINVAL;
5117 }
5118
5119 return 0;
5120 }
5121
5122 static int ext4_load_super(struct super_block *sb, ext4_fsblk_t *lsb,
5123 int silent)
5124 {
5125 struct ext4_sb_info *sbi = EXT4_SB(sb);
5126 struct ext4_super_block *es;
5127 ext4_fsblk_t logical_sb_block;
5128 unsigned long offset = 0;
5129 struct buffer_head *bh;
5130 int ret = -EINVAL;
5131 int blocksize;
5132
5133 blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
5134 if (!blocksize) {
5135 ext4_msg(sb, KERN_ERR, "unable to set blocksize");
5136 return -EINVAL;
5137 }
5138
5139 /*
5140 * The ext4 superblock will not be buffer aligned for other than 1kB
5141 * block sizes. We need to calculate the offset from buffer start.
5142 */
5143 if (blocksize != EXT4_MIN_BLOCK_SIZE) {
5144 logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE;
5145 offset = do_div(logical_sb_block, blocksize);
5146 } else {
5147 logical_sb_block = sbi->s_sb_block;
5148 }
5149
5150 bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
5151 if (IS_ERR(bh)) {
5152 ext4_msg(sb, KERN_ERR, "unable to read superblock");
5153 return PTR_ERR(bh);
5154 }
5155 /*
5156 * Note: s_es must be initialized as soon as possible because
5157 * some ext4 macro-instructions depend on its value
5158 */
5159 es = (struct ext4_super_block *) (bh->b_data + offset);
5160 sbi->s_es = es;
5161 sb->s_magic = le16_to_cpu(es->s_magic);
5162 if (sb->s_magic != EXT4_SUPER_MAGIC) {
5163 if (!silent)
5164 ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
5165 goto out;
5166 }
5167
5168 if (le32_to_cpu(es->s_log_block_size) >
5169 (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
5170 ext4_msg(sb, KERN_ERR,
5171 "Invalid log block size: %u",
5172 le32_to_cpu(es->s_log_block_size));
5173 goto out;
5174 }
5175 if (le32_to_cpu(es->s_log_cluster_size) >
5176 (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
5177 ext4_msg(sb, KERN_ERR,
5178 "Invalid log cluster size: %u",
5179 le32_to_cpu(es->s_log_cluster_size));
5180 goto out;
5181 }
5182
5183 blocksize = EXT4_MIN_BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
5184
5185 /*
5186 * If the default block size is not the same as the real block size,
5187 * we need to reload it.
5188 */
5189 if (sb->s_blocksize == blocksize)
5190 goto success;
5191
5192 /*
5193 * bh must be released before kill_bdev(), otherwise
5194 * neither it nor its page will be freed. kill_bdev()
5195 * is called by sb_set_blocksize().
5196 */
5197 brelse(bh);
5198 /* Validate the filesystem blocksize */
5199 if (!sb_set_blocksize(sb, blocksize)) {
5200 ext4_msg(sb, KERN_ERR, "bad block size %d",
5201 blocksize);
5202 bh = NULL;
5203 goto out;
5204 }
5205
5206 logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE;
5207 offset = do_div(logical_sb_block, blocksize);
5208 bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
5209 if (IS_ERR(bh)) {
5210 ext4_msg(sb, KERN_ERR, "Can't read superblock on 2nd try");
5211 ret = PTR_ERR(bh);
5212 bh = NULL;
5213 goto out;
5214 }
5215 es = (struct ext4_super_block *)(bh->b_data + offset);
5216 sbi->s_es = es;
5217 if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
5218 ext4_msg(sb, KERN_ERR, "Magic mismatch, very weird!");
5219 goto out;
5220 }
5221
5222 success:
5223 sbi->s_min_folio_order = get_order(blocksize);
5224 *lsb = logical_sb_block;
5225 sbi->s_sbh = bh;
5226 return 0;
5227 out:
5228 brelse(bh);
5229 return ret;
5230 }
5231
5232 static int ext4_hash_info_init(struct super_block *sb)
5233 {
5234 struct ext4_sb_info *sbi = EXT4_SB(sb);
5235 struct ext4_super_block *es = sbi->s_es;
5236 unsigned int i;
5237
5238 sbi->s_def_hash_version = es->s_def_hash_version;
5239
5240 if (sbi->s_def_hash_version > DX_HASH_LAST) {
5241 ext4_msg(sb, KERN_ERR,
5242 "Invalid default hash set in the superblock");
5243 return -EINVAL;
5244 } else if (sbi->s_def_hash_version == DX_HASH_SIPHASH) {
5245 ext4_msg(sb, KERN_ERR,
5246 "SIPHASH is not a valid default hash value");
5247 return -EINVAL;
5248 }
5249
5250 for (i = 0; i < 4; i++)
5251 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
5252
5253 if (ext4_has_feature_dir_index(sb)) {
5254 i = le32_to_cpu(es->s_flags);
5255 if (i & EXT2_FLAGS_UNSIGNED_HASH)
5256 sbi->s_hash_unsigned = 3;
5257 else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
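			/*
			 * Neither hash flag is set: the filesystem predates
			 * the signed/unsigned dirhash flags, so record the
			 * native signedness of 'char' and, if writable, note
			 * it in the superblock.
			 */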
5258 #ifdef __CHAR_UNSIGNED__
5259 if (!sb_rdonly(sb))
5260 es->s_flags |=
5261 cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
5262 sbi->s_hash_unsigned = 3;
5263 #else
5264 if (!sb_rdonly(sb))
5265 es->s_flags |=
5266 cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
5267 #endif
5268 }
5269 }
5270 return 0;
5271 }
5272
5273 static int ext4_block_group_meta_init(struct super_block *sb, int silent)
5274 {
5275 struct ext4_sb_info *sbi = EXT4_SB(sb);
5276 struct ext4_super_block *es = sbi->s_es;
5277 int has_huge_files;
5278
5279 has_huge_files = ext4_has_feature_huge_file(sb);
5280 sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
5281 has_huge_files);
5282 sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
5283
5284 sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
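	/*
	 * With the 64bit feature the group descriptor size comes from the
	 * superblock and must be a power of two within the supported range;
	 * otherwise the minimum descriptor size is used.
	 */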
5285 if (ext4_has_feature_64bit(sb)) {
5286 if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
5287 sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
5288 !is_power_of_2(sbi->s_desc_size)) {
5289 ext4_msg(sb, KERN_ERR,
5290 "unsupported descriptor size %lu",
5291 sbi->s_desc_size);
5292 return -EINVAL;
5293 }
5294 } else
5295 sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
5296
5297 sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
5298 sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
5299
5300 sbi->s_inodes_per_block = sb->s_blocksize / EXT4_INODE_SIZE(sb);
5301 if (sbi->s_inodes_per_block == 0 || sbi->s_blocks_per_group == 0) {
5302 if (!silent)
5303 ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
5304 return -EINVAL;
5305 }
5306 if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
5307 sbi->s_inodes_per_group > sb->s_blocksize * 8) {
5308 ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
5309 sbi->s_inodes_per_group);
5310 return -EINVAL;
5311 }
5312 sbi->s_itb_per_group = sbi->s_inodes_per_group /
5313 sbi->s_inodes_per_block;
5314 sbi->s_desc_per_block = sb->s_blocksize / EXT4_DESC_SIZE(sb);
5315 sbi->s_mount_state = le16_to_cpu(es->s_state) & ~EXT4_FC_REPLAY;
5316 sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
5317 sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
5318
5319 return 0;
5320 }
5321
5322 /*
5323 * It's hard to get stripe aligned blocks if stripe is not aligned with
5324 * cluster, just disable stripe and alert user to simplify code and avoid
5325 * stripe aligned allocation which will rarely succeed.
5326 */
5327 static bool ext4_is_stripe_incompatible(struct super_block *sb, unsigned long stripe)
5328 {
5329 struct ext4_sb_info *sbi = EXT4_SB(sb);
5330 return (stripe > 0 && sbi->s_cluster_ratio > 1 &&
5331 stripe % sbi->s_cluster_ratio != 0);
5332 }
5333
5334 static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
5335 {
5336 struct ext4_super_block *es = NULL;
5337 struct ext4_sb_info *sbi = EXT4_SB(sb);
5338 ext4_fsblk_t logical_sb_block;
5339 struct inode *root;
5340 int needs_recovery;
5341 int err;
5342 ext4_group_t first_not_zeroed;
5343 struct ext4_fs_context *ctx = fc->fs_private;
5344 int silent = fc->sb_flags & SB_SILENT;
5345
5346 /* Set defaults for the variables that will be set during parsing */
5347 if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO))
5348 ctx->journal_ioprio = EXT4_DEF_JOURNAL_IOPRIO;
5349
5350 sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
5351 sbi->s_sectors_written_start =
5352 part_stat_read(sb->s_bdev, sectors[STAT_WRITE]);
5353
5354 err = ext4_load_super(sb, &logical_sb_block, silent);
5355 if (err)
5356 goto out_fail;
5357
5358 es = sbi->s_es;
5359 sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
5360
5361 err = ext4_init_metadata_csum(sb, es);
5362 if (err)
5363 goto failed_mount;
5364
5365 ext4_set_def_opts(sb, es);
5366
5367 sbi->s_resuid = make_kuid(&init_user_ns, ext4_get_resuid(es));
5368 sbi->s_resgid = make_kgid(&init_user_ns, ext4_get_resgid(es));
5369 sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
5370 sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
5371 sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
5372 sbi->s_sb_update_kb = EXT4_DEF_SB_UPDATE_INTERVAL_KB;
5373 sbi->s_sb_update_sec = EXT4_DEF_SB_UPDATE_INTERVAL_SEC;
5374
5375 /*
5376 * set default s_li_wait_mult for lazyinit, for the case there is
5377 * no mount option specified.
5378 */
5379 sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
5380
5381 err = ext4_inode_info_init(sb, es);
5382 if (err)
5383 goto failed_mount;
5384
5385 err = parse_apply_sb_mount_options(sb, ctx);
5386 if (err < 0)
5387 goto failed_mount;
5388
5389 sbi->s_def_mount_opt = sbi->s_mount_opt;
5390 sbi->s_def_mount_opt2 = sbi->s_mount_opt2;
5391
5392 err = ext4_check_opt_consistency(fc, sb);
5393 if (err < 0)
5394 goto failed_mount;
5395
5396 ext4_apply_options(fc, sb);
5397
5398 err = ext4_check_large_folio(sb);
5399 if (err < 0)
5400 goto failed_mount;
5401
5402 err = ext4_encoding_init(sb, es);
5403 if (err)
5404 goto failed_mount;
5405
5406 err = ext4_check_journal_data_mode(sb);
5407 if (err)
5408 goto failed_mount;
5409
5410 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
5411 (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
5412
5413 /* HSM events are allowed by default. */
5414 sb->s_iflags |= SB_I_ALLOW_HSM;
5415
5416 err = ext4_check_feature_compatibility(sb, es, silent);
5417 if (err)
5418 goto failed_mount;
5419
5420 err = ext4_block_group_meta_init(sb, silent);
5421 if (err)
5422 goto failed_mount;
5423
5424 err = ext4_hash_info_init(sb);
5425 if (err)
5426 goto failed_mount;
5427
5428 err = ext4_handle_clustersize(sb);
5429 if (err)
5430 goto failed_mount;
5431
5432 err = ext4_check_geometry(sb, es);
5433 if (err)
5434 goto failed_mount;
5435
5436 timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
5437 spin_lock_init(&sbi->s_error_lock);
5438 mutex_init(&sbi->s_error_notify_mutex);
5439 INIT_WORK(&sbi->s_sb_upd_work, update_super_work);
5440
5441 err = ext4_group_desc_init(sb, es, logical_sb_block, &first_not_zeroed);
5442 if (err)
5443 goto failed_mount3;
5444
5445 err = ext4_es_register_shrinker(sbi);
5446 if (err)
5447 goto failed_mount3;
5448
5449 sbi->s_stripe = ext4_get_stripe_size(sbi);
5450 if (ext4_is_stripe_incompatible(sb, sbi->s_stripe)) {
5451 ext4_msg(sb, KERN_WARNING,
5452 "stripe (%lu) is not aligned with cluster size (%u), "
5453 "stripe is disabled",
5454 sbi->s_stripe, sbi->s_cluster_ratio);
5455 sbi->s_stripe = 0;
5456 }
5457 sbi->s_extent_max_zeroout_kb = 32;
5458
5459 /*
5460 * set up enough so that it can read an inode
5461 */
5462 sb->s_op = &ext4_sops;
5463 sb->s_export_op = &ext4_export_ops;
5464 sb->s_xattr = ext4_xattr_handlers;
5465 #ifdef CONFIG_FS_ENCRYPTION
5466 sb->s_cop = &ext4_cryptops;
5467 #endif
5468 #ifdef CONFIG_FS_VERITY
5469 sb->s_vop = &ext4_verityops;
5470 #endif
5471 #ifdef CONFIG_QUOTA
5472 sb->dq_op = &ext4_quota_operations;
5473 if (ext4_has_feature_quota(sb))
5474 sb->s_qcop = &dquot_quotactl_sysfile_ops;
5475 else
5476 sb->s_qcop = &ext4_qctl_operations;
5477 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
5478 #endif
5479 super_set_uuid(sb, es->s_uuid, sizeof(es->s_uuid));
5480 super_set_sysfs_name_bdev(sb);
5481
5482 INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
5483 mutex_init(&sbi->s_orphan_lock);
5484
5485 spin_lock_init(&sbi->s_bdev_wb_lock);
5486
5487 ext4_atomic_write_init(sb);
5488 ext4_fast_commit_init(sb);
5489
5490 sb->s_root = NULL;
5491
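	/*
	 * Recovery is needed if there are orphan inodes to process or the
	 * journal was not cleanly shut down.
	 */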
5492 needs_recovery = (es->s_last_orphan != 0 ||
5493 ext4_has_feature_orphan_present(sb) ||
5494 ext4_has_feature_journal_needs_recovery(sb));
5495
5496 if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb)) {
5497 err = ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block));
5498 if (err)
5499 goto failed_mount3a;
5500 }
5501
5502 err = -EINVAL;
5503 /*
5504 * The first inode we look at is the journal inode. Don't try
5505 * root first: it may be modified in the journal!
5506 */
5507 if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
5508 err = ext4_load_and_init_journal(sb, es, ctx);
5509 if (err)
5510 goto failed_mount3a;
5511 if (bdev_read_only(sb->s_bdev))
5512 needs_recovery = 0;
5513 } else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
5514 ext4_has_feature_journal_needs_recovery(sb)) {
5515 ext4_msg(sb, KERN_ERR, "required journal recovery "
5516 "suppressed and not mounted read-only");
5517 goto failed_mount3a;
5518 } else {
5519 const char *journal_option;
5520
5521 /* Nojournal mode, all journal mount options are illegal */
5522 journal_option = ext4_has_journal_option(sb);
5523 if (journal_option != NULL) {
5524 ext4_msg(sb, KERN_ERR,
5525 "can't mount with %s, fs mounted w/o journal",
5526 journal_option);
5527 goto failed_mount3a;
5528 }
5529
5530 sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
5531 clear_opt(sb, JOURNAL_CHECKSUM);
5532 clear_opt(sb, DATA_FLAGS);
5533 clear_opt2(sb, JOURNAL_FAST_COMMIT);
5534 sbi->s_journal = NULL;
5535 needs_recovery = 0;
5536 }
5537
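	/*
	 * Set up the mbcache-based caches used to share identical extended
	 * attribute blocks (and ea_inodes), unless nombcache was requested.
	 */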
5538 if (!test_opt(sb, NO_MBCACHE)) {
5539 sbi->s_ea_block_cache = ext4_xattr_create_cache();
5540 if (!sbi->s_ea_block_cache) {
5541 ext4_msg(sb, KERN_ERR,
5542 "Failed to create ea_block_cache");
5543 err = -EINVAL;
5544 goto failed_mount_wq;
5545 }
5546
5547 if (ext4_has_feature_ea_inode(sb)) {
5548 sbi->s_ea_inode_cache = ext4_xattr_create_cache();
5549 if (!sbi->s_ea_inode_cache) {
5550 ext4_msg(sb, KERN_ERR,
5551 "Failed to create ea_inode_cache");
5552 err = -EINVAL;
5553 goto failed_mount_wq;
5554 }
5555 }
5556 }
5557
5558 /*
5559 * Get the # of file system overhead blocks from the
5560 * superblock if present.
5561 */
5562 sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
5563 /* ignore the precalculated value if it is ridiculous */
5564 if (sbi->s_overhead > ext4_blocks_count(es))
5565 sbi->s_overhead = 0;
5566 /*
5567 * If the bigalloc feature is not enabled recalculating the
5568 * overhead doesn't take long, so we might as well just redo
5569 * it to make sure we are using the correct value.
5570 */
5571 if (!ext4_has_feature_bigalloc(sb))
5572 sbi->s_overhead = 0;
5573 if (sbi->s_overhead == 0) {
5574 err = ext4_calculate_overhead(sb);
5575 if (err)
5576 goto failed_mount_wq;
5577 }
5578
5579 /*
5580 * The maximum number of concurrent works can be high and
5581 * concurrency isn't really necessary. Limit it to 1.
5582 */
5583 EXT4_SB(sb)->rsv_conversion_wq =
5584 alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
5585 if (!EXT4_SB(sb)->rsv_conversion_wq) {
5586 printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
5587 err = -ENOMEM;
5588 goto failed_mount4;
5589 }
5590
5591 /*
5592 * The jbd2_journal_load will have done any necessary log recovery,
5593 * so we can safely mount the rest of the filesystem now.
5594 */
5595
5596 root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
5597 if (IS_ERR(root)) {
5598 ext4_msg(sb, KERN_ERR, "get root inode failed");
5599 err = PTR_ERR(root);
5600 root = NULL;
5601 goto failed_mount4;
5602 }
5603 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
5604 ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
5605 iput(root);
5606 err = -EFSCORRUPTED;
5607 goto failed_mount4;
5608 }
5609
5610 generic_set_sb_d_ops(sb);
5611 sb->s_root = d_make_root(root);
5612 if (!sb->s_root) {
5613 ext4_msg(sb, KERN_ERR, "get root dentry failed");
5614 err = -ENOMEM;
5615 goto failed_mount4;
5616 }
5617
5618 err = ext4_setup_super(sb, es, sb_rdonly(sb));
5619 if (err == -EROFS) {
5620 sb->s_flags |= SB_RDONLY;
5621 } else if (err)
5622 goto failed_mount4a;
5623
5624 ext4_set_resv_clusters(sb);
5625
5626 if (test_opt(sb, BLOCK_VALIDITY)) {
5627 err = ext4_setup_system_zone(sb);
5628 if (err) {
5629 ext4_msg(sb, KERN_ERR, "failed to initialize system "
5630 "zone (%d)", err);
5631 goto failed_mount4a;
5632 }
5633 }
5634 ext4_fc_replay_cleanup(sb);
5635
5636 ext4_ext_init(sb);
5637
5638 /*
5639 * Enable optimize_scan if number of groups is > threshold. This can be
5640 * turned off by passing "mb_optimize_scan=0". This can also be
5641 * turned on forcefully by passing "mb_optimize_scan=1".
5642 */
5643 if (!(ctx->spec & EXT4_SPEC_mb_optimize_scan)) {
5644 if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD)
5645 set_opt2(sb, MB_OPTIMIZE_SCAN);
5646 else
5647 clear_opt2(sb, MB_OPTIMIZE_SCAN);
5648 }
5649
5650 err = ext4_percpu_param_init(sbi);
5651 if (err)
5652 goto failed_mount5;
5653
5654 err = ext4_mb_init(sb);
5655 if (err) {
5656 ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
5657 err);
5658 goto failed_mount5;
5659 }
5660
5661 /*
5662 * We can only set up the journal commit callback once
5663 * mballoc is initialized
5664 */
5665 if (sbi->s_journal)
5666 sbi->s_journal->j_commit_callback =
5667 ext4_journal_commit_callback;
5668
5669 if (ext4_has_feature_flex_bg(sb))
5670 if (!ext4_fill_flex_info(sb)) {
5671 ext4_msg(sb, KERN_ERR,
5672 "unable to initialize "
5673 "flex_bg meta info!");
5674 err = -ENOMEM;
5675 goto failed_mount6;
5676 }
5677
5678 err = ext4_register_li_request(sb, first_not_zeroed);
5679 if (err)
5680 goto failed_mount6;
5681
5682 err = ext4_init_orphan_info(sb);
5683 if (err)
5684 goto failed_mount7;
5685 #ifdef CONFIG_QUOTA
5686 /* Enable quota usage during mount. */
5687 if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
5688 err = ext4_enable_quotas(sb);
5689 if (err)
5690 goto failed_mount8;
5691 }
5692 #endif /* CONFIG_QUOTA */
5693
5694 /*
5695 * Save the original bdev mapping's wb_err value which could be
5696 * used to detect the metadata async write error.
5697 */
5698 errseq_check_and_advance(&sb->s_bdev->bd_mapping->wb_err,
5699 &sbi->s_bdev_wb_err);
5700 EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
5701 ext4_orphan_cleanup(sb, es);
5702 EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
5703 /*
5704 * Update the checksum after updating free space/inode counters and
5705 * ext4_orphan_cleanup. Otherwise the superblock can have an incorrect
5706 * checksum in the buffer cache until it is written out and
5707 * e2fsprogs programs trying to open a file system immediately
5708 * after it is mounted can fail.
5709 */
5710 ext4_superblock_csum_set(sb);
5711 if (needs_recovery) {
5712 ext4_msg(sb, KERN_INFO, "recovery complete");
5713 err = ext4_mark_recovery_complete(sb, es);
5714 if (err)
5715 goto failed_mount9;
5716 }
5717
5718 if (test_opt(sb, DISCARD) && !bdev_max_discard_sectors(sb->s_bdev)) {
5719 ext4_msg(sb, KERN_WARNING,
5720 "mounting with \"discard\" option, but the device does not support discard");
5721 clear_opt(sb, DISCARD);
5722 }
5723
5724 if (es->s_error_count) {
5725 sbi->s_err_report_sec = 5*60; /* first time 5 minutes */
5726 mod_timer(&sbi->s_err_report,
5727 jiffies + secs_to_jiffies(sbi->s_err_report_sec));
5728 }
5729 sbi->s_err_report_sec = 24*60*60; /* Once a day */
5730
5731 /* Enable message ratelimiting. Default is 10 messages per 5 secs. */
5732 ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
5733 ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
5734 ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
5735 atomic_set(&sbi->s_warning_count, 0);
5736 atomic_set(&sbi->s_msg_count, 0);
5737
5738 /* Register sysfs after all initializations are complete. */
5739 err = ext4_register_sysfs(sb);
5740 if (err)
5741 goto failed_mount9;
5742
5743 return 0;
5744
5745 failed_mount9:
5746 ext4_quotas_off(sb, EXT4_MAXQUOTAS);
5747 failed_mount8: __maybe_unused
5748 ext4_release_orphan_info(sb);
5749 failed_mount7:
5750 ext4_unregister_li_request(sb);
5751 failed_mount6:
5752 ext4_mb_release(sb);
5753 ext4_flex_groups_free(sbi);
5754 failed_mount5:
5755 ext4_percpu_param_destroy(sbi);
5756 ext4_ext_release(sb);
5757 ext4_release_system_zone(sb);
5758 failed_mount4a:
5759 dput(sb->s_root);
5760 sb->s_root = NULL;
5761 failed_mount4:
5762 ext4_msg(sb, KERN_ERR, "mount failed");
5763 if (EXT4_SB(sb)->rsv_conversion_wq)
5764 destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
5765 failed_mount_wq:
5766 ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
5767 sbi->s_ea_inode_cache = NULL;
5768
5769 ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
5770 sbi->s_ea_block_cache = NULL;
5771
5772 if (sbi->s_journal) {
5773 ext4_journal_destroy(sbi, sbi->s_journal);
5774 }
5775 failed_mount3a:
5776 ext4_es_unregister_shrinker(sbi);
5777 failed_mount3:
5778 /* flush s_sb_upd_work before sbi destroy */
5779 flush_work(&sbi->s_sb_upd_work);
5780 ext4_stop_mmpd(sbi);
5781 timer_delete_sync(&sbi->s_err_report);
5782 ext4_group_desc_free(sbi);
5783 failed_mount:
5784 #if IS_ENABLED(CONFIG_UNICODE)
5785 utf8_unload(sb->s_encoding);
5786 #endif
5787
5788 #ifdef CONFIG_QUOTA
5789 for (unsigned int i = 0; i < EXT4_MAXQUOTAS; i++)
5790 kfree(get_qf_name(sb, sbi, i));
5791 #endif
5792 fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
5793 brelse(sbi->s_sbh);
5794 if (sbi->s_journal_bdev_file) {
5795 invalidate_bdev(file_bdev(sbi->s_journal_bdev_file));
5796 bdev_fput(sbi->s_journal_bdev_file);
5797 }
5798 out_fail:
5799 invalidate_bdev(sb->s_bdev);
5800 sb->s_fs_info = NULL;
5801 return err;
5802 }
5803
5804 static int ext4_fill_super(struct super_block *sb, struct fs_context *fc)
5805 {
5806 struct ext4_fs_context *ctx = fc->fs_private;
5807 struct ext4_sb_info *sbi;
5808 const char *descr;
5809 int ret;
5810
5811 sbi = ext4_alloc_sbi(sb);
5812 if (!sbi)
5813 return -ENOMEM;
5814
5815 fc->s_fs_info = sbi;
5816
5817 /* Cleanup superblock name */
5818 strreplace(sb->s_id, '/', '!');
5819
5820 sbi->s_sb_block = 1; /* Default super block location */
5821 if (ctx->spec & EXT4_SPEC_s_sb_block)
5822 sbi->s_sb_block = ctx->s_sb_block;
5823
5824 ret = __ext4_fill_super(fc, sb);
5825 if (ret < 0)
5826 goto free_sbi;
5827
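	/*
	 * Build the data-mode suffix for the mount message below; without a
	 * journal the "%s" in "with%s" becomes "out journal".
	 */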
5828 if (sbi->s_journal) {
5829 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
5830 descr = " journalled data mode";
5831 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
5832 descr = " ordered data mode";
5833 else
5834 descr = " writeback data mode";
5835 } else
5836 descr = "out journal";
5837
5838 if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
5839 ext4_msg(sb, KERN_INFO, "mounted filesystem %pU %s with%s. "
5840 "Quota mode: %s.", &sb->s_uuid,
5841 sb_rdonly(sb) ? "ro" : "r/w", descr,
5842 ext4_quota_mode(sb));
5843
5844 /* Update the s_overhead_clusters if necessary */
5845 ext4_update_overhead(sb, false);
5846 return 0;
5847
5848 free_sbi:
5849 ext4_free_sbi(sbi);
5850 fc->s_fs_info = NULL;
5851 return ret;
5852 }
5853
5854 static int ext4_get_tree(struct fs_context *fc)
5855 {
5856 return get_tree_bdev(fc, ext4_fill_super);
5857 }
5858
5859 /*
5860 * Setup any per-fs journal parameters now. We'll do this both on
5861 * initial mount, once the journal has been initialised but before we've
5862 * done any recovery; and again on any subsequent remount.
5863 */
5864 static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
5865 {
5866 struct ext4_sb_info *sbi = EXT4_SB(sb);
5867
5868 journal->j_commit_interval = sbi->s_commit_interval;
5869 journal->j_min_batch_time = sbi->s_min_batch_time;
5870 journal->j_max_batch_time = sbi->s_max_batch_time;
5871 ext4_fc_init(sb, journal);
5872
5873 write_lock(&journal->j_state_lock);
5874 if (test_opt(sb, BARRIER))
5875 journal->j_flags |= JBD2_BARRIER;
5876 else
5877 journal->j_flags &= ~JBD2_BARRIER;
5878 /*
5879 * Always enable journal cycle record option, letting the journal
5880 * records log transactions continuously between each mount.
5881 */
5882 journal->j_flags |= JBD2_CYCLE_RECORD;
5883 write_unlock(&journal->j_state_lock);
5884 }
5885
5886 static struct inode *ext4_get_journal_inode(struct super_block *sb,
5887 unsigned int journal_inum)
5888 {
5889 struct inode *journal_inode;
5890
5891 /*
5892 * Test for the existence of a valid inode on disk. Bad things
5893 * happen if we iget() an unused inode, as the subsequent iput()
5894 * will try to delete it.
5895 */
5896 journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
5897 if (IS_ERR(journal_inode)) {
5898 ext4_msg(sb, KERN_ERR, "no journal found");
5899 return ERR_CAST(journal_inode);
5900 }
5901 if (!journal_inode->i_nlink) {
5902 make_bad_inode(journal_inode);
5903 iput(journal_inode);
5904 ext4_msg(sb, KERN_ERR, "journal inode is deleted");
5905 return ERR_PTR(-EFSCORRUPTED);
5906 }
5907 if (!S_ISREG(journal_inode->i_mode) || IS_ENCRYPTED(journal_inode)) {
5908 ext4_msg(sb, KERN_ERR, "invalid journal inode");
5909 iput(journal_inode);
5910 return ERR_PTR(-EFSCORRUPTED);
5911 }
5912
5913 ext4_debug("Journal inode found at %p: %lld bytes\n",
5914 journal_inode, journal_inode->i_size);
5915 return journal_inode;
5916 }
5917
5918 static int ext4_journal_bmap(journal_t *journal, sector_t *block)
5919 {
5920 struct ext4_map_blocks map;
5921 int ret;
5922
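	/*
	 * An external journal device has no journal inode; block numbers are
	 * already physical, so there is nothing to translate.
	 */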
5923 if (journal->j_inode == NULL)
5924 return 0;
5925
5926 map.m_lblk = *block;
5927 map.m_len = 1;
5928 ret = ext4_map_blocks(NULL, journal->j_inode, &map, 0);
5929 if (ret <= 0) {
5930 ext4_msg(journal->j_inode->i_sb, KERN_CRIT,
5931 "journal bmap failed: block %llu ret %d\n",
5932 *block, ret);
5933 jbd2_journal_abort(journal, ret ? ret : -EFSCORRUPTED);
5934 return ret;
5935 }
5936 *block = map.m_pblk;
5937 return 0;
5938 }
5939
5940 static journal_t *ext4_open_inode_journal(struct super_block *sb,
5941 unsigned int journal_inum)
5942 {
5943 struct inode *journal_inode;
5944 journal_t *journal;
5945
5946 journal_inode = ext4_get_journal_inode(sb, journal_inum);
5947 if (IS_ERR(journal_inode))
5948 return ERR_CAST(journal_inode);
5949
5950 journal = jbd2_journal_init_inode(journal_inode);
5951 if (IS_ERR(journal)) {
5952 ext4_msg(sb, KERN_ERR, "Could not load journal inode");
5953 iput(journal_inode);
5954 return ERR_CAST(journal);
5955 }
5956 journal->j_private = sb;
5957 journal->j_bmap = ext4_journal_bmap;
5958 ext4_init_journal_params(sb, journal);
5959 return journal;
5960 }
5961
5962 static struct file *ext4_get_journal_blkdev(struct super_block *sb,
5963 dev_t j_dev, ext4_fsblk_t *j_start,
5964 ext4_fsblk_t *j_len)
5965 {
5966 struct buffer_head *bh;
5967 struct block_device *bdev;
5968 struct file *bdev_file;
5969 int hblock, blocksize;
5970 ext4_fsblk_t sb_block;
5971 unsigned long offset;
5972 struct ext4_super_block *es;
5973 int errno;
5974
5975 bdev_file = bdev_file_open_by_dev(j_dev,
5976 BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES,
5977 sb, &fs_holder_ops);
5978 if (IS_ERR(bdev_file)) {
5979 ext4_msg(sb, KERN_ERR,
5980 "failed to open journal device unknown-block(%u,%u) %ld",
5981 MAJOR(j_dev), MINOR(j_dev), PTR_ERR(bdev_file));
5982 return bdev_file;
5983 }
5984
5985 bdev = file_bdev(bdev_file);
5986 blocksize = sb->s_blocksize;
5987 hblock = bdev_logical_block_size(bdev);
5988 if (blocksize < hblock) {
5989 ext4_msg(sb, KERN_ERR,
5990 "blocksize too small for journal device");
5991 errno = -EINVAL;
5992 goto out_bdev;
5993 }
5994
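	/*
	 * The journal device carries an ext4 superblock at byte offset 1024
	 * (EXT4_MIN_BLOCK_SIZE); convert that to a block number and in-block
	 * offset for the device's block size.
	 */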
5995 sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
5996 offset = EXT4_MIN_BLOCK_SIZE % blocksize;
5997 set_blocksize(bdev_file, blocksize);
5998 bh = __bread(bdev, sb_block, blocksize);
5999 if (!bh) {
6000 ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
6001 "external journal");
6002 errno = -EINVAL;
6003 goto out_bdev;
6004 }
6005
6006 es = (struct ext4_super_block *) (bh->b_data + offset);
6007 if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
6008 !(le32_to_cpu(es->s_feature_incompat) &
6009 EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
6010 ext4_msg(sb, KERN_ERR, "external journal has bad superblock");
6011 errno = -EFSCORRUPTED;
6012 goto out_bh;
6013 }
6014
6015 if ((le32_to_cpu(es->s_feature_ro_compat) &
6016 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
6017 es->s_checksum != ext4_superblock_csum(es)) {
6018 ext4_msg(sb, KERN_ERR, "external journal has corrupt superblock");
6019 errno = -EFSCORRUPTED;
6020 goto out_bh;
6021 }
6022
6023 if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
6024 ext4_msg(sb, KERN_ERR, "journal UUID does not match");
6025 errno = -EFSCORRUPTED;
6026 goto out_bh;
6027 }
6028
6029 *j_start = sb_block + 1;
6030 *j_len = ext4_blocks_count(es);
6031 brelse(bh);
6032 return bdev_file;
6033
6034 out_bh:
6035 brelse(bh);
6036 out_bdev:
6037 bdev_fput(bdev_file);
6038 return ERR_PTR(errno);
6039 }
6040
6041 static journal_t *ext4_open_dev_journal(struct super_block *sb,
6042 dev_t j_dev)
6043 {
6044 journal_t *journal;
6045 ext4_fsblk_t j_start;
6046 ext4_fsblk_t j_len;
6047 struct file *bdev_file;
6048 int errno = 0;
6049
6050 bdev_file = ext4_get_journal_blkdev(sb, j_dev, &j_start, &j_len);
6051 if (IS_ERR(bdev_file))
6052 return ERR_CAST(bdev_file);
6053
6054 journal = jbd2_journal_init_dev(file_bdev(bdev_file), sb->s_bdev, j_start,
6055 j_len, sb->s_blocksize);
6056 if (IS_ERR(journal)) {
6057 ext4_msg(sb, KERN_ERR, "failed to create device journal");
6058 errno = PTR_ERR(journal);
6059 goto out_bdev;
6060 }
6061 if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
6062 ext4_msg(sb, KERN_ERR, "External journal has more than one "
6063 "user (unsupported) - %d",
6064 be32_to_cpu(journal->j_superblock->s_nr_users));
6065 errno = -EINVAL;
6066 goto out_journal;
6067 }
6068 journal->j_private = sb;
6069 EXT4_SB(sb)->s_journal_bdev_file = bdev_file;
6070 ext4_init_journal_params(sb, journal);
6071 return journal;
6072
6073 out_journal:
6074 ext4_journal_destroy(EXT4_SB(sb), journal);
6075 out_bdev:
6076 bdev_fput(bdev_file);
6077 return ERR_PTR(errno);
6078 }
6079
6080 static int ext4_load_journal(struct super_block *sb,
6081 struct ext4_super_block *es,
6082 unsigned long journal_devnum)
6083 {
6084 journal_t *journal;
6085 unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
6086 dev_t journal_dev;
6087 int err = 0;
6088 int really_read_only;
6089 int journal_dev_ro;
6090
6091 if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
6092 return -EFSCORRUPTED;
6093
6094 if (journal_devnum &&
6095 journal_devnum != le32_to_cpu(es->s_journal_dev)) {
6096 ext4_msg(sb, KERN_INFO, "external journal device major/minor "
6097 "numbers have changed");
6098 journal_dev = new_decode_dev(journal_devnum);
6099 } else
6100 journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
6101
6102 if (journal_inum && journal_dev) {
6103 ext4_msg(sb, KERN_ERR,
6104 "filesystem has both journal inode and journal device!");
6105 return -EINVAL;
6106 }
6107
6108 if (journal_inum) {
6109 journal = ext4_open_inode_journal(sb, journal_inum);
6110 if (IS_ERR(journal))
6111 return PTR_ERR(journal);
6112 } else {
6113 journal = ext4_open_dev_journal(sb, journal_dev);
6114 if (IS_ERR(journal))
6115 return PTR_ERR(journal);
6116 }
6117
6118 journal_dev_ro = bdev_read_only(journal->j_dev);
6119 really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro;
6120
6121 if (journal_dev_ro && !sb_rdonly(sb)) {
6122 ext4_msg(sb, KERN_ERR,
6123 "journal device read-only, try mounting with '-o ro'");
6124 err = -EROFS;
6125 goto err_out;
6126 }
6127
6128 /*
6129 * Are we loading a blank journal or performing recovery after a
6130 * crash? For recovery, we need to check in advance whether we
6131 * can get read-write access to the device.
6132 */
6133 if (ext4_has_feature_journal_needs_recovery(sb)) {
6134 if (sb_rdonly(sb)) {
6135 ext4_msg(sb, KERN_INFO, "INFO: recovery "
6136 "required on readonly filesystem");
6137 if (really_read_only) {
6138 ext4_msg(sb, KERN_ERR, "write access "
6139 "unavailable, cannot proceed "
6140 "(try mounting with noload)");
6141 err = -EROFS;
6142 goto err_out;
6143 }
6144 ext4_msg(sb, KERN_INFO, "write access will "
6145 "be enabled during recovery");
6146 }
6147 }
6148
6149 if (!(journal->j_flags & JBD2_BARRIER))
6150 ext4_msg(sb, KERN_INFO, "barriers disabled");
6151
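	/*
	 * If no recovery is pending, wipe any stale journal contents before
	 * loading the journal.
	 */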
6152 if (!ext4_has_feature_journal_needs_recovery(sb))
6153 err = jbd2_journal_wipe(journal, !really_read_only);
6154 if (!err) {
6155 char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
6156 __le16 orig_state;
6157 bool changed = false;
6158
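		/*
		 * Journal replay may rewrite the superblock buffer with an
		 * older journaled copy; preserve the error information region
		 * across jbd2_journal_load() and restore it afterwards.
		 */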
6159 if (save)
6160 memcpy(save, ((char *) es) +
6161 EXT4_S_ERR_START, EXT4_S_ERR_LEN);
6162 err = jbd2_journal_load(journal);
6163 if (save && memcmp(((char *) es) + EXT4_S_ERR_START,
6164 save, EXT4_S_ERR_LEN)) {
6165 memcpy(((char *) es) + EXT4_S_ERR_START,
6166 save, EXT4_S_ERR_LEN);
6167 changed = true;
6168 }
6169 kfree(save);
6170 orig_state = es->s_state;
6171 es->s_state |= cpu_to_le16(EXT4_SB(sb)->s_mount_state &
6172 EXT4_ERROR_FS);
6173 if (orig_state != es->s_state)
6174 changed = true;
6175 /* Write out restored error information to the superblock */
6176 if (changed && !really_read_only) {
6177 int err2;
6178 err2 = ext4_commit_super(sb);
6179 err = err ? : err2;
6180 }
6181 }
6182
6183 if (err) {
6184 ext4_msg(sb, KERN_ERR, "error loading journal");
6185 goto err_out;
6186 }
6187
6188 EXT4_SB(sb)->s_journal = journal;
6189 err = ext4_clear_journal_err(sb, es);
6190 if (err) {
6191 ext4_journal_destroy(EXT4_SB(sb), journal);
6192 return err;
6193 }
6194
6195 if (!really_read_only && journal_devnum &&
6196 journal_devnum != le32_to_cpu(es->s_journal_dev)) {
6197 es->s_journal_dev = cpu_to_le32(journal_devnum);
6198 ext4_commit_super(sb);
6199 }
6200 if (!really_read_only && journal_inum &&
6201 journal_inum != le32_to_cpu(es->s_journal_inum)) {
6202 es->s_journal_inum = cpu_to_le32(journal_inum);
6203 ext4_commit_super(sb);
6204 }
6205
6206 return 0;
6207
6208 err_out:
6209 ext4_journal_destroy(EXT4_SB(sb), journal);
6210 return err;
6211 }
6212
6213 /* Copy state of EXT4_SB(sb) into buffer for on-disk superblock */
6214 static void ext4_update_super(struct super_block *sb)
6215 {
6216 struct ext4_sb_info *sbi = EXT4_SB(sb);
6217 struct ext4_super_block *es = sbi->s_es;
6218 struct buffer_head *sbh = sbi->s_sbh;
6219
6220 lock_buffer(sbh);
6221 /*
6222 * If the file system is mounted read-only, don't update the
6223 * superblock write time. This avoids updating the superblock
6224 * write time when we are mounting the root file system
6225 * read-only but we need to replay the journal; at that point,
6226 * for people who are east of GMT and who make their clock
6227 * tick in localtime for Windows bug-for-bug compatibility,
6228 * the clock is set in the future, and this will cause e2fsck
6229 * to complain and force a full file system check.
6230 */
6231 if (!sb_rdonly(sb))
6232 ext4_update_tstamp(es, s_wtime);
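	/*
	 * Lifetime writes: the value sampled at mount time plus the sectors
	 * written since then, converted to KiB.
	 */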
6233 es->s_kbytes_written =
6234 cpu_to_le64(sbi->s_kbytes_written +
6235 ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
6236 sbi->s_sectors_written_start) >> 1));
6237 if (percpu_counter_initialized(&sbi->s_freeclusters_counter))
6238 ext4_free_blocks_count_set(es,
6239 EXT4_C2B(sbi, percpu_counter_sum_positive(
6240 &sbi->s_freeclusters_counter)));
6241 if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
6242 es->s_free_inodes_count =
6243 cpu_to_le32(percpu_counter_sum_positive(
6244 &sbi->s_freeinodes_counter));
6245 /* Copy error information to the on-disk superblock */
6246 spin_lock(&sbi->s_error_lock);
6247 if (sbi->s_add_error_count > 0) {
6248 es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
6249 if (!es->s_first_error_time && !es->s_first_error_time_hi) {
6250 __ext4_update_tstamp(&es->s_first_error_time,
6251 &es->s_first_error_time_hi,
6252 sbi->s_first_error_time);
6253 strtomem_pad(es->s_first_error_func,
6254 sbi->s_first_error_func, 0);
6255 es->s_first_error_line =
6256 cpu_to_le32(sbi->s_first_error_line);
6257 es->s_first_error_ino =
6258 cpu_to_le32(sbi->s_first_error_ino);
6259 es->s_first_error_block =
6260 cpu_to_le64(sbi->s_first_error_block);
6261 es->s_first_error_errcode =
6262 ext4_errno_to_code(sbi->s_first_error_code);
6263 }
6264 __ext4_update_tstamp(&es->s_last_error_time,
6265 &es->s_last_error_time_hi,
6266 sbi->s_last_error_time);
6267 strtomem_pad(es->s_last_error_func, sbi->s_last_error_func, 0);
6268 es->s_last_error_line = cpu_to_le32(sbi->s_last_error_line);
6269 es->s_last_error_ino = cpu_to_le32(sbi->s_last_error_ino);
6270 es->s_last_error_block = cpu_to_le64(sbi->s_last_error_block);
6271 es->s_last_error_errcode =
6272 ext4_errno_to_code(sbi->s_last_error_code);
6273 /*
6274 * Start the daily error reporting function if it hasn't been
6275 * started already and sbi->s_err_report_sec is not zero
6276 */
6277 if (!es->s_error_count && sbi->s_err_report_sec)
6278 mod_timer(&sbi->s_err_report,
6279 jiffies + secs_to_jiffies(sbi->s_err_report_sec));
6280 le32_add_cpu(&es->s_error_count, sbi->s_add_error_count);
6281 sbi->s_add_error_count = 0;
6282 }
6283 spin_unlock(&sbi->s_error_lock);
6284
6285 ext4_superblock_csum_set(sb);
6286 unlock_buffer(sbh);
6287 }
6288
6289 static int ext4_commit_super(struct super_block *sb)
6290 {
6291 struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
6292
6293 if (!sbh)
6294 return -EINVAL;
6295
6296 ext4_update_super(sb);
6297
6298 lock_buffer(sbh);
6299 /* Buffer got discarded which means block device got invalidated */
6300 if (!buffer_mapped(sbh)) {
6301 unlock_buffer(sbh);
6302 return -EIO;
6303 }
6304
6305 if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
6306 /*
6307 * Oh, dear. A previous attempt to write the
6308 * superblock failed. This could happen because the
6309 * USB device was yanked out. Or it could happen to
6310 * be a transient write error and maybe the block will
6311 * be remapped. Nothing we can do but to retry the
6312 * write and hope for the best.
6313 */
6314 ext4_msg(sb, KERN_ERR, "previous I/O error to "
6315 "superblock detected");
6316 clear_buffer_write_io_error(sbh);
6317 set_buffer_uptodate(sbh);
6318 }
6319 get_bh(sbh);
6320 /* Clear potential dirty bit if it was journalled update */
6321 clear_buffer_dirty(sbh);
6322 sbh->b_end_io = end_buffer_write_sync;
6323 submit_bh(REQ_OP_WRITE | REQ_SYNC |
6324 (test_opt(sb, BARRIER) ? REQ_FUA : 0), sbh);
6325 wait_on_buffer(sbh);
6326 if (buffer_write_io_error(sbh)) {
6327 ext4_msg(sb, KERN_ERR, "I/O error while writing "
6328 "superblock");
6329 clear_buffer_write_io_error(sbh);
6330 set_buffer_uptodate(sbh);
6331 return -EIO;
6332 }
6333 return 0;
6334 }
6335
6336 /*
6337 * Have we just finished recovery? If so, and if we are mounting (or
6338 * remounting) the filesystem readonly, then we will end up with a
6339 * consistent fs on disk. Record that fact.
6340 */
6341 static int ext4_mark_recovery_complete(struct super_block *sb,
6342 struct ext4_super_block *es)
6343 {
6344 int err;
6345 journal_t *journal = EXT4_SB(sb)->s_journal;
6346
6347 if (!ext4_has_feature_journal(sb)) {
6348 if (journal != NULL) {
6349 ext4_error(sb, "Journal got removed while the fs was "
6350 "mounted!");
6351 return -EFSCORRUPTED;
6352 }
6353 return 0;
6354 }
6355 jbd2_journal_lock_updates(journal);
6356 err = jbd2_journal_flush(journal, 0);
6357 if (err < 0)
6358 goto out;
6359
6360 if (sb_rdonly(sb) && (ext4_has_feature_journal_needs_recovery(sb) ||
6361 ext4_has_feature_orphan_present(sb))) {
6362 if (!ext4_orphan_file_empty(sb)) {
6363 ext4_error(sb, "Orphan file not empty on read-only fs.");
6364 err = -EFSCORRUPTED;
6365 goto out;
6366 }
6367 ext4_clear_feature_journal_needs_recovery(sb);
6368 ext4_clear_feature_orphan_present(sb);
6369 ext4_commit_super(sb);
6370 }
6371 out:
6372 jbd2_journal_unlock_updates(journal);
6373 return err;
6374 }
6375
6376 /*
6377 * If we are mounting (or read-write remounting) a filesystem whose journal
6378 * has recorded an error from a previous lifetime, move that error to the
6379 * main filesystem now.
6380 */
6381 static int ext4_clear_journal_err(struct super_block *sb,
6382 struct ext4_super_block *es)
6383 {
6384 journal_t *journal;
6385 int j_errno;
6386 const char *errstr;
6387
6388 if (!ext4_has_feature_journal(sb)) {
6389 ext4_error(sb, "Journal got removed while the fs was mounted!");
6390 return -EFSCORRUPTED;
6391 }
6392
6393 journal = EXT4_SB(sb)->s_journal;
6394
6395 /*
6396 * Now check for any error status which may have been recorded in the
6397 * journal by a prior ext4_error() or ext4_abort()
6398 */
6399
6400 j_errno = jbd2_journal_errno(journal);
6401 if (j_errno) {
6402 char nbuf[16];
6403
6404 errstr = ext4_decode_error(sb, j_errno, nbuf);
6405 ext4_warning(sb, "Filesystem error recorded "
6406 "from previous mount: %s", errstr);
6407
6408 EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
6409 es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
6410 j_errno = ext4_commit_super(sb);
6411 if (j_errno)
6412 return j_errno;
6413 ext4_warning(sb, "Marked fs in need of filesystem check.");
6414
6415 jbd2_journal_clear_err(journal);
6416 jbd2_journal_update_sb_errno(journal);
6417 }
6418 return 0;
6419 }
6420
6421 /*
6422 * Force the running and committing transactions to commit,
6423 * and wait on the commit.
6424 */
6425 int ext4_force_commit(struct super_block *sb)
6426 {
6427 return ext4_journal_force_commit(EXT4_SB(sb)->s_journal);
6428 }
6429
6430 static int ext4_sync_fs(struct super_block *sb, int wait)
6431 {
6432 int ret = 0;
6433 tid_t target;
6434 bool needs_barrier = false;
6435 struct ext4_sb_info *sbi = EXT4_SB(sb);
6436
6437 ret = ext4_emergency_state(sb);
6438 if (unlikely(ret))
6439 return ret;
6440
6441 trace_ext4_sync_fs(sb, wait);
6442 flush_workqueue(sbi->rsv_conversion_wq);
6443 /*
6444 * Writeback quota in non-journalled quota case - journalled quota has
6445 * no dirty dquots
6446 */
6447 dquot_writeback_dquots(sb, -1);
6448 /*
6449 * Data writeback is possible w/o journal transaction, so barrier must
6450 * being sent at the end of the function. But we can skip it if
6451 * transaction_commit will do it for us.
6452 */
6453 if (sbi->s_journal) {
6454 target = jbd2_get_latest_transaction(sbi->s_journal);
6455 if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
6456 !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
6457 needs_barrier = true;
6458
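		/*
		 * Kick off a commit of the latest transaction; if one was
		 * started and the caller asked to wait, wait for it to finish.
		 */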
6459 if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
6460 if (wait)
6461 ret = jbd2_log_wait_commit(sbi->s_journal,
6462 target);
6463 }
6464 } else if (wait && test_opt(sb, BARRIER))
6465 needs_barrier = true;
6466 if (needs_barrier) {
6467 int err;
6468 err = blkdev_issue_flush(sb->s_bdev);
6469 if (!ret)
6470 ret = err;
6471 }
6472
6473 return ret;
6474 }
6475
6476 /*
6477 * LVM calls this function before a (read-only) snapshot is created. This
6478 * gives us a chance to flush the journal completely and mark the fs clean.
6479 *
6480 * Note that this function alone cannot bring the filesystem into a clean
6481 * state; it relies on the upper layer to stop all data & metadata
6482 * modifications.
6483 */
6484 static int ext4_freeze(struct super_block *sb)
6485 {
6486 int error = 0;
6487 journal_t *journal = EXT4_SB(sb)->s_journal;
6488
6489 if (journal) {
6490 /* Now we set up the journal barrier. */
6491 jbd2_journal_lock_updates(journal);
6492
6493 /*
6494 * Don't clear the needs_recovery flag if we failed to
6495 * flush the journal.
6496 */
6497 error = jbd2_journal_flush(journal, 0);
6498 if (error < 0)
6499 goto out;
6500
6501 /* Journal blocked and flushed, clear needs_recovery flag. */
6502 ext4_clear_feature_journal_needs_recovery(sb);
6503 if (ext4_orphan_file_empty(sb))
6504 ext4_clear_feature_orphan_present(sb);
6505 }
6506
6507 error = ext4_commit_super(sb);
6508 out:
6509 if (journal)
6510 /* we rely on upper layer to stop further updates */
6511 jbd2_journal_unlock_updates(journal);
6512 return error;
6513 }
6514
6515 /*
6516 * Called by LVM after the snapshot is done. We need to reset the RECOVER
6517 * flag here, even though the filesystem is not technically dirty yet.
6518 */
6519 static int ext4_unfreeze(struct super_block *sb)
6520 {
6521 if (ext4_emergency_state(sb))
6522 return 0;
6523
6524 if (EXT4_SB(sb)->s_journal) {
6525 /* Reset the needs_recovery flag before the fs is unlocked. */
6526 ext4_set_feature_journal_needs_recovery(sb);
6527 if (ext4_has_feature_orphan_file(sb))
6528 ext4_set_feature_orphan_present(sb);
6529 }
6530
6531 ext4_commit_super(sb);
6532 return 0;
6533 }
6534
6535 /*
6536 * Structure to save mount options for ext4_remount's benefit
6537 */
6538 struct ext4_mount_options {
6539 unsigned long s_mount_opt;
6540 unsigned long s_mount_opt2;
6541 kuid_t s_resuid;
6542 kgid_t s_resgid;
6543 unsigned long s_commit_interval;
6544 u32 s_min_batch_time, s_max_batch_time;
6545 #ifdef CONFIG_QUOTA
6546 int s_jquota_fmt;
6547 char *s_qf_names[EXT4_MAXQUOTAS];
6548 #endif
6549 };
6550
6551 static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
6552 {
6553 struct ext4_fs_context *ctx = fc->fs_private;
6554 struct ext4_super_block *es;
6555 struct ext4_sb_info *sbi = EXT4_SB(sb);
6556 unsigned long old_sb_flags;
6557 struct ext4_mount_options old_opts;
6558 ext4_group_t g;
6559 int err = 0;
6560 int alloc_ctx;
6561 #ifdef CONFIG_QUOTA
6562 int enable_quota = 0;
6563 int i, j;
6564 char *to_free[EXT4_MAXQUOTAS];
6565 #endif
6566
6567
6568 /* Store the original options */
6569 old_sb_flags = sb->s_flags;
6570 old_opts.s_mount_opt = sbi->s_mount_opt;
6571 old_opts.s_mount_opt2 = sbi->s_mount_opt2;
6572 old_opts.s_resuid = sbi->s_resuid;
6573 old_opts.s_resgid = sbi->s_resgid;
6574 old_opts.s_commit_interval = sbi->s_commit_interval;
6575 old_opts.s_min_batch_time = sbi->s_min_batch_time;
6576 old_opts.s_max_batch_time = sbi->s_max_batch_time;
6577 #ifdef CONFIG_QUOTA
6578 old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
6579 for (i = 0; i < EXT4_MAXQUOTAS; i++)
6580 if (sbi->s_qf_names[i]) {
6581 char *qf_name = get_qf_name(sb, sbi, i);
6582
6583 old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL);
6584 if (!old_opts.s_qf_names[i]) {
6585 for (j = 0; j < i; j++)
6586 kfree(old_opts.s_qf_names[j]);
6587 return -ENOMEM;
6588 }
6589 } else
6590 old_opts.s_qf_names[i] = NULL;
6591 #endif
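	/*
	 * If no journal I/O priority was given for this remount, keep the
	 * journal thread's current priority (or fall back to the default).
	 */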
6592 if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)) {
6593 if (sbi->s_journal && sbi->s_journal->j_task->io_context)
6594 ctx->journal_ioprio =
6595 sbi->s_journal->j_task->io_context->ioprio;
6596 else
6597 ctx->journal_ioprio = EXT4_DEF_JOURNAL_IOPRIO;
6598
6599 }
6600
6601 if ((ctx->spec & EXT4_SPEC_s_stripe) &&
6602 ext4_is_stripe_incompatible(sb, ctx->s_stripe)) {
6603 ext4_msg(sb, KERN_WARNING,
6604 "stripe (%lu) is not aligned with cluster size (%u), "
6605 "stripe is disabled",
6606 ctx->s_stripe, sbi->s_cluster_ratio);
6607 ctx->s_stripe = 0;
6608 }
6609
6610 /*
6611 * Changing the DIOREAD_NOLOCK or DELALLOC mount options may cause
6612 * two calls to ext4_should_dioread_nolock() to return inconsistent
6613 * values, triggering WARN_ON in ext4_add_complete_io(). We grab
6614 * s_writepages_rwsem here to avoid races between writepages
6615 * operations and remount.
6616 */
6617 alloc_ctx = ext4_writepages_down_write(sb);
6618 ext4_apply_options(fc, sb);
6619 ext4_writepages_up_write(sb, alloc_ctx);
6620
6621 if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
6622 test_opt(sb, JOURNAL_CHECKSUM)) {
6623 ext4_msg(sb, KERN_ERR, "changing journal_checksum "
6624 "during remount not supported; ignoring");
6625 sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
6626 }
6627
6628 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
6629 if (test_opt2(sb, EXPLICIT_DELALLOC)) {
6630 ext4_msg(sb, KERN_ERR, "can't mount with "
6631 "both data=journal and delalloc");
6632 err = -EINVAL;
6633 goto restore_opts;
6634 }
6635 if (test_opt(sb, DIOREAD_NOLOCK)) {
6636 ext4_msg(sb, KERN_ERR, "can't mount with "
6637 "both data=journal and dioread_nolock");
6638 err = -EINVAL;
6639 goto restore_opts;
6640 }
6641 } else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
6642 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
6643 ext4_msg(sb, KERN_ERR, "can't mount with "
6644 "journal_async_commit in data=ordered mode");
6645 err = -EINVAL;
6646 goto restore_opts;
6647 }
6648 }
6649
6650 if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
6651 ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
6652 err = -EINVAL;
6653 goto restore_opts;
6654 }
6655
6656 if ((old_opts.s_mount_opt & EXT4_MOUNT_DELALLOC) &&
6657 !test_opt(sb, DELALLOC)) {
6658 ext4_msg(sb, KERN_ERR, "can't disable delalloc during remount");
6659 err = -EINVAL;
6660 goto restore_opts;
6661 }
6662
6663 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
6664 (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
6665
6666 es = sbi->s_es;
6667
6668 if (sbi->s_journal) {
6669 ext4_init_journal_params(sb, sbi->s_journal);
6670 set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio);
6671 }
6672
6673 /* Flush outstanding errors before changing fs state */
6674 flush_work(&sbi->s_sb_upd_work);
6675
6676 if ((bool)(fc->sb_flags & SB_RDONLY) != sb_rdonly(sb)) {
6677 if (ext4_emergency_state(sb)) {
6678 err = -EROFS;
6679 goto restore_opts;
6680 }
6681
6682 if (fc->sb_flags & SB_RDONLY) {
6683 err = sync_filesystem(sb);
6684 if (err < 0)
6685 goto restore_opts;
6686 err = dquot_suspend(sb, -1);
6687 if (err < 0)
6688 goto restore_opts;
6689
6690 /*
6691 * First of all, the unconditional stuff we have to do
6692 * to disable replay of the journal when we next remount
6693 */
6694 sb->s_flags |= SB_RDONLY;
6695
6696 /*
6697 * OK, test if we are remounting a valid rw partition
6698 * readonly, and if so set the rdonly flag and then
6699 * mark the partition as valid again.
6700 */
6701 if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
6702 (sbi->s_mount_state & EXT4_VALID_FS))
6703 es->s_state = cpu_to_le16(sbi->s_mount_state);
6704
6705 if (sbi->s_journal) {
6706 /*
6707 * We let remount-ro finish even if marking fs
6708 * as clean failed...
6709 */
6710 ext4_mark_recovery_complete(sb, es);
6711 }
6712 } else {
6713 /* Make sure we can mount this feature set readwrite */
6714 if (ext4_has_feature_readonly(sb) ||
6715 !ext4_feature_set_ok(sb, 0)) {
6716 err = -EROFS;
6717 goto restore_opts;
6718 }
6719 /*
6720 * Make sure the group descriptor checksums
6721 * are sane. If they aren't, refuse to remount r/w.
6722 */
6723 for (g = 0; g < sbi->s_groups_count; g++) {
6724 struct ext4_group_desc *gdp =
6725 ext4_get_group_desc(sb, g, NULL);
6726
6727 if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
6728 ext4_msg(sb, KERN_ERR,
6729 "ext4_remount: Checksum for group %u failed (%u!=%u)",
6730 g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
6731 le16_to_cpu(gdp->bg_checksum));
6732 err = -EFSBADCRC;
6733 goto restore_opts;
6734 }
6735 }
6736
6737 /*
6738 * If we have an unprocessed orphan list hanging
6739 * around from a previously readonly bdev mount,
6740 * require a full umount/remount for now.
6741 */
6742 if (es->s_last_orphan || !ext4_orphan_file_empty(sb)) {
6743 ext4_msg(sb, KERN_WARNING, "Couldn't "
6744 "remount RDWR because of unprocessed "
6745 "orphan inode list. Please "
6746 "umount/remount instead");
6747 err = -EINVAL;
6748 goto restore_opts;
6749 }
6750
6751 /*
6752 * Mounting a RDONLY partition read-write, so reread
6753 * and store the current valid flag. (It may have
6754 * been changed by e2fsck since we originally mounted
6755 * the partition.)
6756 */
6757 if (sbi->s_journal) {
6758 err = ext4_clear_journal_err(sb, es);
6759 if (err)
6760 goto restore_opts;
6761 }
6762 sbi->s_mount_state = (le16_to_cpu(es->s_state) &
6763 ~EXT4_FC_REPLAY);
6764
6765 err = ext4_setup_super(sb, es, 0);
6766 if (err)
6767 goto restore_opts;
6768
6769 sb->s_flags &= ~SB_RDONLY;
6770 if (ext4_has_feature_mmp(sb)) {
6771 err = ext4_multi_mount_protect(sb,
6772 le64_to_cpu(es->s_mmp_block));
6773 if (err)
6774 goto restore_opts;
6775 }
6776 #ifdef CONFIG_QUOTA
6777 enable_quota = 1;
6778 #endif
6779 }
6780 }
6781
6782 /*
6783 * Handle creation of system zone data early because it can fail.
6784 * Releasing of existing data is done when we are sure remount will
6785 * succeed.
6786 */
6787 if (test_opt(sb, BLOCK_VALIDITY) && !sbi->s_system_blks) {
6788 err = ext4_setup_system_zone(sb);
6789 if (err)
6790 goto restore_opts;
6791 }
6792
6793 if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
6794 err = ext4_commit_super(sb);
6795 if (err)
6796 goto restore_opts;
6797 }
6798
6799 #ifdef CONFIG_QUOTA
6800 if (enable_quota) {
6801 if (sb_any_quota_suspended(sb))
6802 dquot_resume(sb, -1);
6803 else if (ext4_has_feature_quota(sb)) {
6804 err = ext4_enable_quotas(sb);
6805 if (err)
6806 goto restore_opts;
6807 }
6808 }
6809 /* Release old quota file names */
6810 for (i = 0; i < EXT4_MAXQUOTAS; i++)
6811 kfree(old_opts.s_qf_names[i]);
6812 #endif
6813 if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
6814 ext4_release_system_zone(sb);
6815
6816 /*
6817 * Reinitialize lazy itable initialization thread based on
6818 * current settings
6819 */
6820 if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
6821 ext4_unregister_li_request(sb);
6822 else {
6823 ext4_group_t first_not_zeroed;
6824 first_not_zeroed = ext4_has_uninit_itable(sb);
6825 ext4_register_li_request(sb, first_not_zeroed);
6826 }
6827
6828 if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
6829 ext4_stop_mmpd(sbi);
6830
6831 /*
6832 * Handle aborting the filesystem as the last thing during remount to
6833 * avoid obscure errors during remount when some option changes fail to
6834 * apply because the filesystem has been shut down.
6835 */
6836 if (test_opt2(sb, ABORT))
6837 ext4_abort(sb, ESHUTDOWN, "Abort forced by user");
6838
6839 return 0;
6840
6841 restore_opts:
6842 /*
6843 * If there was a failed r/w to ro transition, we may need to
6844 * re-enable quota.
6845 */
6846 if (sb_rdonly(sb) && !(old_sb_flags & SB_RDONLY) &&
6847 sb_any_quota_suspended(sb))
6848 dquot_resume(sb, -1);
6849
6850 alloc_ctx = ext4_writepages_down_write(sb);
6851 sb->s_flags = old_sb_flags;
6852 sbi->s_mount_opt = old_opts.s_mount_opt;
6853 sbi->s_mount_opt2 = old_opts.s_mount_opt2;
6854 sbi->s_resuid = old_opts.s_resuid;
6855 sbi->s_resgid = old_opts.s_resgid;
6856 sbi->s_commit_interval = old_opts.s_commit_interval;
6857 sbi->s_min_batch_time = old_opts.s_min_batch_time;
6858 sbi->s_max_batch_time = old_opts.s_max_batch_time;
6859 ext4_writepages_up_write(sb, alloc_ctx);
6860
6861 if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
6862 ext4_release_system_zone(sb);
6863 #ifdef CONFIG_QUOTA
6864 sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
6865 for (i = 0; i < EXT4_MAXQUOTAS; i++) {
6866 to_free[i] = get_qf_name(sb, sbi, i);
6867 rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]);
6868 }
6869 synchronize_rcu();
6870 for (i = 0; i < EXT4_MAXQUOTAS; i++)
6871 kfree(to_free[i]);
6872 #endif
6873 if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
6874 ext4_stop_mmpd(sbi);
6875 return err;
6876 }
6877
6878 static int ext4_reconfigure(struct fs_context *fc)
6879 {
6880 struct super_block *sb = fc->root->d_sb;
6881 int ret;
6882 bool old_ro = sb_rdonly(sb);
6883
6884 fc->s_fs_info = EXT4_SB(sb);
6885
6886 ret = ext4_check_opt_consistency(fc, sb);
6887 if (ret < 0)
6888 return ret;
6889
6890 ret = __ext4_remount(fc, sb);
6891 if (ret < 0)
6892 return ret;
6893
6894 ext4_msg(sb, KERN_INFO, "re-mounted %pU%s.",
6895 &sb->s_uuid,
6896 (old_ro != sb_rdonly(sb)) ? (sb_rdonly(sb) ? " ro" : " r/w") : "");
6897
6898 return 0;
6899 }
6900
6901 #ifdef CONFIG_QUOTA
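/*
 * Clamp statfs results to the project quota limits so that a directory
 * tree with project quota enforcement reports space and inode counts
 * bounded by its project limits rather than by the whole filesystem.
 */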
6902 static int ext4_statfs_project(struct super_block *sb,
6903 kprojid_t projid, struct kstatfs *buf)
6904 {
6905 struct kqid qid;
6906 struct dquot *dquot;
6907 u64 limit;
6908 u64 curblock;
6909
6910 qid = make_kqid_projid(projid);
6911 dquot = dqget(sb, qid);
6912 if (IS_ERR(dquot))
6913 return PTR_ERR(dquot);
6914 spin_lock(&dquot->dq_dqb_lock);
6915
6916 limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
6917 dquot->dq_dqb.dqb_bhardlimit);
6918 limit >>= sb->s_blocksize_bits;
6919
6920 if (limit) {
6921 uint64_t remaining = 0;
6922
6923 curblock = (dquot->dq_dqb.dqb_curspace +
6924 dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
6925 if (limit > curblock)
6926 remaining = limit - curblock;
6927
6928 buf->f_blocks = min(buf->f_blocks, limit);
6929 buf->f_bfree = min(buf->f_bfree, remaining);
6930 buf->f_bavail = min(buf->f_bavail, remaining);
6931 }
6932
6933 limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
6934 dquot->dq_dqb.dqb_ihardlimit);
6935 if (limit) {
6936 uint64_t remaining = 0;
6937
6938 if (limit > dquot->dq_dqb.dqb_curinodes)
6939 remaining = limit - dquot->dq_dqb.dqb_curinodes;
6940
6941 buf->f_files = min(buf->f_files, limit);
6942 buf->f_ffree = min(buf->f_ffree, remaining);
6943 }
6944
6945 spin_unlock(&dquot->dq_dqb_lock);
6946 dqput(dquot);
6947 return 0;
6948 }
6949 #endif
6950
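/*
 * Fill in the statfs buffer from the superblock and the per-cpu free
 * cluster/inode counters.  Unless the minixdf option is set, the
 * filesystem overhead is subtracted from the total block count, and
 * reserved blocks are excluded from f_bavail.
 */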
6951 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
6952 {
6953 struct super_block *sb = dentry->d_sb;
6954 struct ext4_sb_info *sbi = EXT4_SB(sb);
6955 struct ext4_super_block *es = sbi->s_es;
6956 ext4_fsblk_t overhead = 0, resv_blocks;
6957 s64 bfree;
6958 resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));
6959
6960 if (!test_opt(sb, MINIX_DF))
6961 overhead = sbi->s_overhead;
6962
6963 buf->f_type = EXT4_SUPER_MAGIC;
6964 buf->f_bsize = sb->s_blocksize;
6965 buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
6966 bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
6967 percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
6968 /* prevent underflow in case little free space is available */
6969 buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
6970 buf->f_bavail = buf->f_bfree -
6971 (ext4_r_blocks_count(es) + resv_blocks);
6972 if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
6973 buf->f_bavail = 0;
6974 buf->f_files = le32_to_cpu(es->s_inodes_count);
6975 buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
6976 buf->f_namelen = EXT4_NAME_LEN;
6977 buf->f_fsid = uuid_to_fsid(es->s_uuid);
6978
6979 #ifdef CONFIG_QUOTA
6980 if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
6981 sb_has_quota_limits_enabled(sb, PRJQUOTA))
6982 ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
6983 #endif
6984 return 0;
6985 }
6986
6987
6988 #ifdef CONFIG_QUOTA
6989
6990 /*
6991 * Helper functions so that the transaction is started before we acquire
6992 * dqio_sem, to keep the correct lock ordering of transaction > dqio_sem
6993 */
6994 static inline struct inode *dquot_to_inode(struct dquot *dquot)
6995 {
6996 return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
6997 }
6998
6999 static int ext4_write_dquot(struct dquot *dquot)
7000 {
7001 int ret, err;
7002 handle_t *handle;
7003 struct inode *inode;
7004
7005 inode = dquot_to_inode(dquot);
7006 handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
7007 EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
7008 if (IS_ERR(handle))
7009 return PTR_ERR(handle);
7010 ret = dquot_commit(dquot);
7011 if (ret < 0)
7012 ext4_error_err(dquot->dq_sb, -ret,
7013 "Failed to commit dquot type %d",
7014 dquot->dq_id.type);
7015 err = ext4_journal_stop(handle);
7016 if (!ret)
7017 ret = err;
7018 return ret;
7019 }
7020
7021 static int ext4_acquire_dquot(struct dquot *dquot)
7022 {
7023 int ret, err;
7024 handle_t *handle;
7025
7026 handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
7027 EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
7028 if (IS_ERR(handle))
7029 return PTR_ERR(handle);
7030 ret = dquot_acquire(dquot);
7031 if (ret < 0)
7032 ext4_error_err(dquot->dq_sb, -ret,
7033 "Failed to acquire dquot type %d",
7034 dquot->dq_id.type);
7035 err = ext4_journal_stop(handle);
7036 if (!ret)
7037 ret = err;
7038 return ret;
7039 }
7040
7041 static int ext4_release_dquot(struct dquot *dquot)
7042 {
7043 int ret, err;
7044 handle_t *handle;
7045 bool freeze_protected = false;
7046
7047 /*
7048 * Trying to sb_start_intwrite() in a running transaction
7049 * can result in a deadlock. Further, running transactions
7050 * are already protected from freezing.
7051 */
7052 if (!ext4_journal_current_handle()) {
7053 sb_start_intwrite(dquot->dq_sb);
7054 freeze_protected = true;
7055 }
7056
7057 handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
7058 EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
7059 if (IS_ERR(handle)) {
7060 /* Release dquot anyway to avoid endless cycle in dqput() */
7061 dquot_release(dquot);
7062 if (freeze_protected)
7063 sb_end_intwrite(dquot->dq_sb);
7064 return PTR_ERR(handle);
7065 }
7066 ret = dquot_release(dquot);
7067 if (ret < 0)
7068 ext4_error_err(dquot->dq_sb, -ret,
7069 "Failed to release dquot type %d",
7070 dquot->dq_id.type);
7071 err = ext4_journal_stop(handle);
7072 if (!ret)
7073 ret = err;
7074
7075 if (freeze_protected)
7076 sb_end_intwrite(dquot->dq_sb);
7077
7078 return ret;
7079 }
7080
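/*
 * In journalled quota mode a dirty dquot must reach the quota file
 * within the running transaction, so write it out immediately;
 * otherwise just mark it dirty for later writeback.
 */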
7081 static int ext4_mark_dquot_dirty(struct dquot *dquot)
7082 {
7083 struct super_block *sb = dquot->dq_sb;
7084
7085 if (ext4_is_quota_journalled(sb)) {
7086 dquot_mark_dquot_dirty(dquot);
7087 return ext4_write_dquot(dquot);
7088 } else {
7089 return dquot_mark_dquot_dirty(dquot);
7090 }
7091 }
7092
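/* Write out the quota file information block inside a small transaction. */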
7093 static int ext4_write_info(struct super_block *sb, int type)
7094 {
7095 int ret, err;
7096 handle_t *handle;
7097
7098 /* Data block + inode block */
7099 handle = ext4_journal_start_sb(sb, EXT4_HT_QUOTA, 2);
7100 if (IS_ERR(handle))
7101 return PTR_ERR(handle);
7102 ret = dquot_commit_info(sb, type);
7103 err = ext4_journal_stop(handle);
7104 if (!ret)
7105 ret = err;
7106 return ret;
7107 }
7108
7109 static void lockdep_set_quota_inode(struct inode *inode, int subclass)
7110 {
7111 struct ext4_inode_info *ei = EXT4_I(inode);
7112
7113 /* The first argument of lockdep_set_subclass has to be
7114 * *exactly* the same as the argument to init_rwsem() --- in
7115 * this case, in init_once() --- or lockdep gets unhappy
7116 * because the name of the lock is set using the
7117 * stringification of the argument to init_rwsem().
7118 */
7119 (void) ei; /* shut up clang warning if !CONFIG_LOCKDEP */
7120 lockdep_set_subclass(&ei->i_data_sem, subclass);
7121 }
7122
7123 /*
7124 * Standard function to be called on quota_on
7125 */
7126 static int ext4_quota_on(struct super_block *sb, int type, int format_id,
7127 const struct path *path)
7128 {
7129 int err;
7130
7131 if (!test_opt(sb, QUOTA))
7132 return -EINVAL;
7133
7134 /* Quotafile not on the same filesystem? */
7135 if (path->dentry->d_sb != sb)
7136 return -EXDEV;
7137
7138 /* Quota already enabled for this file? */
7139 if (IS_NOQUOTA(d_inode(path->dentry)))
7140 return -EBUSY;
7141
7142 /* Journaling quota? */
7143 if (EXT4_SB(sb)->s_qf_names[type]) {
7144 /* Quotafile not in fs root? */
7145 if (path->dentry->d_parent != sb->s_root)
7146 ext4_msg(sb, KERN_WARNING,
7147 "Quota file not on filesystem root. "
7148 "Journaled quota will not work");
7149 sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
7150 } else {
7151 /*
7152 * Clear the flag just in case mount options changed since
7153 * last time.
7154 */
7155 sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
7156 }
7157
7158 lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
7159 err = dquot_quota_on(sb, type, format_id, path);
7160 if (!err) {
7161 struct inode *inode = d_inode(path->dentry);
7162 handle_t *handle;
7163
7164 /*
7165 * Set inode flags to prevent userspace from messing with quota
7166 * files. If this fails, we return success anyway since quotas
7167 * are already enabled and this is not a hard failure.
7168 */
7169 inode_lock(inode);
7170 handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
7171 if (IS_ERR(handle))
7172 goto unlock_inode;
7173 EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
7174 inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
7175 S_NOATIME | S_IMMUTABLE);
7176 err = ext4_mark_inode_dirty(handle, inode);
7177 ext4_journal_stop(handle);
7178 unlock_inode:
7179 inode_unlock(inode);
7180 if (err)
7181 dquot_quota_off(sb, type);
7182 }
7183 if (err)
7184 lockdep_set_quota_inode(path->dentry->d_inode,
7185 I_DATA_SEM_NORMAL);
7186 return err;
7187 }
7188
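/*
 * Sanity-check the quota inode number from the superblock: user and
 * group quota must use their reserved inodes, while project quota may
 * use any non-reserved inode.
 */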
7189 static inline bool ext4_check_quota_inum(int type, unsigned long qf_inum)
7190 {
7191 switch (type) {
7192 case USRQUOTA:
7193 return qf_inum == EXT4_USR_QUOTA_INO;
7194 case GRPQUOTA:
7195 return qf_inum == EXT4_GRP_QUOTA_INO;
7196 case PRJQUOTA:
7197 return qf_inum >= EXT4_GOOD_OLD_FIRST_INO;
7198 default:
7199 BUG();
7200 }
7201 }
7202
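/*
 * Enable quota accounting for one type backed by a hidden quota inode
 * (the QUOTA feature), loading the inode directly instead of going
 * through a visible quota file.
 */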
7203 static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
7204 unsigned int flags)
7205 {
7206 int err;
7207 struct inode *qf_inode;
7208 unsigned long qf_inums[EXT4_MAXQUOTAS] = {
7209 le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
7210 le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
7211 le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
7212 };
7213
7214 BUG_ON(!ext4_has_feature_quota(sb));
7215
7216 if (!qf_inums[type])
7217 return -EPERM;
7218
7219 if (!ext4_check_quota_inum(type, qf_inums[type])) {
7220 ext4_error(sb, "Bad quota inum: %lu, type: %d",
7221 qf_inums[type], type);
7222 return -EUCLEAN;
7223 }
7224
7225 qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
7226 if (IS_ERR(qf_inode)) {
7227 ext4_error(sb, "Bad quota inode: %lu, type: %d",
7228 qf_inums[type], type);
7229 return PTR_ERR(qf_inode);
7230 }
7231
7232 /* Don't account quota for quota files to avoid recursion */
7233 qf_inode->i_flags |= S_NOQUOTA;
7234 lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
7235 err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
7236 if (err)
7237 lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
7238 iput(qf_inode);
7239
7240 return err;
7241 }
7242
7243 /* Enable usage tracking for all quota types. */
7244 int ext4_enable_quotas(struct super_block *sb)
7245 {
7246 int type, err = 0;
7247 unsigned long qf_inums[EXT4_MAXQUOTAS] = {
7248 le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
7249 le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
7250 le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
7251 };
7252 bool quota_mopt[EXT4_MAXQUOTAS] = {
7253 test_opt(sb, USRQUOTA),
7254 test_opt(sb, GRPQUOTA),
7255 test_opt(sb, PRJQUOTA),
7256 };
7257
7258 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
7259 for (type = 0; type < EXT4_MAXQUOTAS; type++) {
7260 if (qf_inums[type]) {
7261 err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
7262 DQUOT_USAGE_ENABLED |
7263 (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
7264 if (err) {
7265 ext4_warning(sb,
7266 "Failed to enable quota tracking "
7267 "(type=%d, err=%d, ino=%lu). "
7268 "Please run e2fsck to fix.", type,
7269 err, qf_inums[type]);
7270
7271 ext4_quotas_off(sb, type);
7272 return err;
7273 }
7274 }
7275 }
7276 return 0;
7277 }
7278
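/*
 * Turn quotas off for one type.  For legacy (non-feature) quota files
 * on a writable filesystem, also clear the flags set by ext4_quota_on()
 * and update the timestamps so userspace sees the file was modified.
 */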
7279 static int ext4_quota_off(struct super_block *sb, int type)
7280 {
7281 struct inode *inode = sb_dqopt(sb)->files[type];
7282 handle_t *handle;
7283 int err;
7284
7285 /* Force all delayed allocation blocks to be allocated.
7286 * Caller already holds s_umount sem */
7287 if (test_opt(sb, DELALLOC))
7288 sync_filesystem(sb);
7289
7290 if (!inode || !igrab(inode))
7291 goto out;
7292
7293 err = dquot_quota_off(sb, type);
7294 if (err || ext4_has_feature_quota(sb))
7295 goto out_put;
7296 /*
7297 * When the filesystem was remounted read-only first, we cannot clean up
7298 * inode flags here. Bad luck, but people should be using the QUOTA feature
7299 * these days anyway.
7300 */
7301 if (sb_rdonly(sb))
7302 goto out_put;
7303
7304 inode_lock(inode);
7305 /*
7306 * Update modification times of quota files when userspace can
7307 * start looking at them. If we fail, we return success anyway since
7308 * this is not a hard failure and quotas are already disabled.
7309 */
7310 handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
7311 if (IS_ERR(handle)) {
7312 err = PTR_ERR(handle);
7313 goto out_unlock;
7314 }
7315 EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
7316 inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
7317 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
7318 err = ext4_mark_inode_dirty(handle, inode);
7319 ext4_journal_stop(handle);
7320 out_unlock:
7321 inode_unlock(inode);
7322 out_put:
7323 lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
7324 iput(inode);
7325 return err;
7326 out:
7327 return dquot_quota_off(sb, type);
7328 }
7329
7330 /* Read data from quotafile - avoid the page cache and such because we cannot
7331 * afford acquiring the locks... As quota files are never truncated and the
7332 * quota code itself serializes the operations (and no one else should touch
7333 * the files), we don't have to be afraid of races */
7334 static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
7335 size_t len, loff_t off)
7336 {
7337 struct inode *inode = sb_dqopt(sb)->files[type];
7338 ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
7339 int offset = off & (sb->s_blocksize - 1);
7340 int tocopy;
7341 size_t toread;
7342 struct buffer_head *bh;
7343 loff_t i_size = i_size_read(inode);
7344
7345 if (off > i_size)
7346 return 0;
7347 if (off+len > i_size)
7348 len = i_size-off;
7349 toread = len;
7350 while (toread > 0) {
7351 tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
7352 bh = ext4_bread(NULL, inode, blk, 0);
7353 if (IS_ERR(bh))
7354 return PTR_ERR(bh);
7355 if (!bh) /* A hole? */
7356 memset(data, 0, tocopy);
7357 else
7358 memcpy(data, bh->b_data+offset, tocopy);
7359 brelse(bh);
7360 offset = 0;
7361 toread -= tocopy;
7362 data += tocopy;
7363 blk++;
7364 }
7365 return len;
7366 }
7367
7368 /* Write to quotafile (we know the transaction is already started and has
7369 * enough credits) */
7370 static ssize_t ext4_quota_write(struct super_block *sb, int type,
7371 const char *data, size_t len, loff_t off)
7372 {
7373 struct inode *inode = sb_dqopt(sb)->files[type];
7374 ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
7375 int err = 0, err2 = 0, offset = off & (sb->s_blocksize - 1);
7376 int retries = 0;
7377 struct buffer_head *bh;
7378 handle_t *handle = journal_current_handle();
7379
7380 if (!handle) {
7381 ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
7382 " cancelled because transaction is not started",
7383 (unsigned long long)off, (unsigned long long)len);
7384 return -EIO;
7385 }
7386 /*
7387 * Since we account for only one data block in the transaction credits,
7388 * the write must not cross a block boundary.
7389 */
7390 if (sb->s_blocksize - offset < len) {
7391 ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
7392 " cancelled because not block aligned",
7393 (unsigned long long)off, (unsigned long long)len);
7394 return -EIO;
7395 }
7396
7397 do {
7398 bh = ext4_bread(handle, inode, blk,
7399 EXT4_GET_BLOCKS_CREATE |
7400 EXT4_GET_BLOCKS_METADATA_NOFAIL);
7401 } while (PTR_ERR(bh) == -ENOSPC &&
7402 ext4_should_retry_alloc(inode->i_sb, &retries));
7403 if (IS_ERR(bh))
7404 return PTR_ERR(bh);
7405 if (!bh)
7406 goto out;
7407 BUFFER_TRACE(bh, "get write access");
7408 err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE);
7409 if (err) {
7410 brelse(bh);
7411 return err;
7412 }
7413 lock_buffer(bh);
7414 memcpy(bh->b_data+offset, data, len);
7415 flush_dcache_folio(bh->b_folio);
7416 unlock_buffer(bh);
7417 err = ext4_handle_dirty_metadata(handle, NULL, bh);
7418 brelse(bh);
7419 out:
7420 if (inode->i_size < off + len) {
7421 i_size_write(inode, off + len);
7422 EXT4_I(inode)->i_disksize = inode->i_size;
7423 err2 = ext4_mark_inode_dirty(handle, inode);
7424 if (unlikely(err2 && !err))
7425 err = err2;
7426 }
7427 return err ? err : len;
7428 }
7429 #endif
7430
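/*
 * When the separate ext2 driver is not built but CONFIG_EXT4_USE_FOR_EXT2
 * is set, register ext4 as the handler for "ext2" mounts, provided the
 * on-disk feature set is still ext2-compatible.
 */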
7431 #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
7432 static inline void register_as_ext2(void)
7433 {
7434 int err = register_filesystem(&ext2_fs_type);
7435 if (err)
7436 printk(KERN_WARNING
7437 "EXT4-fs: Unable to register as ext2 (%d)\n", err);
7438 }
7439
7440 static inline void unregister_as_ext2(void)
7441 {
7442 unregister_filesystem(&ext2_fs_type);
7443 }
7444
7445 static inline int ext2_feature_set_ok(struct super_block *sb)
7446 {
7447 if (ext4_has_unknown_ext2_incompat_features(sb))
7448 return 0;
7449 if (sb_rdonly(sb))
7450 return 1;
7451 if (ext4_has_unknown_ext2_ro_compat_features(sb))
7452 return 0;
7453 return 1;
7454 }
7455 #else
7456 static inline void register_as_ext2(void) { }
7457 static inline void unregister_as_ext2(void) { }
7458 static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
7459 #endif
7460
7461 static inline void register_as_ext3(void)
7462 {
7463 int err = register_filesystem(&ext3_fs_type);
7464 if (err)
7465 printk(KERN_WARNING
7466 "EXT4-fs: Unable to register as ext3 (%d)\n", err);
7467 }
7468
7469 static inline void unregister_as_ext3(void)
7470 {
7471 unregister_filesystem(&ext3_fs_type);
7472 }
7473
7474 static inline int ext3_feature_set_ok(struct super_block *sb)
7475 {
7476 if (ext4_has_unknown_ext3_incompat_features(sb))
7477 return 0;
7478 if (!ext4_has_feature_journal(sb))
7479 return 0;
7480 if (sb_rdonly(sb))
7481 return 1;
7482 if (ext4_has_unknown_ext3_ro_compat_features(sb))
7483 return 0;
7484 return 1;
7485 }
7486
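/*
 * Tear down the superblock via the generic block-device path, then drop
 * the reference to the external journal device if one was opened.
 */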
7487 static void ext4_kill_sb(struct super_block *sb)
7488 {
7489 struct ext4_sb_info *sbi = EXT4_SB(sb);
7490 struct file *bdev_file = sbi ? sbi->s_journal_bdev_file : NULL;
7491
7492 kill_block_super(sb);
7493
7494 if (bdev_file)
7495 bdev_fput(bdev_file);
7496 }
7497
7498 static struct file_system_type ext4_fs_type = {
7499 .owner = THIS_MODULE,
7500 .name = "ext4",
7501 .init_fs_context = ext4_init_fs_context,
7502 .parameters = ext4_param_specs,
7503 .kill_sb = ext4_kill_sb,
7504 .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME |
7505 FS_LBS,
7506 };
7507 MODULE_ALIAS_FS("ext4");
7508
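/*
 * Module init: set up ext4's internal caches and subsystems in
 * dependency order, then register the ext4 (and, when configured,
 * ext2/ext3 compatibility) filesystem types.  Each error label unwinds
 * only the steps that already succeeded.
 */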
7509 static int __init ext4_init_fs(void)
7510 {
7511 int err;
7512
7513 ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
7514 ext4_li_info = NULL;
7515
7516 /* Build-time check for flags consistency */
7517 ext4_check_flag_values();
7518
7519 err = ext4_init_es();
7520 if (err)
7521 return err;
7522
7523 err = ext4_init_pending();
7524 if (err)
7525 goto out7;
7526
7527 err = ext4_init_post_read_processing();
7528 if (err)
7529 goto out6;
7530
7531 err = ext4_init_pageio();
7532 if (err)
7533 goto out5;
7534
7535 err = ext4_init_system_zone();
7536 if (err)
7537 goto out4;
7538
7539 err = ext4_init_sysfs();
7540 if (err)
7541 goto out3;
7542
7543 err = ext4_init_mballoc();
7544 if (err)
7545 goto out2;
7546 err = init_inodecache();
7547 if (err)
7548 goto out1;
7549
7550 err = ext4_fc_init_dentry_cache();
7551 if (err)
7552 goto out05;
7553
7554 register_as_ext3();
7555 register_as_ext2();
7556 err = register_filesystem(&ext4_fs_type);
7557 if (err)
7558 goto out;
7559
7560 return 0;
7561 out:
7562 unregister_as_ext2();
7563 unregister_as_ext3();
7564 ext4_fc_destroy_dentry_cache();
7565 out05:
7566 destroy_inodecache();
7567 out1:
7568 ext4_exit_mballoc();
7569 out2:
7570 ext4_exit_sysfs();
7571 out3:
7572 ext4_exit_system_zone();
7573 out4:
7574 ext4_exit_pageio();
7575 out5:
7576 ext4_exit_post_read_processing();
7577 out6:
7578 ext4_exit_pending();
7579 out7:
7580 ext4_exit_es();
7581
7582 return err;
7583 }
7584
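/*
 * Module exit: unregister the filesystem types and tear down the caches
 * and subsystems in the reverse order of ext4_init_fs().
 */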
7585 static void __exit ext4_exit_fs(void)
7586 {
7587 ext4_destroy_lazyinit_thread();
7588 unregister_as_ext2();
7589 unregister_as_ext3();
7590 unregister_filesystem(&ext4_fs_type);
7591 ext4_fc_destroy_dentry_cache();
7592 destroy_inodecache();
7593 ext4_exit_mballoc();
7594 ext4_exit_sysfs();
7595 ext4_exit_system_zone();
7596 ext4_exit_pageio();
7597 ext4_exit_post_read_processing();
7598 ext4_exit_es();
7599 ext4_exit_pending();
7600 }
7601
7602 MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
7603 MODULE_DESCRIPTION("Fourth Extended Filesystem");
7604 MODULE_LICENSE("GPL");
7605 module_init(ext4_init_fs)
7606 module_exit(ext4_exit_fs)
7607