// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/iomap.h>
#include <linux/iversion.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__u16 dummy_csum = 0;
	int offset = offsetof(struct ext4_inode, i_checksum_lo);
	unsigned int csum_size = sizeof(dummy_csum);

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
	offset += csum_size;
	csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
			   EXT4_GOOD_OLD_INODE_SIZE - offset);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		offset = offsetof(struct ext4_inode, i_checksum_hi);
		csum = ext4_chksum(sbi, csum, (__u8 *)raw +
				   EXT4_GOOD_OLD_INODE_SIZE,
				   offset - EXT4_GOOD_OLD_INODE_SIZE);
		if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
					   csum_size);
			offset += csum_size;
		}
		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
				   EXT4_INODE_SIZE(inode->i_sb) - offset);
	}

	return csum;
}

static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
			 struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}
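
/*
 * Illustrative note (added, not from the original source): the helpers
 * above split one 32-bit checksum across the on-disk inode. Assuming a
 * computed csum of 0x12345678, ext4_inode_csum_set() stores:
 *
 *	raw->i_checksum_lo = cpu_to_le16(0x5678);	// low 16 bits
 *	raw->i_checksum_hi = cpu_to_le16(0x1234);	// high 16 bits, only
 *							// when the large
 *							// inode has room
 *
 * ext4_inode_csum_verify() reassembles the same 32-bit value, or compares
 * only the low 16 bits when i_checksum_hi does not fit in the inode.
 */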

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents);

/*
 * Test whether an inode is a fast symlink.
 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
 */
int ext4_inode_is_fast_symlink(struct inode *inode)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
		int ea_blocks = EXT4_I(inode)->i_file_acl ?
			EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;

		if (ext4_has_inline_data(inode))
			return 0;

		return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
	}
	return S_ISLNK(inode->i_mode) && inode->i_size &&
	       (inode->i_size < EXT4_N_BLOCKS * 4);
}
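
/*
 * Illustrative example (added note, not from the original source): with
 * EXT4_N_BLOCKS == 15, i_data holds 15 * 4 = 60 bytes, so a short target
 * such as "/usr/share/zoneinfo/UTC" lives directly in i_data and the
 * check above reports a fast symlink, while a longer target is written
 * into an allocated data block and i_blocks becomes non-zero.
 */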

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;
	/*
	 * Credits for final inode cleanup and freeing:
	 * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
	 * (xattr block freeing), bitmap, group descriptor (inode freeing)
	 */
	int extra_credits = 6;
	struct ext4_xattr_inode_array *ea_inode_array = NULL;
	bool freeze_protected = false;

	trace_ext4_evict_inode(inode);

	if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
		ext4_evict_ea_inode(inode);
	if (inode->i_nlink) {
		truncate_inode_pages_final(&inode->i_data);

		goto no_delete;
	}

	if (is_bad_inode(inode))
		goto no_delete;
	dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages_final(&inode->i_data);

	/*
	 * For inodes with journalled data, transaction commit could have
	 * dirtied the inode. And for inodes with dioread_nolock, the
	 * unwritten extent conversion worker could merge extents and also
	 * have dirtied the inode. The flush worker ignores it because of
	 * the I_FREEING flag, but we still need to remove the inode from
	 * the writeback lists.
	 */
	if (!list_empty_careful(&inode->i_io_list))
		inode_io_list_del(inode);

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it. When we are in a running transaction though,
	 * we are already protected against freezing and we cannot grab further
	 * protection due to lock ordering constraints.
	 */
	if (!ext4_journal_current_handle()) {
		sb_start_intwrite(inode->i_sb);
		freeze_protected = true;
	}

	if (!IS_NOQUOTA(inode))
		extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);

	/*
	 * Block bitmap, group descriptor, and inode are accounted in both
	 * ext4_blocks_for_truncate() and extra_credits. So subtract 3.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
			 ext4_blocks_for_truncate(inode) + extra_credits - 3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		if (freeze_protected)
			sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	/*
	 * Set inode->i_size to 0 before calling ext4_truncate(). We need
	 * special handling of symlinks here because i_size is used to
	 * determine whether ext4_inode_info->i_data contains symlink data or
	 * block mappings. Setting i_size to 0 will remove its fast symlink
	 * status. Erase i_data so that it becomes a valid empty block map.
	 */
	if (ext4_inode_is_fast_symlink(inode))
		memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks) {
		err = ext4_truncate(inode);
		if (err) {
			ext4_error_err(inode->i_sb, -err,
				       "couldn't truncate inode %lu (err %d)",
				       inode->i_ino, err);
			goto stop_handle;
		}
	}

	/* Remove xattr references. */
	err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
				      extra_credits);
	if (err) {
		ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
stop_handle:
		ext4_journal_stop(handle);
		ext4_orphan_del(NULL, inode);
		if (freeze_protected)
			sb_end_intwrite(inode->i_sb);
		ext4_xattr_inode_array_free(ea_inode_array);
		goto no_delete;
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = (__u32)ktime_get_real_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	if (freeze_protected)
		sb_end_intwrite(inode->i_sb);
	ext4_xattr_inode_array_free(ea_inode_array);
	return;
no_delete:
	/*
	 * Check whether something else accidentally dirtied the inode while
	 * it was being evicted; that could cause inode use-after-free issues
	 * later.
	 */
	WARN_ON_ONCE(!list_empty_careful(&inode->i_io_list));

	if (!list_empty(&EXT4_I(inode)->i_fc_list))
		ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
				  int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);

	spin_unlock(&ei->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate at an offset that already had a delayed
		 * allocation, so on delayed-allocation writeback we should
		 * not re-claim the quota for those fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    !inode_is_open_for_write(inode))
		ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
				  unsigned int line,
				  struct ext4_map_blocks *map)
{
	if (ext4_has_feature_journal(inode->i_sb) &&
	    (inode->i_ino ==
	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
		return 0;
	if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock %llu "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_pblk, map->m_len);
		return -EFSCORRUPTED;
	}
	return 0;
}

int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
		       ext4_lblk_t len)
{
	int ret;

	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
		return fscrypt_zeroout_range(inode, lblk, pblk, len);

	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))
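
/*
 * Usage sketch (added note, not from the original source): the macro above
 * only fills in the caller's context for error reporting, e.g.
 *
 *	ret = check_block_validity(inode, map);
 *	if (ret)
 *		return ret;
 *
 * expands to __check_block_validity(inode, __func__, __LINE__, map), so a
 * corrupt mapping is reported against the calling function and line.
 */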

#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
				       struct inode *inode,
				       struct ext4_map_blocks *es_map,
				       struct ext4_map_blocks *map,
				       int flags)
{
	int retval;

	map->m_flags = 0;
	/*
	 * There is a race window in which the result may differ, e.g.
	 * xfstests #223 when dioread_nolock is enabled. The reason is that
	 * we look up a block mapping in the extent status tree without
	 * taking i_data_sem, so the unwritten extent could be converted in
	 * the meantime.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	/*
	 * We don't check m_len because the extent will be collapsed in the
	 * status tree, so the lengths might not be equal.
	 */
	if (es_map->m_lblk != map->m_lblk ||
	    es_map->m_flags != map->m_flags ||
	    es_map->m_pblk != map->m_pblk) {
		printk("ES cache assertion failed for inode: %lu "
		       "es_cached ex [%d/%d/%llu/%x] != "
		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
		       inode->i_ino, es_map->m_lblk, es_map->m_len,
		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
		       map->m_len, map->m_pblk, map->m_flags,
		       retval, flags);
	}
}
#endif /* ES_AGGRESSIVE_TEST */

static int ext4_map_query_blocks(handle_t *handle, struct inode *inode,
				 struct ext4_map_blocks *map)
{
	unsigned int status;
	int retval;

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	else
		retval = ext4_ind_map_blocks(handle, inode, map, 0);

	if (retval <= 0)
		return retval;

	if (unlikely(retval != map->m_len)) {
		ext4_warning(inode->i_sb,
			     "ES len assertion failed for inode "
			     "%lu: retval %d != map->m_len %d",
			     inode->i_ino, retval, map->m_len);
		WARN_ON(1);
	}

	status = map->m_flags & EXT4_MAP_UNWRITTEN ?
			EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
	ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
			      map->m_pblk, status);
	return retval;
}

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head, and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks(); otherwise
 * it calls ext4_ind_map_blocks() to handle indirect-mapping-based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If @flags doesn't contain EXT4_GET_BLOCKS_CREATE and the blocks are
 * pre-allocated and unwritten, the resulting @map is marked as unwritten.
 * If @flags contains EXT4_GET_BLOCKS_CREATE, it will mark @map as mapped.
 *
 * It returns 0 if a plain lookup fails (the blocks have not been
 * allocated); in that case @map is returned unmapped, but map->m_len is
 * still filled in to indicate the length of the hole starting at
 * map->m_lblk.
 *
 * It returns the error in case of allocation failure.
 */
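/*
 * Illustrative lookup-only sketch (added note, not from the original
 * source): a caller that only wants to know where logical block lblk
 * lives could do:
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *	int ret = ext4_map_blocks(NULL, inode, &map, 0);
 *
 *	if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
 *		;	// map.m_pblk holds the physical block
 *	else if (ret == 0)
 *		;	// hole of map.m_len blocks at map.m_lblk
 *	else
 *		;	// ret < 0: error
 */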
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;
	int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug(inode, "flag 0x%x, max_blocks %u, logical block %lu\n",
		  flags, map->m_len, (unsigned long) map->m_lblk);

	/*
	 * ext4_map_blocks returns an int, and m_len is an unsigned int
	 */
	if (unlikely(map->m_len > INT_MAX))
		map->m_len = INT_MAX;

	/* We can only handle block numbers less than EXT_MAX_BLOCKS */
	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
		return -EFSCORRUPTED;

	/* Lookup extent status tree firstly */
	if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) &&
	    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			map->m_pblk = ext4_es_pblock(&es) +
				      map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			map->m_pblk = 0;
			map->m_flags |= ext4_es_is_delayed(&es) ?
					EXT4_MAP_DELAYED : 0;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
			retval = 0;
		} else {
			BUG();
		}

		if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
			return retval;
#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(handle, inode, map,
					   &orig_map, flags);
#endif
		goto found;
	}
	/*
	 * In the cached no-wait lookup mode there is nothing more we can
	 * do if the extent is not in the cache.
	 */
	if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
		return 0;

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
				      map->m_pblk, status);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_map_blocks() returns with the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		/*
		 * If we need to convert the extent to unwritten
		 * we continue and do the actual work in
		 * ext4_ext_map_blocks()
		 */
		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
			return retval;

	/*
	 * Here we clear m_flags because after allocating a new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * Allocating new blocks and/or writing to an unwritten extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_block()
	 * with create == 1 flag.
	 */
	down_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We need to check the extents flag again here because a migrate
	 * could have changed the inode type in between.
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing. Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}
	}

	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		/*
		 * We have to zeroout blocks before inserting them into extent
		 * status tree. Otherwise someone could look them up there and
		 * use them before they are really zeroed. We also have to
		 * unmap metadata before zeroing as otherwise writeback can
		 * overwrite zeros with stale data from block device.
		 */
		if (flags & EXT4_GET_BLOCKS_ZERO &&
		    map->m_flags & EXT4_MAP_MAPPED &&
		    map->m_flags & EXT4_MAP_NEW) {
			ret = ext4_issue_zeroout(inode, map->m_lblk,
						 map->m_pblk, map->m_len);
			if (ret) {
				retval = ret;
				goto out_sem;
			}
		}

		/*
		 * If the extent has been zeroed out, we don't need to update
		 * extent status tree.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
		    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
			if (ext4_es_is_written(&es))
				goto out_sem;
		}
		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
				      map->m_pblk, status);
	}

out_sem:
	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;

		/*
		 * Inodes with freshly allocated blocks where contents will be
		 * visible after transaction commit must be on transaction's
		 * ordered data list.
		 */
		if (map->m_flags & EXT4_MAP_NEW &&
		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
		    !ext4_is_quota_file(inode) &&
		    ext4_should_order_data(inode)) {
			loff_t start_byte =
				(loff_t)map->m_lblk << inode->i_blkbits;
			loff_t length = (loff_t)map->m_len << inode->i_blkbits;

			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
				ret = ext4_jbd2_inode_add_wait(handle, inode,
						start_byte, length);
			else
				ret = ext4_jbd2_inode_add_write(handle, inode,
						start_byte, length);
			if (ret)
				return ret;
		}
	}
	if (retval > 0 && (map->m_flags & EXT4_MAP_UNWRITTEN ||
			   map->m_flags & EXT4_MAP_MAPPED))
		ext4_fc_track_range(handle, inode, map->m_lblk,
				    map->m_lblk + map->m_len - 1);
	if (retval < 0)
		ext_debug(inode, "failed with err %d\n", retval);
	return retval;
}

/*
 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 * we have to be careful as someone else may be manipulating b_state as well.
 */
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
	unsigned long old_state;
	unsigned long new_state;

	flags &= EXT4_MAP_FLAGS;

	/* Dummy buffer_head? Set non-atomically. */
	if (!bh->b_page) {
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
		return;
	}
	/*
	 * Someone else may be modifying b_state. Be careful! This is ugly but
	 * once we get rid of using bh as a container for mapping information
	 * to pass to / from get_block functions, this can go away.
	 */
	old_state = READ_ONCE(bh->b_state);
	do {
		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
	} while (unlikely(!try_cmpxchg(&bh->b_state, &old_state, new_state)));
}

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	struct ext4_map_blocks map;
	int ret = 0;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
			      flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		ext4_update_bh_state(bh, map.m_flags);
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	} else if (ret == 0) {
		/* hole case, need to fill in bh->b_size */
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
	}
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * Get block function used when preparing for buffered write if we require
 * creating an unwritten extent if blocks haven't been allocated. The extent
 * will be converted to written after the IO is complete.
 */
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	int ret = 0;

	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	ret = _ext4_get_block(inode, iblock, bh_result,
			      EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);

	/*
	 * If the buffer is marked unwritten, mark it as new to make sure it is
	 * zeroed out correctly in case of partial writes. Otherwise, there is
	 * a chance of stale data getting exposed.
	 */
	if (ret == 0 && buffer_unwritten(bh_result))
		set_buffer_new(bh_result);

	return ret;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int map_flags)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
	bool nowait = map_flags & EXT4_GET_BLOCKS_CACHED_NOWAIT;
	int err;

	ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		    || handle != NULL || create == 0);
	ASSERT(create == 0 || !nowait);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map, map_flags);

	if (err == 0)
		return create ? ERR_PTR(-ENOSPC) : NULL;
	if (err < 0)
		return ERR_PTR(err);

	if (nowait)
		return sb_find_get_block(inode->i_sb, map.m_pblk);

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	if (map.m_flags & EXT4_MAP_NEW) {
		ASSERT(create != 0);
		ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
			    || (handle != NULL));

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata. For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
						     EXT4_JTR_NONE);
		if (unlikely(err)) {
			unlock_buffer(bh);
			goto errout;
		}
		if (!buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			goto errout;
	} else
		BUFFER_TRACE(bh, "not a new buffer");
	return bh;
errout:
	brelse(bh);
	return ERR_PTR(err);
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int map_flags)
{
	struct buffer_head *bh;
	int ret;

	bh = ext4_getblk(handle, inode, block, map_flags);
	if (IS_ERR(bh))
		return bh;
	if (!bh || ext4_buffer_uptodate(bh))
		return bh;

	ret = ext4_read_bh_lock(bh, REQ_META | REQ_PRIO, true);
	if (ret) {
		put_bh(bh);
		return ERR_PTR(ret);
	}
	return bh;
}

/* Read a contiguous batch of blocks. */
int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
		     bool wait, struct buffer_head **bhs)
{
	int i, err;

	for (i = 0; i < bh_count; i++) {
		bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
		if (IS_ERR(bhs[i])) {
			err = PTR_ERR(bhs[i]);
			bh_count = i;
			goto out_brelse;
		}
	}

	for (i = 0; i < bh_count; i++)
		/* Note that NULL bhs[i] is valid because of holes. */
		if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
			ext4_read_bh_lock(bhs[i], REQ_META | REQ_PRIO, false);

	if (!wait)
		return 0;

	for (i = 0; i < bh_count; i++)
		if (bhs[i])
			wait_on_buffer(bhs[i]);

	for (i = 0; i < bh_count; i++) {
		if (bhs[i] && !buffer_uptodate(bhs[i])) {
			err = -EIO;
			goto out_brelse;
		}
	}
	return 0;

out_brelse:
	for (i = 0; i < bh_count; i++) {
		brelse(bhs[i]);
		bhs[i] = NULL;
	}
	return err;
}
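
/*
 * Usage sketch (added note, not from the original source): reading two
 * adjacent blocks synchronously might look like:
 *
 *	struct buffer_head *bhs[2] = { NULL, NULL };
 *	int err = ext4_bread_batch(inode, block, 2, true, bhs);
 *
 * On success, bhs[i] may still be NULL where the range has a hole; each
 * non-NULL buffer is uptodate and must eventually be brelse()d by the
 * caller. On failure the helper has already released whatever it grabbed.
 */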

int ext4_walk_page_buffers(handle_t *handle, struct inode *inode,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle, struct inode *inode,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, inode, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
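
/*
 * Usage sketch (added note, not from the original source): this walker
 * applies @fn to just the buffers covering [from, to) in a folio, as the
 * data=journal paths below do, e.g.:
 *
 *	ret = ext4_walk_page_buffers(handle, inode, folio_buffers(folio),
 *				     from, to, NULL,
 *				     do_journal_get_write_access);
 *
 * When @partial is non-NULL it is set if any buffer outside the range is
 * not uptodate, telling the caller the folio cannot be marked uptodate.
 */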

/*
 * Helper for handling dirtying of journalled data. We also mark the folio
 * as dirty so that the writeback code knows this folio (and its inode)
 * contains dirty data. ext4_writepages() then commits the appropriate
 * transaction to make the data stable.
 */
static int ext4_dirty_journalled_data(handle_t *handle, struct buffer_head *bh)
{
	folio_mark_dirty(bh->b_folio);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

int do_journal_get_write_access(handle_t *handle, struct inode *inode,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	BUFFER_TRACE(bh, "get write access");
	ret = ext4_journal_get_write_access(handle, inode->i_sb, bh,
					    EXT4_JTR_NONE);
	if (!ret && dirty)
		ret = ext4_dirty_journalled_data(handle, bh);
	return ret;
}

#ifdef CONFIG_FS_ENCRYPTION
static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
				  get_block_t *get_block)
{
	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + len;
	struct inode *inode = folio->mapping->host;
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;
	unsigned bbits;
	struct buffer_head *bh, *head, *wait[2];
	int nr_wait = 0;
	int i;

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(from > PAGE_SIZE);
	BUG_ON(to > PAGE_SIZE);
	BUG_ON(from > to);

	head = folio_buffers(folio);
	if (!head)
		head = create_empty_buffers(folio, blocksize, 0);
	bbits = ilog2(blocksize);
	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);

	for (bh = head, block_start = 0; bh != head || !block_start;
	     block++, block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (folio_test_uptodate(folio)) {
				set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				if (folio_test_uptodate(folio)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from)
					folio_zero_segments(folio, to,
							    block_end,
							    block_start, from);
				continue;
			}
		}
		if (folio_test_uptodate(folio)) {
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		    (block_start < from || block_end > to)) {
			ext4_read_bh_lock(bh, 0, false);
			wait[nr_wait++] = bh;
		}
	}
	/*
	 * If we issued read requests, let them complete.
	 */
	for (i = 0; i < nr_wait; i++) {
		wait_on_buffer(wait[i]);
		if (!buffer_uptodate(wait[i]))
			err = -EIO;
	}
	if (unlikely(err)) {
		folio_zero_new_buffers(folio, from, to);
	} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
		for (i = 0; i < nr_wait; i++) {
			int err2;

			err2 = fscrypt_decrypt_pagecache_blocks(folio,
						blocksize, bh_offset(wait[i]));
			if (err2) {
				clear_buffer_uptodate(wait[i]);
				err = err2;
			}
		}
	}

	return err;
}
#endif

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction. We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the ext4_write_end(). So doing the jbd2_journal_start at the start of
 * ext4_write_begin() is the right place.
 */
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct folio **foliop, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct folio *folio;
	pgoff_t index;
	unsigned from, to;

	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
		return -EIO;

	trace_ext4_write_begin(inode, pos, len);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but the write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_SHIFT;
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    foliop);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * __filemap_get_folio() can take a long time if the
	 * system is thrashing due to memory pressure, or if the folio
	 * is being written back. So grab it first before we start
	 * the transaction handle. This also allows us to allocate
	 * the folio (if needed) without using GFP_NOFS.
	 */
retry_grab:
	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	/*
	 * As with the folio itself, preallocate the buffer heads before
	 * starting the handle.
	 */
	if (!folio_buffers(folio))
		create_empty_buffers(folio, inode->i_sb->s_blocksize, 0);

	folio_unlock(folio);

retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		folio_put(folio);
		return PTR_ERR(handle);
	}

	folio_lock(folio);
	if (folio->mapping != mapping) {
		/* The folio got truncated from under us */
		folio_unlock(folio);
		folio_put(folio);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	/* In case writeback began while the folio was unlocked */
	folio_wait_stable(folio);

#ifdef CONFIG_FS_ENCRYPTION
	if (ext4_should_dioread_nolock(inode))
		ret = ext4_block_write_begin(folio, pos, len,
					     ext4_get_block_unwritten);
	else
		ret = ext4_block_write_begin(folio, pos, len, ext4_get_block);
#else
	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(folio, pos, len,
					  ext4_get_block_unwritten);
	else
		ret = __block_write_begin(folio, pos, len, ext4_get_block);
#endif
	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, inode,
					     folio_buffers(folio), from, to,
					     NULL, do_journal_get_write_access);
	}

	if (ret) {
		bool extended = (pos + len > inode->i_size) &&
				!ext4_verity_in_progress(inode);

		folio_unlock(folio);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size. Trim these off again. Don't need
		 * i_size_read because we hold i_rwsem.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (extended && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (extended) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;
		folio_put(folio);
		return ret;
	}
	*foliop = folio;
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct inode *inode,
			struct buffer_head *bh)
{
	int ret;
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	ret = ext4_dirty_journalled_data(handle, bh);
	clear_buffer_meta(bh);
	clear_buffer_prio(bh);
	return ret;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->i_private_list. metadata
 * buffers are managed internally.
 */
static int ext4_write_end(struct file *file,
			  struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct folio *folio, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int i_size_changed = 0;
	bool verity = ext4_verity_in_progress(inode);

	trace_ext4_write_end(inode, pos, len, copied);

	if (ext4_has_inline_data(inode) &&
	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
		return ext4_write_inline_data_end(inode, pos, len, copied,
						  folio);

	copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
	/*
	 * it's important to update i_size while still holding folio lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 *
	 * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
	 * blocks are being written past EOF, so skip the i_size update.
	 */
	if (!verity)
		i_size_changed = ext4_update_inode_size(inode, pos + copied);
	folio_unlock(folio);
	folio_put(folio);

	if (old_size < pos && !verity)
		pagecache_isize_extended(inode, old_size, pos);
	/*
	 * Don't mark the inode dirty under folio lock. First, it unnecessarily
	 * makes the holding time of folio lock longer. Second, it forces lock
	 * ordering of folio lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ret = ext4_mark_inode_dirty(handle, inode);

	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks than were copied, we will
		 * have blocks allocated outside inode->i_size, so truncate
		 * them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size && !verity) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * This is a private version of folio_zero_new_buffers() which doesn't
 * set the buffer to be dirty, since in data=journalled mode we need
 * to call ext4_dirty_journalled_data() instead.
 */
static void ext4_journalled_zero_new_buffers(handle_t *handle,
					     struct inode *inode,
					     struct folio *folio,
					     unsigned from, unsigned to)
{
	unsigned int block_start = 0, block_end;
	struct buffer_head *head, *bh;

	bh = head = folio_buffers(folio);
	do {
		block_end = block_start + bh->b_size;
		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!folio_test_uptodate(folio)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					folio_zero_range(folio, start, size);
					write_end_fn(handle, inode, bh);
				}
				clear_buffer_new(bh);
			}
		}
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct folio *folio, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	int size_changed = 0;
	bool verity = ext4_verity_in_progress(inode);

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (ext4_has_inline_data(inode))
		return ext4_write_inline_data_end(inode, pos, len, copied,
						  folio);

	if (unlikely(copied < len) && !folio_test_uptodate(folio)) {
		copied = 0;
		ext4_journalled_zero_new_buffers(handle, inode, folio,
						 from, to);
	} else {
		if (unlikely(copied < len))
			ext4_journalled_zero_new_buffers(handle, inode, folio,
							 from + copied, to);
		ret = ext4_walk_page_buffers(handle, inode,
					     folio_buffers(folio),
					     from, from + copied, &partial,
					     write_end_fn);
		if (!partial)
			folio_mark_uptodate(folio);
	}
	if (!verity)
		size_changed = ext4_update_inode_size(inode, pos + copied);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	folio_unlock(folio);
	folio_put(folio);

	if (old_size < pos && !verity)
		pagecache_isize_extended(inode, old_size, pos);

	if (size_changed) {
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks than were copied, we will
		 * have blocks allocated outside inode->i_size, so truncate
		 * them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size && !verity) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * Reserve space for 'nr_resv' clusters
 */
static int ext4_da_reserve_space(struct inode *inode, int nr_resv)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int ret;

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end. Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, nr_resv));
	if (ret)
		return ret;

	spin_lock(&ei->i_block_reservation_lock);
	if (ext4_claim_free_clusters(sbi, nr_resv, 0)) {
		spin_unlock(&ei->i_block_reservation_lock);
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, nr_resv));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks += nr_resv;
	trace_ext4_da_reserve_space(inode, nr_resv);
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;	/* success */
}
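
/*
 * Added note (not from the original source): each successful
 * ext4_da_reserve_space() is eventually balanced, either by
 * ext4_da_update_reserve_space() above once the delayed blocks are really
 * allocated at writeback time, or by ext4_da_release_space() below if the
 * dirty data is invalidated without ever being written. A sketch:
 *
 *	if (!ext4_da_reserve_space(inode, 1)) {
 *		// ... later, when the folio is truncated/invalidated
 *		// instead of written back:
 *		ext4_da_release_space(inode, 1);
 *	}
 */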

void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * If there aren't enough reserved blocks, then the
		 * counter is messed up somewhere. Since this
		 * function is called from the folio invalidation path,
		 * it's harmless to return without any action.
		 */
		ext4_warning(inode->i_sb, "ext4_da_release_space: "
			 "ino %lu, to_free %d with only %d reserved "
			 "data blocks", inode->i_ino, to_free,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}

/*
 * Delayed allocation stuff
 */

struct mpage_da_data {
	/* These are input fields for ext4_do_writepages() */
	struct inode *inode;
	struct writeback_control *wbc;
	unsigned int can_map:1;	/* Can writepages call map blocks? */

	/* These are internal state of ext4_do_writepages() */
	pgoff_t first_page;	/* The first page to write */
	pgoff_t next_page;	/* Current page to examine */
	pgoff_t last_page;	/* Last page to examine */
	/*
	 * Extent to map - this can be after first_page because that can be
	 * fully mapped. We somewhat abuse m_flags to store whether the extent
	 * is delalloc or unwritten.
	 */
	struct ext4_map_blocks map;
	struct ext4_io_submit io_submit;	/* IO submission data */
	unsigned int do_map:1;
	unsigned int scanned_until_end:1;
	unsigned int journalled_more_data:1;
};

static void mpage_release_unused_pages(struct mpage_da_data *mpd,
				       bool invalidate)
{
	unsigned nr, i;
	pgoff_t index, end;
	struct folio_batch fbatch;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	/* This is necessary when next_page == 0. */
	if (mpd->first_page >= mpd->next_page)
		return;

	mpd->scanned_until_end = 0;
	index = mpd->first_page;
	end = mpd->next_page - 1;
	if (invalidate) {
		ext4_lblk_t start, last;
		start = index << (PAGE_SHIFT - inode->i_blkbits);
		last = end << (PAGE_SHIFT - inode->i_blkbits);

		/*
		 * avoid racing with extent status tree scans made by
		 * ext4_insert_delayed_block()
		 */
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_es_remove_extent(inode, start, last - start + 1);
		up_write(&EXT4_I(inode)->i_data_sem);
	}

	folio_batch_init(&fbatch);
	while (index <= end) {
		nr = filemap_get_folios(mapping, &index, end, &fbatch);
		if (nr == 0)
			break;
		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];

			if (folio->index < mpd->first_page)
				continue;
			if (folio_next_index(folio) - 1 > end)
				continue;
			BUG_ON(!folio_test_locked(folio));
			BUG_ON(folio_test_writeback(folio));
			if (invalidate) {
				if (folio_mapped(folio))
					folio_clear_dirty_for_io(folio);
				block_invalidate_folio(folio, 0,
						       folio_size(folio));
				folio_clear_uptodate(folio);
			}
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
	}
}

static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct super_block *sb = inode->i_sb;
	struct ext4_inode_info *ei = EXT4_I(inode);

	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
		 EXT4_C2B(EXT4_SB(inode->i_sb),
			  ext4_count_free_clusters(sb)));
	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(sb),
		 percpu_counter_sum(&sbi->s_freeclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(sb),
		 percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "Block reservation details");
	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
		 ei->i_reserved_data_blocks);
	return;
}

/*
 * Check whether the cluster containing lblk has been allocated or has
 * delalloc reservation.
 *
 * Returns 0 if the cluster doesn't have either, 1 if it has delalloc
 * reservation, 2 if it's already been allocated, negative error code on
 * failure.
 */
static int ext4_clu_alloc_state(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int ret;

	/* Has delalloc reservation? */
	if (ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk))
		return 1;

	/* Already been allocated? */
	if (ext4_es_scan_clu(inode, &ext4_es_is_mapped, lblk))
		return 2;
	ret = ext4_clu_mapped(inode, EXT4_B2C(sbi, lblk));
	if (ret < 0)
		return ret;
	if (ret > 0)
		return 2;

	return 0;
}

/*
 * ext4_insert_delayed_blocks - adds multiple delayed blocks to the extents
 *                              status tree, incrementing the reserved
 *                              cluster/block count or making pending
 *                              reservations where needed
 *
 * @inode - file containing the newly added block
 * @lblk - start logical block to be added
 * @len - length of blocks to be added
 *
 * Returns 0 on success, negative error code on failure.
 */
static int ext4_insert_delayed_blocks(struct inode *inode, ext4_lblk_t lblk,
				      ext4_lblk_t len)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int ret;
	bool lclu_allocated = false;
	bool end_allocated = false;
	ext4_lblk_t resv_clu;
	ext4_lblk_t end = lblk + len - 1;

	/*
	 * If the cluster containing lblk or end is shared with a delayed,
	 * written, or unwritten extent in a bigalloc file system, it's
	 * already been accounted for and does not need to be reserved.
	 * A pending reservation must be made for the cluster if it's
	 * shared with a written or unwritten extent and doesn't already
	 * have one. Written and unwritten extents can be purged from the
	 * extents status tree if the system is under memory pressure, so
	 * it's necessary to examine the extent tree if a search of the
	 * extents status tree doesn't get a match.
	 */
	if (sbi->s_cluster_ratio == 1) {
		ret = ext4_da_reserve_space(inode, len);
		if (ret != 0)   /* ENOSPC */
			return ret;
	} else {   /* bigalloc */
		resv_clu = EXT4_B2C(sbi, end) - EXT4_B2C(sbi, lblk) + 1;

		ret = ext4_clu_alloc_state(inode, lblk);
		if (ret < 0)
			return ret;
		if (ret > 0) {
			resv_clu--;
			lclu_allocated = (ret == 2);
		}

		if (EXT4_B2C(sbi, lblk) != EXT4_B2C(sbi, end)) {
			ret = ext4_clu_alloc_state(inode, end);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				resv_clu--;
				end_allocated = (ret == 2);
			}
		}

		if (resv_clu) {
			ret = ext4_da_reserve_space(inode, resv_clu);
			if (ret != 0)   /* ENOSPC */
				return ret;
		}
	}

	ext4_es_insert_delayed_extent(inode, lblk, len, lclu_allocated,
				      end_allocated);
	return 0;
}
1748
1749 /*
1750 * Looks up the requested blocks and sets the delalloc extent map.
1751 * First try to look up the extent entry that contains the requested
1752 * blocks in the extent status tree without i_data_sem, then try to look
1753 * up the on-disk extent mapping with i_data_sem held in read mode, and
1754 * finally hold i_data_sem in write mode, look up again and add a
1755 * delalloc extent entry if we still couldn't find any extent. Pass out
1756 * the mapped extent through @map and return 0 on success.
1757 */
1758 static int ext4_da_map_blocks(struct inode *inode, struct ext4_map_blocks *map)
1759 {
1760 struct extent_status es;
1761 int retval;
1762 #ifdef ES_AGGRESSIVE_TEST
1763 struct ext4_map_blocks orig_map;
1764
1765 memcpy(&orig_map, map, sizeof(*map));
1766 #endif
1767
1768 map->m_flags = 0;
1769 ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
1770 (unsigned long) map->m_lblk);
1771
1772 /* Lookup extent status tree firstly */
1773 if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
1774 map->m_len = min_t(unsigned int, map->m_len,
1775 es.es_len - (map->m_lblk - es.es_lblk));
1776
1777 if (ext4_es_is_hole(&es))
1778 goto add_delayed;
1779
1780 found:
1781 /*
1782 * Delayed extent could be allocated by fallocate.
1783 * So we need to check it.
1784 */
1785 if (ext4_es_is_delonly(&es)) {
1786 map->m_flags |= EXT4_MAP_DELAYED;
1787 return 0;
1788 }
1789
1790 map->m_pblk = ext4_es_pblock(&es) + map->m_lblk - es.es_lblk;
1791 if (ext4_es_is_written(&es))
1792 map->m_flags |= EXT4_MAP_MAPPED;
1793 else if (ext4_es_is_unwritten(&es))
1794 map->m_flags |= EXT4_MAP_UNWRITTEN;
1795 else
1796 BUG();
1797
1798 #ifdef ES_AGGRESSIVE_TEST
1799 ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1800 #endif
1801 return 0;
1802 }
1803
1804 /*
1805 * Try to see if we can get the block without requesting a new
1806 * file system block.
1807 */
1808 down_read(&EXT4_I(inode)->i_data_sem);
1809 if (ext4_has_inline_data(inode))
1810 retval = 0;
1811 else
1812 retval = ext4_map_query_blocks(NULL, inode, map);
1813 up_read(&EXT4_I(inode)->i_data_sem);
1814 if (retval)
1815 return retval < 0 ? retval : 0;
1816
1817 add_delayed:
1818 down_write(&EXT4_I(inode)->i_data_sem);
1819 /*
1820 * Page fault path (ext4_page_mkwrite does not take i_rwsem)
1821 * and fallocate path (no folio lock) can race. Make sure we
1822 * look up the extent status tree here again while i_data_sem
1823 * is held in write mode, before inserting a new da entry in
1824 * the extent status tree.
1825 */
1826 if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
1827 map->m_len = min_t(unsigned int, map->m_len,
1828 es.es_len - (map->m_lblk - es.es_lblk));
1829
1830 if (!ext4_es_is_hole(&es)) {
1831 up_write(&EXT4_I(inode)->i_data_sem);
1832 goto found;
1833 }
1834 } else if (!ext4_has_inline_data(inode)) {
1835 retval = ext4_map_query_blocks(NULL, inode, map);
1836 if (retval) {
1837 up_write(&EXT4_I(inode)->i_data_sem);
1838 return retval < 0 ? retval : 0;
1839 }
1840 }
1841
1842 map->m_flags |= EXT4_MAP_DELAYED;
1843 retval = ext4_insert_delayed_blocks(inode, map->m_lblk, map->m_len);
1844 up_write(&EXT4_I(inode)->i_data_sem);
1845
1846 return retval;
1847 }
1848
1849 /*
1850 * This is a special get_block_t callback which is used by
1851 * ext4_da_write_begin(). It will either return mapped block or
1852 * reserve space for a single block.
1853 *
1854 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
1855 * We also have b_blocknr = -1 and b_bdev initialized properly
1856 *
1857 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
1858 * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
1859 * initialized properly.
1860 */
1861 int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1862 struct buffer_head *bh, int create)
1863 {
1864 struct ext4_map_blocks map;
1865 sector_t invalid_block = ~((sector_t) 0xffff);
1866 int ret = 0;
1867
1868 BUG_ON(create == 0);
1869 BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1870
1871 if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1872 invalid_block = ~0;
1873
1874 map.m_lblk = iblock;
1875 map.m_len = 1;
1876
1877 /*
1878 * First, we need to know whether the block is already allocated;
1879 * preallocated blocks are unmapped but should be treated
1880 * the same as allocated blocks.
1881 */
1882 ret = ext4_da_map_blocks(inode, &map);
1883 if (ret < 0)
1884 return ret;
1885
1886 if (map.m_flags & EXT4_MAP_DELAYED) {
1887 map_bh(bh, inode->i_sb, invalid_block);
1888 set_buffer_new(bh);
1889 set_buffer_delay(bh);
1890 return 0;
1891 }
1892
1893 map_bh(bh, inode->i_sb, map.m_pblk);
1894 ext4_update_bh_state(bh, map.m_flags);
1895
1896 if (buffer_unwritten(bh)) {
1897 /* A delayed write to unwritten bh should be marked
1898 * new and mapped. Mapped ensures that we don't do
1899 * get_block multiple times when we write to the same
1900 * offset and new ensures that we do proper zero out
1901 * for partial write.
1902 */
1903 set_buffer_new(bh);
1904 set_buffer_mapped(bh);
1905 }
1906 return 0;
1907 }
1908
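/* Advance the writeback cursor past @folio and drop its lock. */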
1909 static void mpage_folio_done(struct mpage_da_data *mpd, struct folio *folio)
1910 {
1911 mpd->first_page += folio_nr_pages(folio);
1912 folio_unlock(folio);
1913 }
1914
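/*
 * Clear the folio's dirty bit, trim the writeback length to i_size and
 * hand the folio to the bio layer, crediting it against nr_to_write.
 */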
1915 static int mpage_submit_folio(struct mpage_da_data *mpd, struct folio *folio)
1916 {
1917 size_t len;
1918 loff_t size;
1919 int err;
1920
1921 BUG_ON(folio->index != mpd->first_page);
1922 folio_clear_dirty_for_io(folio);
1923 /*
1924 * We have to be very careful here! Nothing protects writeback path
1925 * against i_size changes and the page can be writeably mapped into
1926 * page tables. So an application can be growing i_size and writing
1927 * data through mmap while writeback runs. folio_clear_dirty_for_io()
1928 * write-protects our page in page tables and the page cannot get
1929 * written to again until we release folio lock. So only after
1930 * folio_clear_dirty_for_io() we are safe to sample i_size for
1931 * ext4_bio_write_folio() to zero-out tail of the written page. We rely
1932 * on the barrier provided by folio_test_clear_dirty() in
1933 * folio_clear_dirty_for_io() to make sure i_size is really sampled only
1934 * after page tables are updated.
1935 */
1936 size = i_size_read(mpd->inode);
1937 len = folio_size(folio);
1938 if (folio_pos(folio) + len > size &&
1939 !ext4_verity_in_progress(mpd->inode))
1940 len = size & (len - 1);
1941 err = ext4_bio_write_folio(&mpd->io_submit, folio, len);
1942 if (!err)
1943 mpd->wbc->nr_to_write--;
1944
1945 return err;
1946 }
1947
1948 #define BH_FLAGS (BIT(BH_Unwritten) | BIT(BH_Delay))
1949
1950 /*
1951 * mballoc gives us at most this number of blocks...
1952 * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
1953 * The rest of mballoc seems to handle chunks up to full group size.
1954 */
1955 #define MAX_WRITEPAGES_EXTENT_LEN 2048
1956
1957 /*
1958 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
1959 *
1960 * @mpd - extent of blocks
1961 * @lblk - logical number of the block in the file
1962 * @bh - buffer head we want to add to the extent
1963 *
1964 * The function is used to collect contiguous blocks in the same state. If the
1965 * buffer doesn't require mapping for writeback and we haven't started the
1966 * extent of buffers to map yet, the function returns 'true' immediately - the
1967 * caller can write the buffer right away. Otherwise the function returns true
1968 * if the block has been added to the extent, false if the block couldn't be
1969 * added.
1970 */
1971 static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
1972 struct buffer_head *bh)
1973 {
1974 struct ext4_map_blocks *map = &mpd->map;
1975
1976 /* Buffer that doesn't need mapping for writeback? */
1977 if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
1978 (!buffer_delay(bh) && !buffer_unwritten(bh))) {
1979 /* So far no extent to map => we write the buffer right away */
1980 if (map->m_len == 0)
1981 return true;
1982 return false;
1983 }
1984
1985 /* First block in the extent? */
1986 if (map->m_len == 0) {
1987 /* We cannot map unless handle is started... */
1988 if (!mpd->do_map)
1989 return false;
1990 map->m_lblk = lblk;
1991 map->m_len = 1;
1992 map->m_flags = bh->b_state & BH_FLAGS;
1993 return true;
1994 }
1995
1996 /* Don't go larger than mballoc is willing to allocate */
1997 if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
1998 return false;
1999
2000 /* Can we merge the block to our big extent? */
2001 if (lblk == map->m_lblk + map->m_len &&
2002 (bh->b_state & BH_FLAGS) == map->m_flags) {
2003 map->m_len++;
2004 return true;
2005 }
2006 return false;
2007 }
2008
2009 /*
2010 * mpage_process_page_bufs - submit page buffers for IO or add them to extent
2011 *
2012 * @mpd - extent of blocks for mapping
2013 * @head - the first buffer in the page
2014 * @bh - buffer we should start processing from
2015 * @lblk - logical number of the block in the file corresponding to @bh
2016 *
2017 * Walk through page buffers from @bh up to @head (exclusive) and either submit
2018 * the page for IO if all buffers in this page were mapped and there's no
2019 * accumulated extent of buffers to map or add buffers in the page to the
2020 * extent of buffers to map. The function returns 1 if the caller can continue
2021 * by processing the next page, 0 if it should stop adding buffers to the
2022 * extent to map because we cannot extend it anymore. It can also return a value
2023 * < 0 in case of error during IO submission.
2024 */
2025 static int mpage_process_page_bufs(struct mpage_da_data *mpd,
2026 struct buffer_head *head,
2027 struct buffer_head *bh,
2028 ext4_lblk_t lblk)
2029 {
2030 struct inode *inode = mpd->inode;
2031 int err;
2032 ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
2033 >> inode->i_blkbits;
2034
2035 if (ext4_verity_in_progress(inode))
2036 blocks = EXT_MAX_BLOCKS;
2037
2038 do {
2039 BUG_ON(buffer_locked(bh));
2040
2041 if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
2042 /* Found extent to map? */
2043 if (mpd->map.m_len)
2044 return 0;
2045 /* Buffer needs mapping and handle is not started? */
2046 if (!mpd->do_map)
2047 return 0;
2048 /* Everything mapped so far and we hit EOF */
2049 break;
2050 }
2051 } while (lblk++, (bh = bh->b_this_page) != head);
2052 /* So far everything mapped? Submit the page for IO. */
2053 if (mpd->map.m_len == 0) {
2054 err = mpage_submit_folio(mpd, head->b_folio);
2055 if (err < 0)
2056 return err;
2057 mpage_folio_done(mpd, head->b_folio);
2058 }
2059 if (lblk >= blocks) {
2060 mpd->scanned_until_end = 1;
2061 return 0;
2062 }
2063 return 1;
2064 }
2065
2066 /*
2067 * mpage_process_folio - update folio buffers corresponding to changed extent
2068 * and may submit a fully mapped page for IO
2069 * @mpd: description of extent to map, on return next extent to map
2070 * @folio: Contains these buffers.
2071 * @m_lblk: logical block mapping.
2072 * @m_pblk: corresponding physical mapping.
2073 * @map_bh: determines on return whether this page requires any further
2074 * mapping or not.
2075 *
2076 * Scan given folio buffers corresponding to changed extent and update buffer
2077 * state according to new extent state.
2078 * We map delalloc buffers to their physical location, clear unwritten bits.
2079 * If the given folio is not fully mapped, we update @mpd to the next extent in
2080 * the given folio that needs mapping & return @map_bh as true.
2081 */
2082 static int mpage_process_folio(struct mpage_da_data *mpd, struct folio *folio,
2083 ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk,
2084 bool *map_bh)
2085 {
2086 struct buffer_head *head, *bh;
2087 ext4_io_end_t *io_end = mpd->io_submit.io_end;
2088 ext4_lblk_t lblk = *m_lblk;
2089 ext4_fsblk_t pblock = *m_pblk;
2090 int err = 0;
2091 int blkbits = mpd->inode->i_blkbits;
2092 ssize_t io_end_size = 0;
2093 struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end);
2094
2095 bh = head = folio_buffers(folio);
2096 do {
2097 if (lblk < mpd->map.m_lblk)
2098 continue;
2099 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2100 /*
2101 * Buffer after end of mapped extent.
2102 * Find next buffer in the folio to map.
2103 */
2104 mpd->map.m_len = 0;
2105 mpd->map.m_flags = 0;
2106 io_end_vec->size += io_end_size;
2107
2108 err = mpage_process_page_bufs(mpd, head, bh, lblk);
2109 if (err > 0)
2110 err = 0;
2111 if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
2112 io_end_vec = ext4_alloc_io_end_vec(io_end);
2113 if (IS_ERR(io_end_vec)) {
2114 err = PTR_ERR(io_end_vec);
2115 goto out;
2116 }
2117 io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits;
2118 }
2119 *map_bh = true;
2120 goto out;
2121 }
2122 if (buffer_delay(bh)) {
2123 clear_buffer_delay(bh);
2124 bh->b_blocknr = pblock++;
2125 }
2126 clear_buffer_unwritten(bh);
2127 io_end_size += (1 << blkbits);
2128 } while (lblk++, (bh = bh->b_this_page) != head);
2129
2130 io_end_vec->size += io_end_size;
2131 *map_bh = false;
2132 out:
2133 *m_lblk = lblk;
2134 *m_pblk = pblock;
2135 return err;
2136 }
2137
2138 /*
2139 * mpage_map_and_submit_buffers - update buffers corresponding to changed extent and
2140 * submit fully mapped pages for IO
2141 *
2142 * @mpd - description of extent to map, on return next extent to map
2143 *
2144 * Scan buffers corresponding to changed extent (we expect corresponding pages
2145 * to be already locked) and update buffer state according to new extent state.
2146 * We map delalloc buffers to their physical location, clear unwritten bits,
2147 * and mark buffers as uninit when we perform writes to unwritten extents
2148 * and do extent conversion after IO is finished. If the last page is not fully
2149 * mapped, we update @mpd to the next extent in the last page that needs
2150 * mapping. Otherwise we submit the page for IO.
2151 */
2152 static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2153 {
2154 struct folio_batch fbatch;
2155 unsigned nr, i;
2156 struct inode *inode = mpd->inode;
2157 int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
2158 pgoff_t start, end;
2159 ext4_lblk_t lblk;
2160 ext4_fsblk_t pblock;
2161 int err;
2162 bool map_bh = false;
2163
2164 start = mpd->map.m_lblk >> bpp_bits;
2165 end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2166 lblk = start << bpp_bits;
2167 pblock = mpd->map.m_pblk;
2168
2169 folio_batch_init(&fbatch);
2170 while (start <= end) {
2171 nr = filemap_get_folios(inode->i_mapping, &start, end, &fbatch);
2172 if (nr == 0)
2173 break;
2174 for (i = 0; i < nr; i++) {
2175 struct folio *folio = fbatch.folios[i];
2176
2177 err = mpage_process_folio(mpd, folio, &lblk, &pblock,
2178 &map_bh);
2179 /*
2180 * If map_bh is true, the page may require further bh
2181 * mapping, or it may have been submitted for IO.
2182 * So we return to let the caller do further extent mapping.
2183 */
2184 if (err < 0 || map_bh)
2185 goto out;
2186 /* Page fully mapped - let IO run! */
2187 err = mpage_submit_folio(mpd, folio);
2188 if (err < 0)
2189 goto out;
2190 mpage_folio_done(mpd, folio);
2191 }
2192 folio_batch_release(&fbatch);
2193 }
2194 /* Extent fully mapped and matches with page boundary. We are done. */
2195 mpd->map.m_len = 0;
2196 mpd->map.m_flags = 0;
2197 return 0;
2198 out:
2199 folio_batch_release(&fbatch);
2200 return err;
2201 }
2202
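/*
 * Allocate (or convert) the blocks backing mpd->map with a single
 * ext4_map_blocks() call. With dioread_nolock, newly allocated extents
 * are created unwritten and the reserved handle is stashed in the io_end
 * for the later conversion to written.
 */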
2203 static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2204 {
2205 struct inode *inode = mpd->inode;
2206 struct ext4_map_blocks *map = &mpd->map;
2207 int get_blocks_flags;
2208 int err, dioread_nolock;
2209
2210 trace_ext4_da_write_pages_extent(inode, map);
2211 /*
2212 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
2213 * to convert an unwritten extent to be initialized (in the case
2214 * where we have written into one or more preallocated blocks). It is
2215 * possible that we're going to need more metadata blocks than
2216 * previously reserved. However, we must not fail because we're in
2217 * writeback and there is nothing we can do about it, so a failure might
2218 * result in data loss. Use reserved blocks to allocate metadata if
2219 * possible.
2220 *
2221 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
2222 * the blocks in question are delalloc blocks. This indicates
2223 * that the blocks and quotas have already been checked when
2224 * the data was copied into the page cache.
2225 */
2226 get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
2227 EXT4_GET_BLOCKS_METADATA_NOFAIL |
2228 EXT4_GET_BLOCKS_IO_SUBMIT;
2229 dioread_nolock = ext4_should_dioread_nolock(inode);
2230 if (dioread_nolock)
2231 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2232 if (map->m_flags & BIT(BH_Delay))
2233 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2234
2235 err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2236 if (err < 0)
2237 return err;
2238 if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
2239 if (!mpd->io_submit.io_end->handle &&
2240 ext4_handle_valid(handle)) {
2241 mpd->io_submit.io_end->handle = handle->h_rsv_handle;
2242 handle->h_rsv_handle = NULL;
2243 }
2244 ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
2245 }
2246
2247 BUG_ON(map->m_len == 0);
2248 return 0;
2249 }
2250
2251 /*
2252 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2253 * mpd->len and submit pages underlying it for IO
2254 *
2255 * @handle - handle for journal operations
2256 * @mpd - extent to map
2257 * @give_up_on_write - we set this to true iff there is a fatal error and there
2258 * is no hope of writing the data. The caller should discard
2259 * dirty pages to avoid infinite loops.
2260 *
2261 * The function maps extent starting at mpd->lblk of length mpd->len. If it is
2262 * delayed, blocks are allocated, if it is unwritten, we may need to convert
2263 * them to initialized or split the described range from larger unwritten
2264 * extent. Note that we need not map all the described range since allocation
2265 * can return fewer blocks or the range is covered by more unwritten extents. We
2266 * cannot map more because we are limited by reserved transaction credits. On
2267 * the other hand we always make sure that the last touched page is fully
2268 * mapped so that it can be written out (and thus forward progress is
2269 * guaranteed). After mapping we submit all mapped pages for IO.
2270 */
2271 static int mpage_map_and_submit_extent(handle_t *handle,
2272 struct mpage_da_data *mpd,
2273 bool *give_up_on_write)
2274 {
2275 struct inode *inode = mpd->inode;
2276 struct ext4_map_blocks *map = &mpd->map;
2277 int err;
2278 loff_t disksize;
2279 int progress = 0;
2280 ext4_io_end_t *io_end = mpd->io_submit.io_end;
2281 struct ext4_io_end_vec *io_end_vec;
2282
2283 io_end_vec = ext4_alloc_io_end_vec(io_end);
2284 if (IS_ERR(io_end_vec))
2285 return PTR_ERR(io_end_vec);
2286 io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
2287 do {
2288 err = mpage_map_one_extent(handle, mpd);
2289 if (err < 0) {
2290 struct super_block *sb = inode->i_sb;
2291
2292 if (ext4_forced_shutdown(sb))
2293 goto invalidate_dirty_pages;
2294 /*
2295 * Let the upper layers retry transient errors.
2296 * In the case of ENOSPC, if ext4_count_free_clusters()
2297 * is non-zero, a commit should free up blocks.
2298 */
2299 if ((err == -ENOMEM) ||
2300 (err == -ENOSPC && ext4_count_free_clusters(sb))) {
2301 if (progress)
2302 goto update_disksize;
2303 return err;
2304 }
2305 ext4_msg(sb, KERN_CRIT,
2306 "Delayed block allocation failed for "
2307 "inode %lu at logical offset %llu with"
2308 " max blocks %u with error %d",
2309 inode->i_ino,
2310 (unsigned long long)map->m_lblk,
2311 (unsigned)map->m_len, -err);
2312 ext4_msg(sb, KERN_CRIT,
2313 "This should not happen!! Data will "
2314 "be lost\n");
2315 if (err == -ENOSPC)
2316 ext4_print_free_blocks(inode);
2317 invalidate_dirty_pages:
2318 *give_up_on_write = true;
2319 return err;
2320 }
2321 progress = 1;
2322 /*
2323 * Update buffer state, submit mapped pages, and get us new
2324 * extent to map
2325 */
2326 err = mpage_map_and_submit_buffers(mpd);
2327 if (err < 0)
2328 goto update_disksize;
2329 } while (map->m_len);
2330
2331 update_disksize:
2332 /*
2333 * Update on-disk size after IO is submitted. Races with
2334 * truncate are avoided by checking i_size under i_data_sem.
2335 */
2336 disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
2337 if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
2338 int err2;
2339 loff_t i_size;
2340
2341 down_write(&EXT4_I(inode)->i_data_sem);
2342 i_size = i_size_read(inode);
2343 if (disksize > i_size)
2344 disksize = i_size;
2345 if (disksize > EXT4_I(inode)->i_disksize)
2346 EXT4_I(inode)->i_disksize = disksize;
2347 up_write(&EXT4_I(inode)->i_data_sem);
2348 err2 = ext4_mark_inode_dirty(handle, inode);
2349 if (err2) {
2350 ext4_error_err(inode->i_sb, -err2,
2351 "Failed to mark inode %lu dirty",
2352 inode->i_ino);
2353 }
2354 if (!err)
2355 err = err2;
2356 }
2357 return err;
2358 }
2359
2360 /*
2361 * Calculate the total number of credits to reserve for one writepages
2362 * iteration. This is called from ext4_writepages(). We map an extent of
2363 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
2364 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2365 * bpp - 1 blocks in bpp different extents.
2366 */
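/*
 * For example, with 1k blocks on 4k pages, bpp = 4, so one iteration
 * reserves credits to map up to 2048 + 3 blocks spread across 4 extents.
 */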
2367 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2368 {
2369 int bpp = ext4_journal_blocks_per_page(inode);
2370
2371 return ext4_meta_trans_blocks(inode,
2372 MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
2373 }
2374
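/*
 * Journal the data buffers of @folio: get write access, dirty them in
 * the running transaction and record the written range on the inode for
 * fsync/datasync tracking.
 */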
2375 static int ext4_journal_folio_buffers(handle_t *handle, struct folio *folio,
2376 size_t len)
2377 {
2378 struct buffer_head *page_bufs = folio_buffers(folio);
2379 struct inode *inode = folio->mapping->host;
2380 int ret, err;
2381
2382 ret = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
2383 NULL, do_journal_get_write_access);
2384 err = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
2385 NULL, write_end_fn);
2386 if (ret == 0)
2387 ret = err;
2388 err = ext4_jbd2_inode_add_write(handle, inode, folio_pos(folio), len);
2389 if (ret == 0)
2390 ret = err;
2391 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
2392
2393 return ret;
2394 }
2395
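/*
 * data=journal writeback: instead of submitting the folio for IO,
 * journal its buffers in the running transaction, trimming the length
 * to i_size.
 */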
2396 static int mpage_journal_page_buffers(handle_t *handle,
2397 struct mpage_da_data *mpd,
2398 struct folio *folio)
2399 {
2400 struct inode *inode = mpd->inode;
2401 loff_t size = i_size_read(inode);
2402 size_t len = folio_size(folio);
2403
2404 folio_clear_checked(folio);
2405 mpd->wbc->nr_to_write--;
2406
2407 if (folio_pos(folio) + len > size &&
2408 !ext4_verity_in_progress(inode))
2409 len = size & (len - 1);
2410
2411 return ext4_journal_folio_buffers(handle, folio, len);
2412 }
2413
2414 /*
2415 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
2416 * needing mapping, submit mapped pages
2417 *
2418 * @mpd - where to look for pages
2419 *
2420 * Walk dirty pages in the mapping. If they are fully mapped, submit them for
2421 * IO immediately. If we cannot map blocks, we submit just already mapped
2422 * buffers in the page for IO and keep the page dirty. When we can map blocks and
2423 * we find a page which isn't mapped we start accumulating an extent of buffers
2424 * underlying these pages that needs mapping (formed by either delayed or
2425 * unwritten buffers). We also lock the pages containing these buffers. The
2426 * extent found is returned in @mpd structure (starting at mpd->lblk with
2427 * length mpd->len blocks).
2428 *
2429 * Note that this function can attach bios to one io_end structure which are
2430 * neither logically nor physically contiguous. Although it may seem like an
2431 * unnecessary complication, it is actually inevitable in the blocksize <
2432 * pagesize case, as we need to track IO to all buffers underlying a page in one io_end.
2433 */
2434 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2435 {
2436 struct address_space *mapping = mpd->inode->i_mapping;
2437 struct folio_batch fbatch;
2438 unsigned int nr_folios;
2439 pgoff_t index = mpd->first_page;
2440 pgoff_t end = mpd->last_page;
2441 xa_mark_t tag;
2442 int i, err = 0;
2443 int blkbits = mpd->inode->i_blkbits;
2444 ext4_lblk_t lblk;
2445 struct buffer_head *head;
2446 handle_t *handle = NULL;
2447 int bpp = ext4_journal_blocks_per_page(mpd->inode);
2448
2449 if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
2450 tag = PAGECACHE_TAG_TOWRITE;
2451 else
2452 tag = PAGECACHE_TAG_DIRTY;
2453
2454 mpd->map.m_len = 0;
2455 mpd->next_page = index;
2456 if (ext4_should_journal_data(mpd->inode)) {
2457 handle = ext4_journal_start(mpd->inode, EXT4_HT_WRITE_PAGE,
2458 bpp);
2459 if (IS_ERR(handle))
2460 return PTR_ERR(handle);
2461 }
2462 folio_batch_init(&fbatch);
2463 while (index <= end) {
2464 nr_folios = filemap_get_folios_tag(mapping, &index, end,
2465 tag, &fbatch);
2466 if (nr_folios == 0)
2467 break;
2468
2469 for (i = 0; i < nr_folios; i++) {
2470 struct folio *folio = fbatch.folios[i];
2471
2472 /*
2473 * Accumulated enough dirty pages? This doesn't apply
2474 * to WB_SYNC_ALL mode. For integrity sync we have to
2475 * keep going because someone may be concurrently
2476 * dirtying pages, and we might have synced a lot of
2477 * newly appeared dirty pages, but have not synced all
2478 * of the old dirty pages.
2479 */
2480 if (mpd->wbc->sync_mode == WB_SYNC_NONE &&
2481 mpd->wbc->nr_to_write <=
2482 mpd->map.m_len >> (PAGE_SHIFT - blkbits))
2483 goto out;
2484
2485 /* If we can't merge this page, we are done. */
2486 if (mpd->map.m_len > 0 && mpd->next_page != folio->index)
2487 goto out;
2488
2489 if (handle) {
2490 err = ext4_journal_ensure_credits(handle, bpp,
2491 0);
2492 if (err < 0)
2493 goto out;
2494 }
2495
2496 folio_lock(folio);
2497 /*
2498 * If the page is no longer dirty, or its mapping no
2499 * longer corresponds to inode we are writing (which
2500 * means it has been truncated or invalidated), or the
2501 * page is already under writeback and we are not doing
2502 * a data integrity writeback, skip the page
2503 */
2504 if (!folio_test_dirty(folio) ||
2505 (folio_test_writeback(folio) &&
2506 (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
2507 unlikely(folio->mapping != mapping)) {
2508 folio_unlock(folio);
2509 continue;
2510 }
2511
2512 folio_wait_writeback(folio);
2513 BUG_ON(folio_test_writeback(folio));
2514
2515 /*
2516 * Should never happen but for buggy code in
2517 * other subsystems that call
2518 * set_page_dirty() without properly warning
2519 * the file system first. See [1] for more
2520 * information.
2521 *
2522 * [1] https://lore.kernel.org/linux-mm/20180103100430.GE4911@quack2.suse.cz
2523 */
2524 if (!folio_buffers(folio)) {
2525 ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", folio->index);
2526 folio_clear_dirty(folio);
2527 folio_unlock(folio);
2528 continue;
2529 }
2530
2531 if (mpd->map.m_len == 0)
2532 mpd->first_page = folio->index;
2533 mpd->next_page = folio_next_index(folio);
2534 /*
2535 * Writeout when we cannot modify metadata is simple.
2536 * Just submit the page. For data=journal mode we
2537 * first handle writeout of the page for checkpoint and
2538 * only after that handle delayed page dirtying. This
2539 * makes sure current data is checkpointed to the final
2540 * location before possibly journalling it again which
2541 * is desirable when the page is frequently dirtied
2542 * through a pin.
2543 */
2544 if (!mpd->can_map) {
2545 err = mpage_submit_folio(mpd, folio);
2546 if (err < 0)
2547 goto out;
2548 /* Pending dirtying of journalled data? */
2549 if (folio_test_checked(folio)) {
2550 err = mpage_journal_page_buffers(handle,
2551 mpd, folio);
2552 if (err < 0)
2553 goto out;
2554 mpd->journalled_more_data = 1;
2555 }
2556 mpage_folio_done(mpd, folio);
2557 } else {
2558 /* Add all dirty buffers to mpd */
2559 lblk = ((ext4_lblk_t)folio->index) <<
2560 (PAGE_SHIFT - blkbits);
2561 head = folio_buffers(folio);
2562 err = mpage_process_page_bufs(mpd, head, head,
2563 lblk);
2564 if (err <= 0)
2565 goto out;
2566 err = 0;
2567 }
2568 }
2569 folio_batch_release(&fbatch);
2570 cond_resched();
2571 }
2572 mpd->scanned_until_end = 1;
2573 if (handle)
2574 ext4_journal_stop(handle);
2575 return 0;
2576 out:
2577 folio_batch_release(&fbatch);
2578 if (handle)
2579 ext4_journal_stop(handle);
2580 return err;
2581 }
2582
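/*
 * Main writeback loop: repeatedly find an extent of dirty pages needing
 * mapping, allocate blocks for it under a transaction and submit the
 * pages for IO, until the range is clean or nr_to_write is exhausted.
 */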
2583 static int ext4_do_writepages(struct mpage_da_data *mpd)
2584 {
2585 struct writeback_control *wbc = mpd->wbc;
2586 pgoff_t writeback_index = 0;
2587 long nr_to_write = wbc->nr_to_write;
2588 int range_whole = 0;
2589 int cycled = 1;
2590 handle_t *handle = NULL;
2591 struct inode *inode = mpd->inode;
2592 struct address_space *mapping = inode->i_mapping;
2593 int needed_blocks, rsv_blocks = 0, ret = 0;
2594 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2595 struct blk_plug plug;
2596 bool give_up_on_write = false;
2597
2598 trace_ext4_writepages(inode, wbc);
2599
2600 /*
2601 * No pages to write? This is mainly a kludge to avoid starting
2602 * a transaction for special inodes like the journal inode on last iput(),
2603 * because that could violate lock ordering on umount.
2604 */
2605 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2606 goto out_writepages;
2607
2608 /*
2609 * If the filesystem has aborted, it is read-only, so return
2610 * right away instead of dumping stack traces later on that
2611 * will obscure the real source of the problem. We test
2612 * fs shutdown state instead of sb->s_flag's SB_RDONLY because
2613 * the latter could be true if the filesystem is mounted
2614 * read-only, and in that case, ext4_writepages should
2615 * *never* be called, so if that ever happens, we would want
2616 * the stack trace.
2617 */
2618 if (unlikely(ext4_forced_shutdown(mapping->host->i_sb))) {
2619 ret = -EROFS;
2620 goto out_writepages;
2621 }
2622
2623 /*
2624 * If we have inline data and arrive here, it means that
2625 * we will soon create the block for the 1st page, so
2626 * we'd better clear the inline data here.
2627 */
2628 if (ext4_has_inline_data(inode)) {
2629 /* Just inode will be modified... */
2630 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2631 if (IS_ERR(handle)) {
2632 ret = PTR_ERR(handle);
2633 goto out_writepages;
2634 }
2635 BUG_ON(ext4_test_inode_state(inode,
2636 EXT4_STATE_MAY_INLINE_DATA));
2637 ext4_destroy_inline_data(handle, inode);
2638 ext4_journal_stop(handle);
2639 }
2640
2641 /*
2642 * data=journal mode does not do delalloc so we just need to writeout /
2643 * journal already mapped buffers. On the other hand we need to commit
2644 * transaction to make data stable. We expect all the data to be
2645 * already in the journal (the only exception is DMA pinned pages
2646 * dirtied behind our back), so we commit the transaction here and run the
2647 * writeback loop to checkpoint them. The checkpointing is not actually
2648 * necessary to make data persistent *but* quite a few places (extent
2649 * shifting operations, fsverity, ...) depend on being able to drop
2650 * pagecache pages after calling filemap_write_and_wait() and for that
2651 * checkpointing needs to happen.
2652 */
2653 if (ext4_should_journal_data(inode)) {
2654 mpd->can_map = 0;
2655 if (wbc->sync_mode == WB_SYNC_ALL)
2656 ext4_fc_commit(sbi->s_journal,
2657 EXT4_I(inode)->i_datasync_tid);
2658 }
2659 mpd->journalled_more_data = 0;
2660
2661 if (ext4_should_dioread_nolock(inode)) {
2662 /*
2663 * We may need to convert up to one extent per block in
2664 * the page and we may dirty the inode.
2665 */
2666 rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
2667 PAGE_SIZE >> inode->i_blkbits);
2668 }
2669
2670 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2671 range_whole = 1;
2672
2673 if (wbc->range_cyclic) {
2674 writeback_index = mapping->writeback_index;
2675 if (writeback_index)
2676 cycled = 0;
2677 mpd->first_page = writeback_index;
2678 mpd->last_page = -1;
2679 } else {
2680 mpd->first_page = wbc->range_start >> PAGE_SHIFT;
2681 mpd->last_page = wbc->range_end >> PAGE_SHIFT;
2682 }
2683
2684 ext4_io_submit_init(&mpd->io_submit, wbc);
2685 retry:
2686 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2687 tag_pages_for_writeback(mapping, mpd->first_page,
2688 mpd->last_page);
2689 blk_start_plug(&plug);
2690
2691 /*
2692 * First writeback pages that don't need mapping - we can avoid
2693 * starting a transaction unnecessarily and also avoid being blocked
2694 * in the block layer on device congestion while having a transaction
2695 * started.
2696 */
2697 mpd->do_map = 0;
2698 mpd->scanned_until_end = 0;
2699 mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2700 if (!mpd->io_submit.io_end) {
2701 ret = -ENOMEM;
2702 goto unplug;
2703 }
2704 ret = mpage_prepare_extent_to_map(mpd);
2705 /* Unlock pages we didn't use */
2706 mpage_release_unused_pages(mpd, false);
2707 /* Submit prepared bio */
2708 ext4_io_submit(&mpd->io_submit);
2709 ext4_put_io_end_defer(mpd->io_submit.io_end);
2710 mpd->io_submit.io_end = NULL;
2711 if (ret < 0)
2712 goto unplug;
2713
2714 while (!mpd->scanned_until_end && wbc->nr_to_write > 0) {
2715 /* For each extent of pages we use a new io_end */
2716 mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2717 if (!mpd->io_submit.io_end) {
2718 ret = -ENOMEM;
2719 break;
2720 }
2721
2722 WARN_ON_ONCE(!mpd->can_map);
2723 /*
2724 * We have two constraints: We find one extent to map and we
2725 * must always write out the whole page (makes a difference when
2726 * blocksize < pagesize) so that we don't block on IO when we
2727 * try to write out the rest of the page. Journalled mode is
2728 * not supported by delalloc.
2729 */
2730 BUG_ON(ext4_should_journal_data(inode));
2731 needed_blocks = ext4_da_writepages_trans_blocks(inode);
2732
2733 /* start a new transaction */
2734 handle = ext4_journal_start_with_reserve(inode,
2735 EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
2736 if (IS_ERR(handle)) {
2737 ret = PTR_ERR(handle);
2738 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2739 "%ld pages, ino %lu; err %d", __func__,
2740 wbc->nr_to_write, inode->i_ino, ret);
2741 /* Release allocated io_end */
2742 ext4_put_io_end(mpd->io_submit.io_end);
2743 mpd->io_submit.io_end = NULL;
2744 break;
2745 }
2746 mpd->do_map = 1;
2747
2748 trace_ext4_da_write_pages(inode, mpd->first_page, wbc);
2749 ret = mpage_prepare_extent_to_map(mpd);
2750 if (!ret && mpd->map.m_len)
2751 ret = mpage_map_and_submit_extent(handle, mpd,
2752 &give_up_on_write);
2753 /*
2754 * Caution: If the handle is synchronous,
2755 * ext4_journal_stop() can wait for transaction commit
2756 * to finish which may depend on writeback of pages to
2757 * complete or on page lock to be released. In that
2758 * case, we have to wait until after we have
2759 * submitted all the IO, released page locks we hold,
2760 * and dropped io_end reference (for extent conversion
2761 * to be able to complete) before stopping the handle.
2762 */
2763 if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
2764 ext4_journal_stop(handle);
2765 handle = NULL;
2766 mpd->do_map = 0;
2767 }
2768 /* Unlock pages we didn't use */
2769 mpage_release_unused_pages(mpd, give_up_on_write);
2770 /* Submit prepared bio */
2771 ext4_io_submit(&mpd->io_submit);
2772
2773 /*
2774 * Drop our io_end reference we got from init. We have
2775 * to be careful and use deferred io_end finishing if
2776 * we are still holding the transaction as we can
2777 * release the last reference to io_end which may end
2778 * up doing unwritten extent conversion.
2779 */
2780 if (handle) {
2781 ext4_put_io_end_defer(mpd->io_submit.io_end);
2782 ext4_journal_stop(handle);
2783 } else
2784 ext4_put_io_end(mpd->io_submit.io_end);
2785 mpd->io_submit.io_end = NULL;
2786
2787 if (ret == -ENOSPC && sbi->s_journal) {
2788 /*
2789 * Commit the transaction which would
2790 * free blocks released in the transaction
2791 * and try again
2792 */
2793 jbd2_journal_force_commit_nested(sbi->s_journal);
2794 ret = 0;
2795 continue;
2796 }
2797 /* Fatal error - ENOMEM, EIO... */
2798 if (ret)
2799 break;
2800 }
2801 unplug:
2802 blk_finish_plug(&plug);
2803 if (!ret && !cycled && wbc->nr_to_write > 0) {
2804 cycled = 1;
2805 mpd->last_page = writeback_index - 1;
2806 mpd->first_page = 0;
2807 goto retry;
2808 }
2809
2810 /* Update index */
2811 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2812 /*
2813 * Set the writeback_index so that range_cyclic
2814 * mode will write it back later
2815 */
2816 mapping->writeback_index = mpd->first_page;
2817
2818 out_writepages:
2819 trace_ext4_writepages_result(inode, wbc, ret,
2820 nr_to_write - wbc->nr_to_write);
2821 return ret;
2822 }
2823
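/*
 * Writeback entry point for the buffered path. Runs ext4_do_writepages()
 * under the superblock's writepages rwsem and repeats once if the
 * data=journal pass left freshly journalled data behind.
 */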
2824 static int ext4_writepages(struct address_space *mapping,
2825 struct writeback_control *wbc)
2826 {
2827 struct super_block *sb = mapping->host->i_sb;
2828 struct mpage_da_data mpd = {
2829 .inode = mapping->host,
2830 .wbc = wbc,
2831 .can_map = 1,
2832 };
2833 int ret;
2834 int alloc_ctx;
2835
2836 if (unlikely(ext4_forced_shutdown(sb)))
2837 return -EIO;
2838
2839 alloc_ctx = ext4_writepages_down_read(sb);
2840 ret = ext4_do_writepages(&mpd);
2841 /*
2842 * For data=journal writeback we could have come across pages marked
2843 * for delayed dirtying (PageChecked) which were just added to the
2844 * running transaction. Try once more to get them to stable storage.
2845 */
2846 if (!ret && mpd.journalled_more_data)
2847 ret = ext4_do_writepages(&mpd);
2848 ext4_writepages_up_read(sb, alloc_ctx);
2849
2850 return ret;
2851 }
2852
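/*
 * Called by jbd2 during transaction commit to write out an inode's dirty
 * data within the committing range; mapping new blocks is not allowed
 * here, hence can_map = 0.
 */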
2853 int ext4_normal_submit_inode_data_buffers(struct jbd2_inode *jinode)
2854 {
2855 struct writeback_control wbc = {
2856 .sync_mode = WB_SYNC_ALL,
2857 .nr_to_write = LONG_MAX,
2858 .range_start = jinode->i_dirty_start,
2859 .range_end = jinode->i_dirty_end,
2860 };
2861 struct mpage_da_data mpd = {
2862 .inode = jinode->i_vfs_inode,
2863 .wbc = &wbc,
2864 .can_map = 0,
2865 };
2866 return ext4_do_writepages(&mpd);
2867 }
2868
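/*
 * DAX writeback: no pages or bios are involved, just flush the dirty
 * mapping ranges through dax_writeback_mapping_range().
 */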
2869 static int ext4_dax_writepages(struct address_space *mapping,
2870 struct writeback_control *wbc)
2871 {
2872 int ret;
2873 long nr_to_write = wbc->nr_to_write;
2874 struct inode *inode = mapping->host;
2875 int alloc_ctx;
2876
2877 if (unlikely(ext4_forced_shutdown(inode->i_sb)))
2878 return -EIO;
2879
2880 alloc_ctx = ext4_writepages_down_read(inode->i_sb);
2881 trace_ext4_writepages(inode, wbc);
2882
2883 ret = dax_writeback_mapping_range(mapping,
2884 EXT4_SB(inode->i_sb)->s_daxdev, wbc);
2885 trace_ext4_writepages_result(inode, wbc, ret,
2886 nr_to_write - wbc->nr_to_write);
2887 ext4_writepages_up_read(inode->i_sb, alloc_ctx);
2888 return ret;
2889 }
2890
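/*
 * Decide whether to fall back from delayed allocation to immediate block
 * allocation for a write, based on the free vs. dirty cluster counters.
 */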
2891 static int ext4_nonda_switch(struct super_block *sb)
2892 {
2893 s64 free_clusters, dirty_clusters;
2894 struct ext4_sb_info *sbi = EXT4_SB(sb);
2895
2896 /*
2897 * Switch to non-delalloc mode if we are running low
2898 * on free blocks. The free block accounting via percpu
2899 * counters can get slightly wrong with percpu_counter_batch getting
2900 * accumulated on each CPU without updating global counters.
2901 * Delalloc needs accurate free block accounting, so switch
2902 * to non-delalloc mode when we are near the error range.
2903 */
2904 free_clusters =
2905 percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2906 dirty_clusters =
2907 percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2908 /*
2909 * Start pushing delalloc when 1/2 of free blocks are dirty.
2910 */
2911 if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
2912 try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
2913
2914 if (2 * free_clusters < 3 * dirty_clusters ||
2915 free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
2916 /*
2917 * free block count is less than 150% of dirty blocks,
2918 * or free block count is less than the watermark
2919 */
2920 return 1;
2921 }
2922 return 0;
2923 }
2924
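/*
 * write_begin for the delalloc path: prepare the folio with
 * ext4_da_get_block_prep(), reserving space instead of allocating
 * blocks, and fall back to the nodelalloc path when space runs low.
 */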
2925 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2926 loff_t pos, unsigned len,
2927 struct folio **foliop, void **fsdata)
2928 {
2929 int ret, retries = 0;
2930 struct folio *folio;
2931 pgoff_t index;
2932 struct inode *inode = mapping->host;
2933
2934 if (unlikely(ext4_forced_shutdown(inode->i_sb)))
2935 return -EIO;
2936
2937 index = pos >> PAGE_SHIFT;
2938
2939 if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) {
2940 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2941 return ext4_write_begin(file, mapping, pos,
2942 len, foliop, fsdata);
2943 }
2944 *fsdata = (void *)0;
2945 trace_ext4_da_write_begin(inode, pos, len);
2946
2947 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
2948 ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len,
2949 foliop, fsdata);
2950 if (ret < 0)
2951 return ret;
2952 if (ret == 1)
2953 return 0;
2954 }
2955
2956 retry:
2957 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
2958 mapping_gfp_mask(mapping));
2959 if (IS_ERR(folio))
2960 return PTR_ERR(folio);
2961
2962 #ifdef CONFIG_FS_ENCRYPTION
2963 ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep);
2964 #else
2965 ret = __block_write_begin(folio, pos, len, ext4_da_get_block_prep);
2966 #endif
2967 if (ret < 0) {
2968 folio_unlock(folio);
2969 folio_put(folio);
2970 /*
2971 * block_write_begin may have instantiated a few blocks
2972 * outside i_size. Trim these off again. Don't need
2973 * i_size_read because we hold inode lock.
2974 */
2975 if (pos + len > inode->i_size)
2976 ext4_truncate_failed_write(inode);
2977
2978 if (ret == -ENOSPC &&
2979 ext4_should_retry_alloc(inode->i_sb, &retries))
2980 goto retry;
2981 return ret;
2982 }
2983
2984 *foliop = folio;
2985 return ret;
2986 }
2987
2988 /*
2989 * Check if we should update i_disksize
2990 * when a write to the end of the file does not require block allocation
2991 */
2992 static int ext4_da_should_update_i_disksize(struct folio *folio,
2993 unsigned long offset)
2994 {
2995 struct buffer_head *bh;
2996 struct inode *inode = folio->mapping->host;
2997 unsigned int idx;
2998 int i;
2999
3000 bh = folio_buffers(folio);
3001 idx = offset >> inode->i_blkbits;
3002
3003 for (i = 0; i < idx; i++)
3004 bh = bh->b_this_page;
3005
3006 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
3007 return 0;
3008 return 1;
3009 }
3010
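/*
 * Finish a delalloc write: commit the copied data into the folio,
 * update i_size and, when needed, i_disksize, dirtying the inode in a
 * short transaction if i_disksize moved.
 */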
3011 static int ext4_da_do_write_end(struct address_space *mapping,
3012 loff_t pos, unsigned len, unsigned copied,
3013 struct folio *folio)
3014 {
3015 struct inode *inode = mapping->host;
3016 loff_t old_size = inode->i_size;
3017 bool disksize_changed = false;
3018 loff_t new_i_size;
3019
3020 if (unlikely(!folio_buffers(folio))) {
3021 folio_unlock(folio);
3022 folio_put(folio);
3023 return -EIO;
3024 }
3025 /*
3026 * block_write_end() will mark the inode as dirty with I_DIRTY_PAGES
3027 * flag, which is all that's needed to trigger page writeback.
3028 */
3029 copied = block_write_end(NULL, mapping, pos, len, copied,
3030 folio, NULL);
3031 new_i_size = pos + copied;
3032
3033 /*
3034 * It's important to update i_size while still holding folio lock,
3035 * because folio writeout could otherwise come in and zero beyond
3036 * i_size.
3037 *
3038 * Since we are holding inode lock, we are sure i_disksize <=
3039 * i_size. We also know that if i_disksize < i_size, there are
3040 * delalloc writes pending in the range up to i_size. If the end of
3041 * the current write is <= i_size, there's no need to touch
3042 * i_disksize since writeback will push i_disksize up to i_size
3043 * eventually. If the end of the current write is > i_size and
3044 * inside an allocated block which ext4_da_should_update_i_disksize()
3045 * checked, we need to update i_disksize here, since the
3046 * ext4_writepages() paths that do not allocate blocks do not update i_disksize.
3047 */
3048 if (new_i_size > inode->i_size) {
3049 unsigned long end;
3050
3051 i_size_write(inode, new_i_size);
3052 end = (new_i_size - 1) & (PAGE_SIZE - 1);
3053 if (copied && ext4_da_should_update_i_disksize(folio, end)) {
3054 ext4_update_i_disksize(inode, new_i_size);
3055 disksize_changed = true;
3056 }
3057 }
3058
3059 folio_unlock(folio);
3060 folio_put(folio);
3061
3062 if (old_size < pos)
3063 pagecache_isize_extended(inode, old_size, pos);
3064
3065 if (disksize_changed) {
3066 handle_t *handle;
3067
3068 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
3069 if (IS_ERR(handle))
3070 return PTR_ERR(handle);
3071 ext4_mark_inode_dirty(handle, inode);
3072 ext4_journal_stop(handle);
3073 }
3074
3075 return copied;
3076 }
3077
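/*
 * write_end for the delalloc path; hands off to the nodelalloc or
 * inline-data variants when write_begin chose one of those modes.
 */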
3078 static int ext4_da_write_end(struct file *file,
3079 struct address_space *mapping,
3080 loff_t pos, unsigned len, unsigned copied,
3081 struct folio *folio, void *fsdata)
3082 {
3083 struct inode *inode = mapping->host;
3084 int write_mode = (int)(unsigned long)fsdata;
3085
3086 if (write_mode == FALL_BACK_TO_NONDELALLOC)
3087 return ext4_write_end(file, mapping, pos,
3088 len, copied, folio, fsdata);
3089
3090 trace_ext4_da_write_end(inode, pos, len, copied);
3091
3092 if (write_mode != CONVERT_INLINE_DATA &&
3093 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
3094 ext4_has_inline_data(inode))
3095 return ext4_write_inline_data_end(inode, pos, len, copied,
3096 folio);
3097
3098 if (unlikely(copied < len) && !folio_test_uptodate(folio))
3099 copied = 0;
3100
3101 return ext4_da_do_write_end(mapping, pos, len, copied, folio);
3102 }
3103
3104 /*
3105 * Force all delayed allocation blocks to be allocated for a given inode.
3106 */
3107 int ext4_alloc_da_blocks(struct inode *inode)
3108 {
3109 trace_ext4_alloc_da_blocks(inode);
3110
3111 if (!EXT4_I(inode)->i_reserved_data_blocks)
3112 return 0;
3113
3114 /*
3115 * We do something simple for now. The filemap_flush() will
3116 * also start triggering a write of the data blocks, which is
3117 * not strictly speaking necessary (and for users of
3118 * laptop_mode, not even desirable). However, to do otherwise
3119 * would require replicating code paths in:
3120 *
3121 * ext4_writepages() ->
3122 * write_cache_pages() ---> (via passed in callback function)
3123 * __mpage_da_writepage() -->
3124 * mpage_add_bh_to_extent()
3125 * mpage_da_map_blocks()
3126 *
3127 * The problem is that write_cache_pages(), located in
3128 * mm/page-writeback.c, marks pages clean in preparation for
3129 * doing I/O, which is not desirable if we're not planning on
3130 * doing I/O at all.
3131 *
3132 * We could call write_cache_pages(), and then redirty all of
3133 * the pages by calling redirty_page_for_writepage() but that
3134 * would be ugly in the extreme. So instead we would need to
3135 * replicate parts of the code in the above functions,
3136 * simplifying them because we wouldn't actually intend to
3137 * write out the pages, but rather only collect contiguous
3138 * logical block extents, call the multi-block allocator, and
3139 * then update the buffer heads with the block allocations.
3140 *
3141 * For now, though, we'll cheat by calling filemap_flush(),
3142 * which will map the blocks, and start the I/O, but not
3143 * actually wait for the I/O to complete.
3144 */
3145 return filemap_flush(inode->i_mapping);
3146 }
3147
3148 /*
3149 * bmap() is special. It gets used by applications such as lilo and by
3150 * the swapper to find the on-disk block of a specific piece of data.
3151 *
3152 * Naturally, this is dangerous if the block concerned is still in the
3153 * journal. If somebody makes a swapfile on an ext4 data-journaling
3154 * filesystem and enables swap, then they may get a nasty shock when the
3155 * data getting swapped to that swapfile suddenly gets overwritten by
3156 * the original zeros written out previously to the journal and
3157 * awaiting writeback in the kernel's buffer cache.
3158 *
3159 * So, if we see any bmap calls here on a modified, data-journaled file,
3160 * take extra steps to flush any blocks which might be in the cache.
3161 */
3162 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3163 {
3164 struct inode *inode = mapping->host;
3165 sector_t ret = 0;
3166
3167 inode_lock_shared(inode);
3168 /*
3169 * We can get here for an inline file via the FIBMAP ioctl
3170 */
3171 if (ext4_has_inline_data(inode))
3172 goto out;
3173
3174 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
3175 (test_opt(inode->i_sb, DELALLOC) ||
3176 ext4_should_journal_data(inode))) {
3177 /*
3178 * With delalloc or journalled data we want to sync the file so
3179 * that we can make sure blocks are allocated for the file and the
3180 * data is in place for the user to see it.
3181 */
3182 filemap_write_and_wait(mapping);
3183 }
3184
3185 ret = iomap_bmap(mapping, block, &ext4_iomap_ops);
3186
3187 out:
3188 inode_unlock_shared(inode);
3189 return ret;
3190 }
3191
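/* Serve the folio from inline data if present, else do a regular read. */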
3192 static int ext4_read_folio(struct file *file, struct folio *folio)
3193 {
3194 int ret = -EAGAIN;
3195 struct inode *inode = folio->mapping->host;
3196
3197 trace_ext4_read_folio(inode, folio);
3198
3199 if (ext4_has_inline_data(inode))
3200 ret = ext4_readpage_inline(inode, folio);
3201
3202 if (ret == -EAGAIN)
3203 return ext4_mpage_readpages(inode, NULL, folio);
3204
3205 return ret;
3206 }
3207
3208 static void ext4_readahead(struct readahead_control *rac)
3209 {
3210 struct inode *inode = rac->mapping->host;
3211
3212 /* If the file has inline data, no need to do readahead. */
3213 if (ext4_has_inline_data(inode))
3214 return;
3215
3216 ext4_mpage_readpages(inode, rac, NULL);
3217 }
3218
3219 static void ext4_invalidate_folio(struct folio *folio, size_t offset,
3220 size_t length)
3221 {
3222 trace_ext4_invalidate_folio(folio, offset, length);
3223
3224 /* No journalling happens on data buffers when this function is used */
3225 WARN_ON(folio_buffers(folio) && buffer_jbd(folio_buffers(folio)));
3226
3227 block_invalidate_folio(folio, offset, length);
3228 }
3229
3230 static int __ext4_journalled_invalidate_folio(struct folio *folio,
3231 size_t offset, size_t length)
3232 {
3233 journal_t *journal = EXT4_JOURNAL(folio->mapping->host);
3234
3235 trace_ext4_journalled_invalidate_folio(folio, offset, length);
3236
3237 /*
3238 * If it's a full truncate we just forget about the pending dirtying
3239 */
3240 if (offset == 0 && length == folio_size(folio))
3241 folio_clear_checked(folio);
3242
3243 return jbd2_journal_invalidate_folio(journal, folio, offset, length);
3244 }
3245
3246 /* Wrapper for aops... */
3247 static void ext4_journalled_invalidate_folio(struct folio *folio,
3248 size_t offset,
3249 size_t length)
3250 {
3251 WARN_ON(__ext4_journalled_invalidate_folio(folio, offset, length) < 0);
3252 }
3253
3254 static bool ext4_release_folio(struct folio *folio, gfp_t wait)
3255 {
3256 struct inode *inode = folio->mapping->host;
3257 journal_t *journal = EXT4_JOURNAL(inode);
3258
3259 trace_ext4_release_folio(inode, folio);
3260
3261 /* Page has dirty journalled data -> cannot release */
3262 if (folio_test_checked(folio))
3263 return false;
3264 if (journal)
3265 return jbd2_journal_try_to_free_buffers(journal, folio);
3266 else
3267 return try_to_free_buffers(folio);
3268 }
3269
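/*
 * Does the inode have changes relevant to fdatasync() that are not yet
 * known to be committed (or, without a journal, still dirty)?
 */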
3270 static bool ext4_inode_datasync_dirty(struct inode *inode)
3271 {
3272 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
3273
3274 if (journal) {
3275 if (jbd2_transaction_committed(journal,
3276 EXT4_I(inode)->i_datasync_tid))
3277 return false;
3278 if (test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT))
3279 return !list_empty(&EXT4_I(inode)->i_fc_list);
3280 return true;
3281 }
3282
3283 /* Any metadata buffers to write? */
3284 if (!list_empty(&inode->i_mapping->i_private_list))
3285 return true;
3286 return inode->i_state & I_DIRTY_DATASYNC;
3287 }
3288
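/*
 * Translate an ext4_map_blocks() result into an iomap, deriving the
 * iomap type from the EXT4_MAP_* flags (unwritten must be checked before
 * mapped, see below).
 */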
3289 static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
3290 struct ext4_map_blocks *map, loff_t offset,
3291 loff_t length, unsigned int flags)
3292 {
3293 u8 blkbits = inode->i_blkbits;
3294
3295 /*
3296 * Writes that span EOF might trigger an I/O size update on completion,
3297 * so consider them to be dirty for the purpose of O_DSYNC, even if
3298 * there are no other metadata changes being made or pending.
3299 */
3300 iomap->flags = 0;
3301 if (ext4_inode_datasync_dirty(inode) ||
3302 offset + length > i_size_read(inode))
3303 iomap->flags |= IOMAP_F_DIRTY;
3304
3305 if (map->m_flags & EXT4_MAP_NEW)
3306 iomap->flags |= IOMAP_F_NEW;
3307
3308 if (flags & IOMAP_DAX)
3309 iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
3310 else
3311 iomap->bdev = inode->i_sb->s_bdev;
3312 iomap->offset = (u64) map->m_lblk << blkbits;
3313 iomap->length = (u64) map->m_len << blkbits;
3314
3315 if ((map->m_flags & EXT4_MAP_MAPPED) &&
3316 !ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3317 iomap->flags |= IOMAP_F_MERGED;
3318
3319 /*
3320 * Flags passed to ext4_map_blocks() for direct I/O writes can result
3321 * in m_flags having both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN bits
3322 * set. In order for any allocated unwritten extents to be converted
3323 * into written extents correctly within the ->end_io() handler, we
3324 * need to ensure that the iomap->type is set appropriately. Hence, the
3325 * reason why we need to check whether the EXT4_MAP_UNWRITTEN bit has
3326 * been set first.
3327 */
3328 if (map->m_flags & EXT4_MAP_UNWRITTEN) {
3329 iomap->type = IOMAP_UNWRITTEN;
3330 iomap->addr = (u64) map->m_pblk << blkbits;
3331 if (flags & IOMAP_DAX)
3332 iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
3333 } else if (map->m_flags & EXT4_MAP_MAPPED) {
3334 iomap->type = IOMAP_MAPPED;
3335 iomap->addr = (u64) map->m_pblk << blkbits;
3336 if (flags & IOMAP_DAX)
3337 iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
3338 } else if (map->m_flags & EXT4_MAP_DELAYED) {
3339 iomap->type = IOMAP_DELALLOC;
3340 iomap->addr = IOMAP_NULL_ADDR;
3341 } else {
3342 iomap->type = IOMAP_HOLE;
3343 iomap->addr = IOMAP_NULL_ADDR;
3344 }
3345 }
3346
3347 static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
3348 unsigned int flags)
3349 {
3350 handle_t *handle;
3351 u8 blkbits = inode->i_blkbits;
3352 int ret, dio_credits, m_flags = 0, retries = 0;
3353
3354 /*
3355 * Trim the mapping request to the maximum value that we can map at
3356 * once for direct I/O.
3357 */
3358 if (map->m_len > DIO_MAX_BLOCKS)
3359 map->m_len = DIO_MAX_BLOCKS;
3360 dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
3361
3362 retry:
3363 /*
3364 * Either we allocate blocks and then don't get an unwritten extent, in
3365 * which case we have reserved enough credits; or the blocks are already
3366 * allocated and unwritten, in which case the extent conversion fits
3367 * into the credits as well.
3368 */
3369 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
3370 if (IS_ERR(handle))
3371 return PTR_ERR(handle);
3372
3373 /*
3374 * DAX and direct I/O are the only two operations that are currently
3375 * supported with IOMAP_WRITE.
3376 */
3377 WARN_ON(!(flags & (IOMAP_DAX | IOMAP_DIRECT)));
3378 if (flags & IOMAP_DAX)
3379 m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
3380 /*
3381 * We use i_size instead of i_disksize here because delalloc writeback
3382 * can complete at any point during the I/O and subsequently push the
3383 * i_disksize out to i_size. This could be beyond where direct I/O is
3384 * happening and thus expose allocated blocks to direct I/O reads.
3385 */
3386 else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode))
3387 m_flags = EXT4_GET_BLOCKS_CREATE;
3388 else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3389 m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;
3390
3391 ret = ext4_map_blocks(handle, inode, map, m_flags);
3392
3393 /*
3394 * We cannot fill holes in indirect tree based inodes as that could
3395 * expose stale data in the case of a crash. Use the magic error code
3396 * to fall back to buffered I/O.
3397 */
3398 if (!m_flags && !ret)
3399 ret = -ENOTBLK;
3400
3401 ext4_journal_stop(handle);
3402 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3403 goto retry;
3404
3405 return ret;
3406 }
3407
3408
3409 static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
3410 unsigned flags, struct iomap *iomap, struct iomap *srcmap)
3411 {
3412 int ret;
3413 struct ext4_map_blocks map;
3414 u8 blkbits = inode->i_blkbits;
3415
3416 if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3417 return -EINVAL;
3418
3419 if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
3420 return -ERANGE;
3421
3422 /*
3423 * Calculate the first and last logical blocks respectively.
3424 */
3425 map.m_lblk = offset >> blkbits;
3426 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3427 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
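/*
 * For example, with 4096-byte blocks (blkbits == 12), offset = 5000 and
 * length = 10000 give map.m_lblk = 1 and map.m_len = 3: logical blocks
 * 1..3 cover the byte range 4096..16383, which contains 5000..14999.
 */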
3428
3429 if (flags & IOMAP_WRITE) {
3430 /*
3431 * We check here if the blocks are already allocated, then we
3432 * don't need to start a journal txn and we can directly return
3433 * the mapping information. This could boost performance
3434 * especially in multi-threaded overwrite requests.
3435 */
3436 if (offset + length <= i_size_read(inode)) {
3437 ret = ext4_map_blocks(NULL, inode, &map, 0);
3438 if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
3439 goto out;
3440 }
3441 ret = ext4_iomap_alloc(inode, &map, flags);
3442 } else {
3443 ret = ext4_map_blocks(NULL, inode, &map, 0);
3444 }
3445
3446 if (ret < 0)
3447 return ret;
3448 out:
3449 /*
3450 * When inline encryption is enabled, sometimes I/O to an encrypted file
3451 * has to be broken up to guarantee DUN contiguity. Handle this by
3452 * limiting the length of the mapping returned.
3453 */
3454 map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
3455
3456 ext4_set_iomap(inode, iomap, &map, offset, length, flags);
3457
3458 return 0;
3459 }
3460
3461 static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset,
3462 loff_t length, unsigned flags, struct iomap *iomap,
3463 struct iomap *srcmap)
3464 {
3465 int ret;
3466
3467 /*
3468 * Even for writes we don't need to allocate blocks, so just pretend
3469 * we are reading to save overhead of starting a transaction.
3470 */
3471 flags &= ~IOMAP_WRITE;
3472 ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap);
3473 WARN_ON_ONCE(!ret && iomap->type != IOMAP_MAPPED);
3474 return ret;
3475 }
3476
3477 static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
3478 ssize_t written, unsigned flags, struct iomap *iomap)
3479 {
3480 /*
3481 * Check to see whether an error occurred while writing out the data to
3482 * the allocated blocks. If so, return the magic error code so that we
3483 * fall back to buffered I/O and attempt to complete the remainder of
3484 * the I/O. Any blocks that may have been allocated in preparation for
3485 * the direct I/O will be reused during buffered I/O.
3486 */
3487 if (flags & (IOMAP_WRITE | IOMAP_DIRECT) && written == 0)
3488 return -ENOTBLK;
3489
3490 return 0;
3491 }
3492
3493 const struct iomap_ops ext4_iomap_ops = {
3494 .iomap_begin = ext4_iomap_begin,
3495 .iomap_end = ext4_iomap_end,
3496 };
3497
3498 const struct iomap_ops ext4_iomap_overwrite_ops = {
3499 .iomap_begin = ext4_iomap_overwrite_begin,
3500 .iomap_end = ext4_iomap_end,
3501 };
3502
3503 static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
3504 loff_t length, unsigned int flags,
3505 struct iomap *iomap, struct iomap *srcmap)
3506 {
3507 int ret;
3508 struct ext4_map_blocks map;
3509 u8 blkbits = inode->i_blkbits;
3510
3511 if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3512 return -EINVAL;
3513
3514 if (ext4_has_inline_data(inode)) {
3515 ret = ext4_inline_data_iomap(inode, iomap);
3516 if (ret != -EAGAIN) {
3517 if (ret == 0 && offset >= iomap->length)
3518 ret = -ENOENT;
3519 return ret;
3520 }
3521 }
3522
3523 /*
3524 * Calculate the first and last logical block respectively.
3525 */
3526 map.m_lblk = offset >> blkbits;
3527 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3528 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
3529
3530 /*
3531 * Fiemap callers may call for an offset beyond s_bitmap_maxbytes,
3532 * so handle that case here instead of querying ext4_map_blocks(),
3533 * which would warn about it and return an -EIO error.
3535 */
3536 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
3537 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3538
3539 if (offset >= sbi->s_bitmap_maxbytes) {
3540 map.m_flags = 0;
3541 goto set_iomap;
3542 }
3543 }
3544
3545 ret = ext4_map_blocks(NULL, inode, &map, 0);
3546 if (ret < 0)
3547 return ret;
3548 set_iomap:
3549 ext4_set_iomap(inode, iomap, &map, offset, length, flags);
3550
3551 return 0;
3552 }
3553
3554 const struct iomap_ops ext4_iomap_report_ops = {
3555 .iomap_begin = ext4_iomap_begin_report,
3556 };
3557
3558 /*
3559 * For data=journal mode, folio should be marked dirty only when it was
3560 * writeably mapped. When that happens, it was already attached to the
3561 * transaction and marked as jbddirty (we take care of this in
3562 * ext4_page_mkwrite()). On transaction commit, we writeprotect page mappings
3563 * so we should have nothing to do here, except for the case when someone
3564 * had the page pinned and dirtied the page through this pin (e.g. by doing
3565 * direct IO to it). In that case we'd need to attach buffers here to the
3566 * transaction but we cannot due to lock ordering. We cannot just dirty the
3567 * folio and leave attached buffers clean, because the buffers' dirty state is
3568 * "definitive". We cannot just set the buffers dirty or jbddirty because all
3569 * the journalling code will explode. So what we do is to mark the folio
3570 * "pending dirty" and next time ext4_writepages() is called, attach buffers
3571 * to the transaction appropriately.
3572 */
3573 static bool ext4_journalled_dirty_folio(struct address_space *mapping,
3574 struct folio *folio)
3575 {
3576 WARN_ON_ONCE(!folio_buffers(folio));
3577 if (folio_maybe_dma_pinned(folio))
3578 folio_set_checked(folio);
3579 return filemap_dirty_folio(mapping, folio);
3580 }
3581
3582 static bool ext4_dirty_folio(struct address_space *mapping, struct folio *folio)
3583 {
3584 WARN_ON_ONCE(!folio_test_locked(folio) && !folio_test_dirty(folio));
3585 WARN_ON_ONCE(!folio_buffers(folio));
3586 return block_dirty_folio(mapping, folio);
3587 }
3588
3589 static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
3590 struct file *file, sector_t *span)
3591 {
3592 return iomap_swapfile_activate(sis, file, span,
3593 &ext4_iomap_report_ops);
3594 }
3595
3596 static const struct address_space_operations ext4_aops = {
3597 .read_folio = ext4_read_folio,
3598 .readahead = ext4_readahead,
3599 .writepages = ext4_writepages,
3600 .write_begin = ext4_write_begin,
3601 .write_end = ext4_write_end,
3602 .dirty_folio = ext4_dirty_folio,
3603 .bmap = ext4_bmap,
3604 .invalidate_folio = ext4_invalidate_folio,
3605 .release_folio = ext4_release_folio,
3606 .migrate_folio = buffer_migrate_folio,
3607 .is_partially_uptodate = block_is_partially_uptodate,
3608 .error_remove_folio = generic_error_remove_folio,
3609 .swap_activate = ext4_iomap_swap_activate,
3610 };
3611
3612 static const struct address_space_operations ext4_journalled_aops = {
3613 .read_folio = ext4_read_folio,
3614 .readahead = ext4_readahead,
3615 .writepages = ext4_writepages,
3616 .write_begin = ext4_write_begin,
3617 .write_end = ext4_journalled_write_end,
3618 .dirty_folio = ext4_journalled_dirty_folio,
3619 .bmap = ext4_bmap,
3620 .invalidate_folio = ext4_journalled_invalidate_folio,
3621 .release_folio = ext4_release_folio,
3622 .migrate_folio = buffer_migrate_folio_norefs,
3623 .is_partially_uptodate = block_is_partially_uptodate,
3624 .error_remove_folio = generic_error_remove_folio,
3625 .swap_activate = ext4_iomap_swap_activate,
3626 };
3627
3628 static const struct address_space_operations ext4_da_aops = {
3629 .read_folio = ext4_read_folio,
3630 .readahead = ext4_readahead,
3631 .writepages = ext4_writepages,
3632 .write_begin = ext4_da_write_begin,
3633 .write_end = ext4_da_write_end,
3634 .dirty_folio = ext4_dirty_folio,
3635 .bmap = ext4_bmap,
3636 .invalidate_folio = ext4_invalidate_folio,
3637 .release_folio = ext4_release_folio,
3638 .migrate_folio = buffer_migrate_folio,
3639 .is_partially_uptodate = block_is_partially_uptodate,
3640 .error_remove_folio = generic_error_remove_folio,
3641 .swap_activate = ext4_iomap_swap_activate,
3642 };
3643
3644 static const struct address_space_operations ext4_dax_aops = {
3645 .writepages = ext4_dax_writepages,
3646 .dirty_folio = noop_dirty_folio,
3647 .bmap = ext4_bmap,
3648 .swap_activate = ext4_iomap_swap_activate,
3649 };
3650
3651 void ext4_set_aops(struct inode *inode)
3652 {
3653 switch (ext4_inode_journal_mode(inode)) {
3654 case EXT4_INODE_ORDERED_DATA_MODE:
3655 case EXT4_INODE_WRITEBACK_DATA_MODE:
3656 break;
3657 case EXT4_INODE_JOURNAL_DATA_MODE:
3658 inode->i_mapping->a_ops = &ext4_journalled_aops;
3659 return;
3660 default:
3661 BUG();
3662 }
3663 if (IS_DAX(inode))
3664 inode->i_mapping->a_ops = &ext4_dax_aops;
3665 else if (test_opt(inode->i_sb, DELALLOC))
3666 inode->i_mapping->a_ops = &ext4_da_aops;
3667 else
3668 inode->i_mapping->a_ops = &ext4_aops;
3669 }
3670
3671 /*
3672 * Here we can't skip an unwritten buffer even though it usually reads zero
3673 * because it might have data in pagecache (e.g., if called from ext4_zero_range,
3674 * ext4_punch_hole, etc) which needs to be properly zeroed out. Otherwise a
3675 * racing writeback can come later and flush the stale pagecache to disk.
3676 */
3677 static int __ext4_block_zero_page_range(handle_t *handle,
3678 struct address_space *mapping, loff_t from, loff_t length)
3679 {
3680 ext4_fsblk_t index = from >> PAGE_SHIFT;
3681 unsigned offset = from & (PAGE_SIZE-1);
3682 unsigned blocksize, pos;
3683 ext4_lblk_t iblock;
3684 struct inode *inode = mapping->host;
3685 struct buffer_head *bh;
3686 struct folio *folio;
3687 int err = 0;
3688
3689 folio = __filemap_get_folio(mapping, from >> PAGE_SHIFT,
3690 FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
3691 mapping_gfp_constraint(mapping, ~__GFP_FS));
3692 if (IS_ERR(folio))
3693 return PTR_ERR(folio);
3694
3695 blocksize = inode->i_sb->s_blocksize;
3696
3697 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
3698
3699 bh = folio_buffers(folio);
3700 if (!bh)
3701 bh = create_empty_buffers(folio, blocksize, 0);
3702
3703 /* Find the buffer that contains "offset" */
3704 pos = blocksize;
3705 while (offset >= pos) {
3706 bh = bh->b_this_page;
3707 iblock++;
3708 pos += blocksize;
3709 }
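/*
 * For example, with blocksize = 1024 and offset = 2500 the loop above
 * advances twice (pos: 1024 -> 2048 -> 3072), leaving bh pointing at
 * the third buffer, which covers bytes 2048..3071 of the folio.
 */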
3710 if (buffer_freed(bh)) {
3711 BUFFER_TRACE(bh, "freed: skip");
3712 goto unlock;
3713 }
3714 if (!buffer_mapped(bh)) {
3715 BUFFER_TRACE(bh, "unmapped");
3716 ext4_get_block(inode, iblock, bh, 0);
3717 /* unmapped? It's a hole - nothing to do */
3718 if (!buffer_mapped(bh)) {
3719 BUFFER_TRACE(bh, "still unmapped");
3720 goto unlock;
3721 }
3722 }
3723
3724 /* Ok, it's mapped. Make sure it's up-to-date */
3725 if (folio_test_uptodate(folio))
3726 set_buffer_uptodate(bh);
3727
3728 if (!buffer_uptodate(bh)) {
3729 err = ext4_read_bh_lock(bh, 0, true);
3730 if (err)
3731 goto unlock;
3732 if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
3733 /* We expect the key to be set. */
3734 BUG_ON(!fscrypt_has_encryption_key(inode));
3735 err = fscrypt_decrypt_pagecache_blocks(folio,
3736 blocksize,
3737 bh_offset(bh));
3738 if (err) {
3739 clear_buffer_uptodate(bh);
3740 goto unlock;
3741 }
3742 }
3743 }
3744 if (ext4_should_journal_data(inode)) {
3745 BUFFER_TRACE(bh, "get write access");
3746 err = ext4_journal_get_write_access(handle, inode->i_sb, bh,
3747 EXT4_JTR_NONE);
3748 if (err)
3749 goto unlock;
3750 }
3751 folio_zero_range(folio, offset, length);
3752 BUFFER_TRACE(bh, "zeroed end of block");
3753
3754 if (ext4_should_journal_data(inode)) {
3755 err = ext4_dirty_journalled_data(handle, bh);
3756 } else {
3757 err = 0;
3758 mark_buffer_dirty(bh);
3759 if (ext4_should_order_data(inode))
3760 err = ext4_jbd2_inode_add_write(handle, inode, from,
3761 length);
3762 }
3763
3764 unlock:
3765 folio_unlock(folio);
3766 folio_put(folio);
3767 return err;
3768 }
3769
3770 /*
3771 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
3772 * starting from file offset 'from'. The range to be zeroed must be
3773 * contained within one block. If the specified range exceeds the end
3774 * of the block, it will be shortened to the end of the block that
3775 * corresponds to 'from'.
3776 */
3777 static int ext4_block_zero_page_range(handle_t *handle,
3778 struct address_space *mapping, loff_t from, loff_t length)
3779 {
3780 struct inode *inode = mapping->host;
3781 unsigned offset = from & (PAGE_SIZE-1);
3782 unsigned blocksize = inode->i_sb->s_blocksize;
3783 unsigned max = blocksize - (offset & (blocksize - 1));
3784
3785 /*
3786 * correct length if it does not fall between
3787 * 'from' and the end of the block
3788 */
3789 if (length > max || length < 0)
3790 length = max;
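/*
 * For example, with 4096-byte pages and blocks and from = 5000: the
 * offset within the block is 904, so max = 3192 and at most bytes
 * 5000..8191 (the remainder of that block) are zeroed.
 */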
3791
3792 if (IS_DAX(inode)) {
3793 return dax_zero_range(inode, from, length, NULL,
3794 &ext4_iomap_ops);
3795 }
3796 return __ext4_block_zero_page_range(handle, mapping, from, length);
3797 }
3798
3799 /*
3800 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3801 * up to the end of the block which corresponds to `from'.
3802 * This is required during truncate. We need to physically zero the tail end
3803 * of that block so it doesn't yield old data if the file is later grown.
3804 */
3805 static int ext4_block_truncate_page(handle_t *handle,
3806 struct address_space *mapping, loff_t from)
3807 {
3808 unsigned offset = from & (PAGE_SIZE-1);
3809 unsigned length;
3810 unsigned blocksize;
3811 struct inode *inode = mapping->host;
3812
3813 /* If we are processing an encrypted inode during orphan list handling */
3814 if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
3815 return 0;
3816
3817 blocksize = inode->i_sb->s_blocksize;
3818 length = blocksize - (offset & (blocksize - 1));
3819
3820 return ext4_block_zero_page_range(handle, mapping, from, length);
3821 }
3822
3823 int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
3824 loff_t lstart, loff_t length)
3825 {
3826 struct super_block *sb = inode->i_sb;
3827 struct address_space *mapping = inode->i_mapping;
3828 unsigned partial_start, partial_end;
3829 ext4_fsblk_t start, end;
3830 loff_t byte_end = (lstart + length - 1);
3831 int err = 0;
3832
3833 partial_start = lstart & (sb->s_blocksize - 1);
3834 partial_end = byte_end & (sb->s_blocksize - 1);
3835
3836 start = lstart >> sb->s_blocksize_bits;
3837 end = byte_end >> sb->s_blocksize_bits;
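/*
 * For example, with blocksize = 4096, lstart = 5000 and length = 10000:
 * byte_end = 14999, partial_start = 904, partial_end = 2711, start = 1
 * and end = 3, so the head (bytes 5000..8191) and the tail (bytes
 * 12288..14999) are zeroed by the two partial-range cases below.
 */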
3838
3839 /* Handle partial zero within the single block */
3840 if (start == end &&
3841 (partial_start || (partial_end != sb->s_blocksize - 1))) {
3842 err = ext4_block_zero_page_range(handle, mapping,
3843 lstart, length);
3844 return err;
3845 }
3846 /* Handle partial zero out on the start of the range */
3847 if (partial_start) {
3848 err = ext4_block_zero_page_range(handle, mapping,
3849 lstart, sb->s_blocksize);
3850 if (err)
3851 return err;
3852 }
3853 /* Handle partial zero out on the end of the range */
3854 if (partial_end != sb->s_blocksize - 1)
3855 err = ext4_block_zero_page_range(handle, mapping,
3856 byte_end - partial_end,
3857 partial_end + 1);
3858 return err;
3859 }
3860
3861 int ext4_can_truncate(struct inode *inode)
3862 {
3863 if (S_ISREG(inode->i_mode))
3864 return 1;
3865 if (S_ISDIR(inode->i_mode))
3866 return 1;
3867 if (S_ISLNK(inode->i_mode))
3868 return !ext4_inode_is_fast_symlink(inode);
3869 return 0;
3870 }
3871
3872 /*
3873 * We have to make sure i_disksize gets properly updated before we truncate
3874 * page cache due to hole punching or zero range. Otherwise the i_disksize
3875 * update can get lost, as it may have been postponed to writeback
3876 * submission, which will never happen once we truncate the page cache.
3877 */
3878 int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
3879 loff_t len)
3880 {
3881 handle_t *handle;
3882 int ret;
3883
3884 loff_t size = i_size_read(inode);
3885
3886 WARN_ON(!inode_is_locked(inode));
3887 if (offset > size || offset + len < size)
3888 return 0;
3889
3890 if (EXT4_I(inode)->i_disksize >= size)
3891 return 0;
3892
3893 handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
3894 if (IS_ERR(handle))
3895 return PTR_ERR(handle);
3896 ext4_update_i_disksize(inode, size);
3897 ret = ext4_mark_inode_dirty(handle, inode);
3898 ext4_journal_stop(handle);
3899
3900 return ret;
3901 }
3902
3903 static void ext4_wait_dax_page(struct inode *inode)
3904 {
3905 filemap_invalidate_unlock(inode->i_mapping);
3906 schedule();
3907 filemap_invalidate_lock(inode->i_mapping);
3908 }
3909
3910 int ext4_break_layouts(struct inode *inode)
3911 {
3912 struct page *page;
3913 int error;
3914
3915 if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock)))
3916 return -EINVAL;
3917
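/*
 * dax_layout_busy_page() returns a page whose refcount is still
 * elevated by some DAX user (e.g. an ongoing DMA transfer). Sleep,
 * dropping the invalidate_lock via ext4_wait_dax_page(), until only
 * the page cache reference (refcount == 1) remains.
 */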
3918 do {
3919 page = dax_layout_busy_page(inode->i_mapping);
3920 if (!page)
3921 return 0;
3922
3923 error = ___wait_var_event(&page->_refcount,
3924 atomic_read(&page->_refcount) == 1,
3925 TASK_INTERRUPTIBLE, 0, 0,
3926 ext4_wait_dax_page(inode));
3927 } while (error == 0);
3928
3929 return error;
3930 }
3931
3932 /*
3933 * ext4_punch_hole: punches a hole in a file by releasing the blocks
3934 * associated with the given offset and length
3935 *
3936 * @inode: File inode
3937 * @offset: The offset where the hole will begin
3938 * @len: The length of the hole
3939 *
3940 * Returns: 0 on success or negative on failure
3941 */
3942
3943 int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
3944 {
3945 struct inode *inode = file_inode(file);
3946 struct super_block *sb = inode->i_sb;
3947 ext4_lblk_t first_block, stop_block;
3948 struct address_space *mapping = inode->i_mapping;
3949 loff_t first_block_offset, last_block_offset, max_length;
3950 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3951 handle_t *handle;
3952 unsigned int credits;
3953 int ret = 0, ret2 = 0;
3954
3955 trace_ext4_punch_hole(inode, offset, length, 0);
3956
3957 /*
3958 * Write out all dirty pages to avoid race conditions,
3959 * then release them.
3960 */
3961 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
3962 ret = filemap_write_and_wait_range(mapping, offset,
3963 offset + length - 1);
3964 if (ret)
3965 return ret;
3966 }
3967
3968 inode_lock(inode);
3969
3970 /* No need to punch hole beyond i_size */
3971 if (offset >= inode->i_size)
3972 goto out_mutex;
3973
3974 /*
3975 * If the hole extends beyond i_size, set the hole
3976 * to end after the page that contains i_size
3977 */
3978 if (offset + length > inode->i_size) {
3979 length = inode->i_size +
3980 PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
3981 offset;
3982 }
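/*
 * For example, with PAGE_SIZE = 4096, i_size = 10000 and offset = 8000
 * the hole end is pushed out to 12288 (the end of the page containing
 * i_size), so length becomes 4288.
 */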
3983
3984 /*
3985 * For punch hole, offset + length must stay at least one block below
3986 * s_bitmap_maxbytes. Adjust the length if it goes beyond that limit.
3987 */
3988 max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
3989 if (offset + length > max_length)
3990 length = max_length - offset;
3991
3992 if (offset & (sb->s_blocksize - 1) ||
3993 (offset + length) & (sb->s_blocksize - 1)) {
3994 /*
3995 * Attach jinode to the inode for jbd2 if we do any zeroing of a
3996 * partial block.
3997 */
3998 ret = ext4_inode_attach_jinode(inode);
3999 if (ret < 0)
4000 goto out_mutex;
4002 }
4003
4004 /* Wait for all existing dio workers; newcomers will block on i_rwsem */
4005 inode_dio_wait(inode);
4006
4007 ret = file_modified(file);
4008 if (ret)
4009 goto out_mutex;
4010
4011 /*
4012 * Prevent page faults from reinstantiating pages we have released from
4013 * page cache.
4014 */
4015 filemap_invalidate_lock(mapping);
4016
4017 ret = ext4_break_layouts(inode);
4018 if (ret)
4019 goto out_dio;
4020
4021 first_block_offset = round_up(offset, sb->s_blocksize);
4022 last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
4023
4024 /* Now release the pages and zero the block-aligned parts of the pages */
4025 if (last_block_offset > first_block_offset) {
4026 ret = ext4_update_disksize_before_punch(inode, offset, length);
4027 if (ret)
4028 goto out_dio;
4029 truncate_pagecache_range(inode, first_block_offset,
4030 last_block_offset);
4031 }
4032
4033 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4034 credits = ext4_writepage_trans_blocks(inode);
4035 else
4036 credits = ext4_blocks_for_truncate(inode);
4037 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4038 if (IS_ERR(handle)) {
4039 ret = PTR_ERR(handle);
4040 ext4_std_error(sb, ret);
4041 goto out_dio;
4042 }
4043
4044 ret = ext4_zero_partial_blocks(handle, inode, offset,
4045 length);
4046 if (ret)
4047 goto out_stop;
4048
4049 first_block = (offset + sb->s_blocksize - 1) >>
4050 EXT4_BLOCK_SIZE_BITS(sb);
4051 stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
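/*
 * For example, with 4096-byte blocks, offset = 5000 and length = 10000:
 * first_block = 2 (5000 rounded up) and stop_block = 3 (15000 >> 12),
 * so only block 2 lies fully inside the hole and is removed below; the
 * partial head and tail blocks were zeroed above instead.
 */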
4052
4053 /* If there are blocks to remove, do it */
4054 if (stop_block > first_block) {
4055 ext4_lblk_t hole_len = stop_block - first_block;
4056
4057 down_write(&EXT4_I(inode)->i_data_sem);
4058 ext4_discard_preallocations(inode);
4059
4060 ext4_es_remove_extent(inode, first_block, hole_len);
4061
4062 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4063 ret = ext4_ext_remove_space(inode, first_block,
4064 stop_block - 1);
4065 else
4066 ret = ext4_ind_remove_space(handle, inode, first_block,
4067 stop_block);
4068
4069 ext4_es_insert_extent(inode, first_block, hole_len, ~0,
4070 EXTENT_STATUS_HOLE);
4071 up_write(&EXT4_I(inode)->i_data_sem);
4072 }
4073 ext4_fc_track_range(handle, inode, first_block, stop_block);
4074 if (IS_SYNC(inode))
4075 ext4_handle_sync(handle);
4076
4077 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
4078 ret2 = ext4_mark_inode_dirty(handle, inode);
4079 if (unlikely(ret2))
4080 ret = ret2;
4081 if (ret >= 0)
4082 ext4_update_inode_fsync_trans(handle, inode, 1);
4083 out_stop:
4084 ext4_journal_stop(handle);
4085 out_dio:
4086 filemap_invalidate_unlock(mapping);
4087 out_mutex:
4088 inode_unlock(inode);
4089 return ret;
4090 }
4091
4092 int ext4_inode_attach_jinode(struct inode *inode)
4093 {
4094 struct ext4_inode_info *ei = EXT4_I(inode);
4095 struct jbd2_inode *jinode;
4096
4097 if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
4098 return 0;
4099
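/*
 * Allocate the jinode before taking i_lock, since the allocation may
 * sleep. If another task attached a jinode to the inode meanwhile, we
 * lose the race and free our copy below.
 */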
4100 jinode = jbd2_alloc_inode(GFP_KERNEL);
4101 spin_lock(&inode->i_lock);
4102 if (!ei->jinode) {
4103 if (!jinode) {
4104 spin_unlock(&inode->i_lock);
4105 return -ENOMEM;
4106 }
4107 ei->jinode = jinode;
4108 jbd2_journal_init_jbd_inode(ei->jinode, inode);
4109 jinode = NULL;
4110 }
4111 spin_unlock(&inode->i_lock);
4112 if (unlikely(jinode != NULL))
4113 jbd2_free_inode(jinode);
4114 return 0;
4115 }
4116
4117 /*
4118 * ext4_truncate()
4119 *
4120 * We block out ext4_get_block() block instantiations across the entire
4121 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
4122 * simultaneously on behalf of the same inode.
4123 *
4124 * As we work through the truncate and commit bits of it to the journal there
4125 * is one core, guiding principle: the file's tree must always be consistent on
4126 * disk. We must be able to restart the truncate after a crash.
4127 *
4128 * The file's tree may be transiently inconsistent in memory (although it
4129 * probably isn't), but whenever we close off and commit a journal transaction,
4130 * the contents of (the filesystem + the journal) must be consistent and
4131 * restartable. It's pretty simple, really: bottom up, right to left (although
4132 * left-to-right works OK too).
4133 *
4134 * Note that at recovery time, journal replay occurs *before* the restart of
4135 * truncate against the orphan inode list.
4136 *
4137 * The committed inode has the new, desired i_size (which is the same as
4138 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
4139 * that this inode's truncate did not complete and it will again call
4140 * ext4_truncate() to have another go. So there will be instantiated blocks
4141 * to the right of the truncation point in a crashed ext4 filesystem. But
4142 * that's fine - as long as they are linked from the inode, the post-crash
4143 * ext4_truncate() run will find them and release them.
4144 */
4145 int ext4_truncate(struct inode *inode)
4146 {
4147 struct ext4_inode_info *ei = EXT4_I(inode);
4148 unsigned int credits;
4149 int err = 0, err2;
4150 handle_t *handle;
4151 struct address_space *mapping = inode->i_mapping;
4152
4153 /*
4154 * There is a possibility that we're either freeing the inode
4155 * or it's a completely new inode. In those cases we might not
4156 * have i_rwsem locked because it's not necessary.
4157 */
4158 if (!(inode->i_state & (I_NEW|I_FREEING)))
4159 WARN_ON(!inode_is_locked(inode));
4160 trace_ext4_truncate_enter(inode);
4161
4162 if (!ext4_can_truncate(inode))
4163 goto out_trace;
4164
4165 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
4166 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
4167
4168 if (ext4_has_inline_data(inode)) {
4169 int has_inline = 1;
4170
4171 err = ext4_inline_data_truncate(inode, &has_inline);
4172 if (err || has_inline)
4173 goto out_trace;
4174 }
4175
4176 /* If we zero out the tail of the page, we have to create a jinode for jbd2 */
4177 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
4178 err = ext4_inode_attach_jinode(inode);
4179 if (err)
4180 goto out_trace;
4181 }
4182
4183 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4184 credits = ext4_writepage_trans_blocks(inode);
4185 else
4186 credits = ext4_blocks_for_truncate(inode);
4187
4188 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4189 if (IS_ERR(handle)) {
4190 err = PTR_ERR(handle);
4191 goto out_trace;
4192 }
4193
4194 if (inode->i_size & (inode->i_sb->s_blocksize - 1))
4195 ext4_block_truncate_page(handle, mapping, inode->i_size);
4196
4197 /*
4198 * We add the inode to the orphan list, so that if this
4199 * truncate spans multiple transactions, and we crash, we will
4200 * resume the truncate when the filesystem recovers. It also
4201 * marks the inode dirty, to catch the new size.
4202 *
4203 * Implication: the file must always be in a sane, consistent
4204 * truncatable state while each transaction commits.
4205 */
4206 err = ext4_orphan_add(handle, inode);
4207 if (err)
4208 goto out_stop;
4209
4210 down_write(&EXT4_I(inode)->i_data_sem);
4211
4212 ext4_discard_preallocations(inode);
4213
4214 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4215 err = ext4_ext_truncate(handle, inode);
4216 else
4217 ext4_ind_truncate(handle, inode);
4218
4219 up_write(&ei->i_data_sem);
4220 if (err)
4221 goto out_stop;
4222
4223 if (IS_SYNC(inode))
4224 ext4_handle_sync(handle);
4225
4226 out_stop:
4227 /*
4228 * If this was a simple ftruncate() and the file will remain alive,
4229 * then we need to clear up the orphan record which we created above.
4230 * However, if this was a real unlink then we were called by
4231 * ext4_evict_inode(), and we allow that function to clean up the
4232 * orphan info for us.
4233 */
4234 if (inode->i_nlink)
4235 ext4_orphan_del(handle, inode);
4236
4237 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
4238 err2 = ext4_mark_inode_dirty(handle, inode);
4239 if (unlikely(err2 && !err))
4240 err = err2;
4241 ext4_journal_stop(handle);
4242
4243 out_trace:
4244 trace_ext4_truncate_exit(inode);
4245 return err;
4246 }
4247
4248 static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
4249 {
4250 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4251 return inode_peek_iversion_raw(inode);
4252 else
4253 return inode_peek_iversion(inode);
4254 }
4255
4256 static int ext4_inode_blocks_set(struct ext4_inode *raw_inode,
4257 struct ext4_inode_info *ei)
4258 {
4259 struct inode *inode = &(ei->vfs_inode);
4260 u64 i_blocks = READ_ONCE(inode->i_blocks);
4261 struct super_block *sb = inode->i_sb;
4262
4263 if (i_blocks <= ~0U) {
4264 /*
4265 * i_blocks can be represented in a 32 bit variable
4266 * as a multiple of 512 bytes
4267 */
4268 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4269 raw_inode->i_blocks_high = 0;
4270 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4271 return 0;
4272 }
4273
4274 /*
4275 * This should never happen since sb->s_maxbytes should not have
4276 * allowed this; sb->s_maxbytes was set according to the huge_file
4277 * feature in ext4_fill_super().
4278 */
4279 if (!ext4_has_feature_huge_file(sb))
4280 return -EFSCORRUPTED;
4281
4282 if (i_blocks <= 0xffffffffffffULL) {
4283 /*
4284 * i_blocks can be represented in a 48 bit variable
4285 * as a multiple of 512 bytes
4286 */
4287 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4288 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4289 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4290 } else {
4291 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4292 /* i_blocks is stored in units of the file system block size */
4293 i_blocks = i_blocks >> (inode->i_blkbits - 9);
4294 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4295 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4296 }
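/*
 * For example, in the 48-bit case above, i_blocks = 0x123456789 is
 * stored as i_blocks_lo = 0x23456789 and i_blocks_high = 0x0001. Only
 * when even 48 bits do not suffice is the count rescaled to filesystem
 * block units and EXT4_INODE_HUGE_FILE set.
 */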
4297 return 0;
4298 }
4299
4300 static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode)
4301 {
4302 struct ext4_inode_info *ei = EXT4_I(inode);
4303 uid_t i_uid;
4304 gid_t i_gid;
4305 projid_t i_projid;
4306 int block;
4307 int err;
4308
4309 err = ext4_inode_blocks_set(raw_inode, ei);
4310
4311 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4312 i_uid = i_uid_read(inode);
4313 i_gid = i_gid_read(inode);
4314 i_projid = from_kprojid(&init_user_ns, ei->i_projid);
4315 if (!(test_opt(inode->i_sb, NO_UID32))) {
4316 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
4317 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
4318 /*
4319 * Fix up interoperability with old kernels. Otherwise,
4320 * old inodes get re-used with the upper 16 bits of the
4321 * uid/gid intact.
4322 */
4323 if (ei->i_dtime && list_empty(&ei->i_orphan)) {
4324 raw_inode->i_uid_high = 0;
4325 raw_inode->i_gid_high = 0;
4326 } else {
4327 raw_inode->i_uid_high =
4328 cpu_to_le16(high_16_bits(i_uid));
4329 raw_inode->i_gid_high =
4330 cpu_to_le16(high_16_bits(i_gid));
4331 }
4332 } else {
4333 raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
4334 raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
4335 raw_inode->i_uid_high = 0;
4336 raw_inode->i_gid_high = 0;
4337 }
4338 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4339
4340 EXT4_INODE_SET_CTIME(inode, raw_inode);
4341 EXT4_INODE_SET_MTIME(inode, raw_inode);
4342 EXT4_INODE_SET_ATIME(inode, raw_inode);
4343 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4344
4345 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4346 raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
4347 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
4348 raw_inode->i_file_acl_high =
4349 cpu_to_le16(ei->i_file_acl >> 32);
4350 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4351 ext4_isize_set(raw_inode, ei->i_disksize);
4352
4353 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4354 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4355 if (old_valid_dev(inode->i_rdev)) {
4356 raw_inode->i_block[0] =
4357 cpu_to_le32(old_encode_dev(inode->i_rdev));
4358 raw_inode->i_block[1] = 0;
4359 } else {
4360 raw_inode->i_block[0] = 0;
4361 raw_inode->i_block[1] =
4362 cpu_to_le32(new_encode_dev(inode->i_rdev));
4363 raw_inode->i_block[2] = 0;
4364 }
4365 } else if (!ext4_has_inline_data(inode)) {
4366 for (block = 0; block < EXT4_N_BLOCKS; block++)
4367 raw_inode->i_block[block] = ei->i_data[block];
4368 }
4369
4370 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4371 u64 ivers = ext4_inode_peek_iversion(inode);
4372
4373 raw_inode->i_disk_version = cpu_to_le32(ivers);
4374 if (ei->i_extra_isize) {
4375 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4376 raw_inode->i_version_hi =
4377 cpu_to_le32(ivers >> 32);
4378 raw_inode->i_extra_isize =
4379 cpu_to_le16(ei->i_extra_isize);
4380 }
4381 }
4382
4383 if (i_projid != EXT4_DEF_PROJID &&
4384 !ext4_has_feature_project(inode->i_sb))
4385 err = err ?: -EFSCORRUPTED;
4386
4387 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4388 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
4389 raw_inode->i_projid = cpu_to_le32(i_projid);
4390
4391 ext4_inode_csum_set(inode, raw_inode, ei);
4392 return err;
4393 }
4394
4395 /*
4396 * ext4_get_inode_loc returns with an extra refcount against the inode's
4397 * underlying buffer_head on success. If we pass 'inode' and it does not
4398 * have in-inode xattr, we have all inode data in memory that is needed
4399 * to recreate the on-disk version of this inode.
4400 */
4401 static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
4402 struct inode *inode, struct ext4_iloc *iloc,
4403 ext4_fsblk_t *ret_block)
4404 {
4405 struct ext4_group_desc *gdp;
4406 struct buffer_head *bh;
4407 ext4_fsblk_t block;
4408 struct blk_plug plug;
4409 int inodes_per_block, inode_offset;
4410
4411 iloc->bh = NULL;
4412 if (ino < EXT4_ROOT_INO ||
4413 ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
4414 return -EFSCORRUPTED;
4415
4416 iloc->block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
4417 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
4418 if (!gdp)
4419 return -EIO;
4420
4421 /*
4422 * Figure out the offset within the block group inode table
4423 */
4424 inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
4425 inode_offset = ((ino - 1) %
4426 EXT4_INODES_PER_GROUP(sb));
4427 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
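/*
 * For example, with 256-byte inodes in 4096-byte blocks (16 inodes per
 * block) and 8192 inodes per group, ino 8195 lands in block group 1
 * with inode_offset = 2, giving iloc->offset = 512 within the first
 * block of that group's inode table.
 */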
4428
4429 block = ext4_inode_table(sb, gdp);
4430 if ((block <= le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) ||
4431 (block >= ext4_blocks_count(EXT4_SB(sb)->s_es))) {
4432 ext4_error(sb, "Invalid inode table block %llu in "
4433 "block_group %u", block, iloc->block_group);
4434 return -EFSCORRUPTED;
4435 }
4436 block += (inode_offset / inodes_per_block);
4437
4438 bh = sb_getblk(sb, block);
4439 if (unlikely(!bh))
4440 return -ENOMEM;
4441 if (ext4_buffer_uptodate(bh))
4442 goto has_buffer;
4443
4444 lock_buffer(bh);
4445 if (ext4_buffer_uptodate(bh)) {
4446 /* Someone brought it uptodate while we waited */
4447 unlock_buffer(bh);
4448 goto has_buffer;
4449 }
4450
4451 /*
4452 * If we have all of the inode's information in memory and this
4453 * is the only valid inode in the block, we need not read the
4454 * block.
4455 */
4456 if (inode && !ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4457 struct buffer_head *bitmap_bh;
4458 int i, start;
4459
4460 start = inode_offset & ~(inodes_per_block - 1);
4461
4462 /* Is the inode bitmap in cache? */
4463 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
4464 if (unlikely(!bitmap_bh))
4465 goto make_io;
4466
4467 /*
4468 * If the inode bitmap isn't in cache then the
4469 * optimisation may end up performing two reads instead
4470 * of one, so skip it.
4471 */
4472 if (!buffer_uptodate(bitmap_bh)) {
4473 brelse(bitmap_bh);
4474 goto make_io;
4475 }
4476 for (i = start; i < start + inodes_per_block; i++) {
4477 if (i == inode_offset)
4478 continue;
4479 if (ext4_test_bit(i, bitmap_bh->b_data))
4480 break;
4481 }
4482 brelse(bitmap_bh);
4483 if (i == start + inodes_per_block) {
4484 struct ext4_inode *raw_inode =
4485 (struct ext4_inode *) (bh->b_data + iloc->offset);
4486
4487 /* all other inodes are free, so skip I/O */
4488 memset(bh->b_data, 0, bh->b_size);
4489 if (!ext4_test_inode_state(inode, EXT4_STATE_NEW))
4490 ext4_fill_raw_inode(inode, raw_inode);
4491 set_buffer_uptodate(bh);
4492 unlock_buffer(bh);
4493 goto has_buffer;
4494 }
4495 }
4496
4497 make_io:
4498 /*
4499 * If we need to do any I/O, try to pre-readahead extra
4500 * blocks from the inode table.
4501 */
4502 blk_start_plug(&plug);
4503 if (EXT4_SB(sb)->s_inode_readahead_blks) {
4504 ext4_fsblk_t b, end, table;
4505 unsigned num;
4506 __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;
4507
4508 table = ext4_inode_table(sb, gdp);
4509 /* s_inode_readahead_blks is always a power of 2 */
4510 b = block & ~((ext4_fsblk_t) ra_blks - 1);
4511 if (table > b)
4512 b = table;
4513 end = b + ra_blks;
4514 num = EXT4_INODES_PER_GROUP(sb);
4515 if (ext4_has_group_desc_csum(sb))
4516 num -= ext4_itable_unused_count(sb, gdp);
4517 table += num / inodes_per_block;
4518 if (end > table)
4519 end = table;
4520 while (b <= end)
4521 ext4_sb_breadahead_unmovable(sb, b++);
4522 }
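/*
 * For example, with s_inode_readahead_blks = 32 the window start b is
 * the target block rounded down to a 32-block boundary (clamped to the
 * start of the inode table), and the end is further clamped so that no
 * blocks past the last initialized inode of the group are read ahead.
 */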
4523
4524 /*
4525 * There are other valid inodes in the buffer, this inode
4526 * has in-inode xattrs, or we don't have this inode in memory.
4527 * Read the block from disk.
4528 */
4529 trace_ext4_load_inode(sb, ino);
4530 ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL);
4531 blk_finish_plug(&plug);
4532 wait_on_buffer(bh);
4533 ext4_simulate_fail_bh(sb, bh, EXT4_SIM_INODE_EIO);
4534 if (!buffer_uptodate(bh)) {
4535 if (ret_block)
4536 *ret_block = block;
4537 brelse(bh);
4538 return -EIO;
4539 }
4540 has_buffer:
4541 iloc->bh = bh;
4542 return 0;
4543 }
4544
4545 static int __ext4_get_inode_loc_noinmem(struct inode *inode,
4546 struct ext4_iloc *iloc)
4547 {
4548 ext4_fsblk_t err_blk = 0;
4549 int ret;
4550
4551 ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, NULL, iloc,
4552 &err_blk);
4553
4554 if (ret == -EIO)
4555 ext4_error_inode_block(inode, err_blk, EIO,
4556 "unable to read itable block");
4557
4558 return ret;
4559 }
4560
4561 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4562 {
4563 ext4_fsblk_t err_blk = 0;
4564 int ret;
4565
4566 ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, inode, iloc,
4567 &err_blk);
4568
4569 if (ret == -EIO)
4570 ext4_error_inode_block(inode, err_blk, EIO,
4571 "unable to read itable block");
4572
4573 return ret;
4574 }
4575
4576
4577 int ext4_get_fc_inode_loc(struct super_block *sb, unsigned long ino,
4578 struct ext4_iloc *iloc)
4579 {
4580 return __ext4_get_inode_loc(sb, ino, NULL, iloc, NULL);
4581 }
4582
4583 static bool ext4_should_enable_dax(struct inode *inode)
4584 {
4585 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4586
4587 if (test_opt2(inode->i_sb, DAX_NEVER))
4588 return false;
4589 if (!S_ISREG(inode->i_mode))
4590 return false;
4591 if (ext4_should_journal_data(inode))
4592 return false;
4593 if (ext4_has_inline_data(inode))
4594 return false;
4595 if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
4596 return false;
4597 if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY))
4598 return false;
4599 if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags))
4600 return false;
4601 if (test_opt(inode->i_sb, DAX_ALWAYS))
4602 return true;
4603
4604 return ext4_test_inode_flag(inode, EXT4_INODE_DAX);
4605 }
4606
4607 void ext4_set_inode_flags(struct inode *inode, bool init)
4608 {
4609 unsigned int flags = EXT4_I(inode)->i_flags;
4610 unsigned int new_fl = 0;
4611
4612 WARN_ON_ONCE(IS_DAX(inode) && init);
4613
4614 if (flags & EXT4_SYNC_FL)
4615 new_fl |= S_SYNC;
4616 if (flags & EXT4_APPEND_FL)
4617 new_fl |= S_APPEND;
4618 if (flags & EXT4_IMMUTABLE_FL)
4619 new_fl |= S_IMMUTABLE;
4620 if (flags & EXT4_NOATIME_FL)
4621 new_fl |= S_NOATIME;
4622 if (flags & EXT4_DIRSYNC_FL)
4623 new_fl |= S_DIRSYNC;
4624
4625 /* Because of the way inode_set_flags() works we must preserve S_DAX
4626 * here if already set. */
4627 new_fl |= (inode->i_flags & S_DAX);
4628 if (init && ext4_should_enable_dax(inode))
4629 new_fl |= S_DAX;
4630
4631 if (flags & EXT4_ENCRYPT_FL)
4632 new_fl |= S_ENCRYPTED;
4633 if (flags & EXT4_CASEFOLD_FL)
4634 new_fl |= S_CASEFOLD;
4635 if (flags & EXT4_VERITY_FL)
4636 new_fl |= S_VERITY;
4637 inode_set_flags(inode, new_fl,
4638 S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX|
4639 S_ENCRYPTED|S_CASEFOLD|S_VERITY);
4640 }
4641
4642 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
4643 struct ext4_inode_info *ei)
4644 {
4645 blkcnt_t i_blocks ;
4646 struct inode *inode = &(ei->vfs_inode);
4647 struct super_block *sb = inode->i_sb;
4648
4649 if (ext4_has_feature_huge_file(sb)) {
4650 /* we are using combined 48 bit field */
4651 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
4652 le32_to_cpu(raw_inode->i_blocks_lo);
4653 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
4654 /* i_blocks is in units of the file system block size */
4655 return i_blocks << (inode->i_blkbits - 9);
4656 } else {
4657 return i_blocks;
4658 }
4659 } else {
4660 return le32_to_cpu(raw_inode->i_blocks_lo);
4661 }
4662 }
4663
4664 static inline int ext4_iget_extra_inode(struct inode *inode,
4665 struct ext4_inode *raw_inode,
4666 struct ext4_inode_info *ei)
4667 {
4668 __le32 *magic = (void *)raw_inode +
4669 EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
4670
4671 if (EXT4_INODE_HAS_XATTR_SPACE(inode) &&
4672 *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
4673 int err;
4674
4675 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
4676 err = ext4_find_inline_data_nolock(inode);
4677 if (!err && ext4_has_inline_data(inode))
4678 ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
4679 return err;
4680 } else
4681 EXT4_I(inode)->i_inline_off = 0;
4682 return 0;
4683 }
4684
4685 int ext4_get_projid(struct inode *inode, kprojid_t *projid)
4686 {
4687 if (!ext4_has_feature_project(inode->i_sb))
4688 return -EOPNOTSUPP;
4689 *projid = EXT4_I(inode)->i_projid;
4690 return 0;
4691 }
4692
4693 /*
4694 * ext4 has a self-managed i_version for ea inodes: it stores the lower 32
4695 * bits of the refcount in i_version, so use raw values if the inode has the
4696 * EXT4_EA_INODE_FL flag set.
4697 */
4698 static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
4699 {
4700 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4701 inode_set_iversion_raw(inode, val);
4702 else
4703 inode_set_iversion_queried(inode, val);
4704 }
4705
4706 static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags)
4708 {
4709 if (flags & EXT4_IGET_EA_INODE) {
4710 if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4711 return "missing EA_INODE flag";
4712 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
4713 EXT4_I(inode)->i_file_acl)
4714 return "ea_inode with extended attributes";
4715 } else {
4716 if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4717 return "unexpected EA_INODE flag";
4718 }
4719 if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD))
4720 return "unexpected bad inode w/o EXT4_IGET_BAD";
4721 return NULL;
4722 }
4723
4724 struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
4725 ext4_iget_flags flags, const char *function,
4726 unsigned int line)
4727 {
4728 struct ext4_iloc iloc;
4729 struct ext4_inode *raw_inode;
4730 struct ext4_inode_info *ei;
4731 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
4732 struct inode *inode;
4733 const char *err_str;
4734 journal_t *journal = EXT4_SB(sb)->s_journal;
4735 long ret;
4736 loff_t size;
4737 int block;
4738 uid_t i_uid;
4739 gid_t i_gid;
4740 projid_t i_projid;
4741
4742 if ((!(flags & EXT4_IGET_SPECIAL) &&
4743 ((ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) ||
4744 ino == le32_to_cpu(es->s_usr_quota_inum) ||
4745 ino == le32_to_cpu(es->s_grp_quota_inum) ||
4746 ino == le32_to_cpu(es->s_prj_quota_inum) ||
4747 ino == le32_to_cpu(es->s_orphan_file_inum))) ||
4748 (ino < EXT4_ROOT_INO) ||
4749 (ino > le32_to_cpu(es->s_inodes_count))) {
4750 if (flags & EXT4_IGET_HANDLE)
4751 return ERR_PTR(-ESTALE);
4752 __ext4_error(sb, function, line, false, EFSCORRUPTED, 0,
4753 "inode #%lu: comm %s: iget: illegal inode #",
4754 ino, current->comm);
4755 return ERR_PTR(-EFSCORRUPTED);
4756 }
4757
4758 inode = iget_locked(sb, ino);
4759 if (!inode)
4760 return ERR_PTR(-ENOMEM);
4761 if (!(inode->i_state & I_NEW)) {
4762 if ((err_str = check_igot_inode(inode, flags)) != NULL) {
4763 ext4_error_inode(inode, function, line, 0, err_str);
4764 iput(inode);
4765 return ERR_PTR(-EFSCORRUPTED);
4766 }
4767 return inode;
4768 }
4769
4770 ei = EXT4_I(inode);
4771 iloc.bh = NULL;
4772
4773 ret = __ext4_get_inode_loc_noinmem(inode, &iloc);
4774 if (ret < 0)
4775 goto bad_inode;
4776 raw_inode = ext4_raw_inode(&iloc);
4777
4778 if ((flags & EXT4_IGET_HANDLE) &&
4779 (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
4780 ret = -ESTALE;
4781 goto bad_inode;
4782 }
4783
4784 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4785 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4786 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4787 EXT4_INODE_SIZE(inode->i_sb) ||
4788 (ei->i_extra_isize & 3)) {
4789 ext4_error_inode(inode, function, line, 0,
4790 "iget: bad extra_isize %u "
4791 "(inode size %u)",
4792 ei->i_extra_isize,
4793 EXT4_INODE_SIZE(inode->i_sb));
4794 ret = -EFSCORRUPTED;
4795 goto bad_inode;
4796 }
4797 } else
4798 ei->i_extra_isize = 0;
4799
4800 /* Precompute checksum seed for inode metadata */
4801 if (ext4_has_metadata_csum(sb)) {
4802 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4803 __u32 csum;
4804 __le32 inum = cpu_to_le32(inode->i_ino);
4805 __le32 gen = raw_inode->i_generation;
4806 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
4807 sizeof(inum));
4808 ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
4809 sizeof(gen));
4810 }
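/*
 * The resulting per-inode seed is the filesystem checksum seed folded
 * first with the inode number and then with the generation; it is the
 * starting value that ext4_inode_csum() builds the inode checksum on.
 */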
4811
4812 if ((!ext4_inode_csum_verify(inode, raw_inode, ei) ||
4813 ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) &&
4814 (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))) {
4815 ext4_error_inode_err(inode, function, line, 0,
4816 EFSBADCRC, "iget: checksum invalid");
4817 ret = -EFSBADCRC;
4818 goto bad_inode;
4819 }
4820
4821 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4822 i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4823 i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4824 if (ext4_has_feature_project(sb) &&
4825 EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4826 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
4827 i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
4828 else
4829 i_projid = EXT4_DEF_PROJID;
4830
4831 if (!(test_opt(inode->i_sb, NO_UID32))) {
4832 i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4833 i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4834 }
4835 i_uid_write(inode, i_uid);
4836 i_gid_write(inode, i_gid);
4837 ei->i_projid = make_kprojid(&init_user_ns, i_projid);
4838 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
4839
4840 ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
4841 ei->i_inline_off = 0;
4842 ei->i_dir_start_lookup = 0;
4843 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4844 /* We now have enough fields to check if the inode was active or not.
4845 * This is needed because nfsd might try to access dead inodes;
4846 * the test is the same one that e2fsck uses.
4847 * NeilBrown 1999oct15
4848 */
4849 if (inode->i_nlink == 0) {
4850 if ((inode->i_mode == 0 || flags & EXT4_IGET_SPECIAL ||
4851 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
4852 ino != EXT4_BOOT_LOADER_INO) {
4853 /* this inode is deleted or unallocated */
4854 if (flags & EXT4_IGET_SPECIAL) {
4855 ext4_error_inode(inode, function, line, 0,
4856 "iget: special inode unallocated");
4857 ret = -EFSCORRUPTED;
4858 } else
4859 ret = -ESTALE;
4860 goto bad_inode;
4861 }
4862 /* The only unlinked inodes we let through here have
4863 * valid i_mode and are being read by the orphan
4864 * recovery code: that's fine, we're about to complete
4865 * the process of deleting those.
4866 * OR it is the EXT4_BOOT_LOADER_INO which is
4867 * not initialized on a new filesystem. */
4868 }
4869 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4870 ext4_set_inode_flags(inode, true);
4871 inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4872 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4873 if (ext4_has_feature_64bit(sb))
4874 ei->i_file_acl |=
4875 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4876 inode->i_size = ext4_isize(sb, raw_inode);
4877 if ((size = i_size_read(inode)) < 0) {
4878 ext4_error_inode(inode, function, line, 0,
4879 "iget: bad i_size value: %lld", size);
4880 ret = -EFSCORRUPTED;
4881 goto bad_inode;
4882 }
4883 /*
4884 * If dir_index is not enabled but there's a dir with the INDEX flag set,
4885 * we'd normally treat htree data as empty space. But with metadata
4886 * checksumming that would corrupt the checksums, so forbid it.
4887 */
4888 if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
4889 ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
4890 ext4_error_inode(inode, function, line, 0,
4891 "iget: Dir with htree data on filesystem without dir_index feature.");
4892 ret = -EFSCORRUPTED;
4893 goto bad_inode;
4894 }
4895 ei->i_disksize = inode->i_size;
4896 #ifdef CONFIG_QUOTA
4897 ei->i_reserved_quota = 0;
4898 #endif
4899 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4900 ei->i_block_group = iloc.block_group;
4901 ei->i_last_alloc_group = ~0;
4902 /*
4903 * NOTE! The in-memory inode i_data array is in little-endian order
4904 * even on big-endian machines: we do NOT byteswap the block numbers!
4905 */
4906 for (block = 0; block < EXT4_N_BLOCKS; block++)
4907 ei->i_data[block] = raw_inode->i_block[block];
4908 INIT_LIST_HEAD(&ei->i_orphan);
4909 ext4_fc_init_inode(&ei->vfs_inode);
4910
4911 /*
4912 * Set the transaction IDs of transactions that have to be committed
4913 * to finish f[data]sync. We set them to currently running transaction
4914 * as we cannot be sure that the inode or some of its metadata isn't
4915 * part of the transaction - the inode could have been reclaimed and
4916 * now it is reread from disk.
4917 */
4918 if (journal) {
4919 transaction_t *transaction;
4920 tid_t tid;
4921
4922 read_lock(&journal->j_state_lock);
4923 if (journal->j_running_transaction)
4924 transaction = journal->j_running_transaction;
4925 else
4926 transaction = journal->j_committing_transaction;
4927 if (transaction)
4928 tid = transaction->t_tid;
4929 else
4930 tid = journal->j_commit_sequence;
4931 read_unlock(&journal->j_state_lock);
4932 ei->i_sync_tid = tid;
4933 ei->i_datasync_tid = tid;
4934 }
4935
4936 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4937 if (ei->i_extra_isize == 0) {
4938 /* The extra space is currently unused. Use it. */
4939 BUILD_BUG_ON(sizeof(struct ext4_inode) & 3);
4940 ei->i_extra_isize = sizeof(struct ext4_inode) -
4941 EXT4_GOOD_OLD_INODE_SIZE;
4942 } else {
4943 ret = ext4_iget_extra_inode(inode, raw_inode, ei);
4944 if (ret)
4945 goto bad_inode;
4946 }
4947 }
4948
4949 EXT4_INODE_GET_CTIME(inode, raw_inode);
4950 EXT4_INODE_GET_ATIME(inode, raw_inode);
4951 EXT4_INODE_GET_MTIME(inode, raw_inode);
4952 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
4953
4954 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4955 u64 ivers = le32_to_cpu(raw_inode->i_disk_version);
4956
4957 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4958 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4959 ivers |=
4960 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
4961 }
4962 ext4_inode_set_iversion_queried(inode, ivers);
4963 }
4964
4965 ret = 0;
4966 if (ei->i_file_acl &&
4967 !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) {
4968 ext4_error_inode(inode, function, line, 0,
4969 "iget: bad extended attribute block %llu",
4970 ei->i_file_acl);
4971 ret = -EFSCORRUPTED;
4972 goto bad_inode;
4973 } else if (!ext4_has_inline_data(inode)) {
4974 /* validate the block references in the inode */
4975 if (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) &&
4976 (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4977 (S_ISLNK(inode->i_mode) &&
4978 !ext4_inode_is_fast_symlink(inode)))) {
4979 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4980 ret = ext4_ext_check_inode(inode);
4981 else
4982 ret = ext4_ind_check_inode(inode);
4983 }
4984 }
4985 if (ret)
4986 goto bad_inode;
4987
4988 if (S_ISREG(inode->i_mode)) {
4989 inode->i_op = &ext4_file_inode_operations;
4990 inode->i_fop = &ext4_file_operations;
4991 ext4_set_aops(inode);
4992 } else if (S_ISDIR(inode->i_mode)) {
4993 inode->i_op = &ext4_dir_inode_operations;
4994 inode->i_fop = &ext4_dir_operations;
4995 } else if (S_ISLNK(inode->i_mode)) {
4996 /* VFS does not allow setting these so must be corruption */
4997 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
4998 ext4_error_inode(inode, function, line, 0,
4999 "iget: immutable or append flags "
5000 "not allowed on symlinks");
5001 ret = -EFSCORRUPTED;
5002 goto bad_inode;
5003 }
5004 if (IS_ENCRYPTED(inode)) {
5005 inode->i_op = &ext4_encrypted_symlink_inode_operations;
5006 } else if (ext4_inode_is_fast_symlink(inode)) {
5007 inode->i_link = (char *)ei->i_data;
5008 inode->i_op = &ext4_fast_symlink_inode_operations;
5009 nd_terminate_link(ei->i_data, inode->i_size,
5010 sizeof(ei->i_data) - 1);
5011 } else {
5012 inode->i_op = &ext4_symlink_inode_operations;
5013 }
5014 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
5015 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
5016 inode->i_op = &ext4_special_inode_operations;
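/*
 * Device numbers live in i_data: the old 16-bit encoding is stored in
 * i_block[0], the newer 32-bit encoding in i_block[1].
 */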
5017 if (raw_inode->i_block[0])
5018 init_special_inode(inode, inode->i_mode,
5019 old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
5020 else
5021 init_special_inode(inode, inode->i_mode,
5022 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
5023 } else if (ino == EXT4_BOOT_LOADER_INO) {
5024 make_bad_inode(inode);
5025 } else {
5026 ret = -EFSCORRUPTED;
5027 ext4_error_inode(inode, function, line, 0,
5028 "iget: bogus i_mode (%o)", inode->i_mode);
5029 goto bad_inode;
5030 }
5031 if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb)) {
5032 ext4_error_inode(inode, function, line, 0,
5033 "casefold flag without casefold feature");
5034 ret = -EFSCORRUPTED;
5035 goto bad_inode;
5036 }
5037 if ((err_str = check_igot_inode(inode, flags)) != NULL) {
5038 ext4_error_inode(inode, function, line, 0, err_str);
5039 ret = -EFSCORRUPTED;
5040 goto bad_inode;
5041 }
5042
5043 brelse(iloc.bh);
5044 unlock_new_inode(inode);
5045 return inode;
5046
5047 bad_inode:
5048 brelse(iloc.bh);
5049 iget_failed(inode);
5050 return ERR_PTR(ret);
5051 }
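/*
 * Illustrative sketch (not part of this file): callers typically consume
 * the result of ext4_iget() like this, with EXT4_IGET_NORMAL (or
 * EXT4_IGET_SPECIAL for inodes referenced from the superblock) as flags:
 *
 *	inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 */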
5052
5053 static void __ext4_update_other_inode_time(struct super_block *sb,
5054 unsigned long orig_ino,
5055 unsigned long ino,
5056 struct ext4_inode *raw_inode)
5057 {
5058 struct inode *inode;
5059
5060 inode = find_inode_by_ino_rcu(sb, ino);
5061 if (!inode)
5062 return;
5063
5064 if (!inode_is_dirtytime_only(inode))
5065 return;
5066
5067 spin_lock(&inode->i_lock);
5068 if (inode_is_dirtytime_only(inode)) {
5069 struct ext4_inode_info *ei = EXT4_I(inode);
5070
5071 inode->i_state &= ~I_DIRTY_TIME;
5072 spin_unlock(&inode->i_lock);
5073
5074 spin_lock(&ei->i_raw_lock);
5075 EXT4_INODE_SET_CTIME(inode, raw_inode);
5076 EXT4_INODE_SET_MTIME(inode, raw_inode);
5077 EXT4_INODE_SET_ATIME(inode, raw_inode);
5078 ext4_inode_csum_set(inode, raw_inode, ei);
5079 spin_unlock(&ei->i_raw_lock);
5080 trace_ext4_other_inode_update_time(inode, orig_ino);
5081 return;
5082 }
5083 spin_unlock(&inode->i_lock);
5084 }
5085
5086 /*
5087 * Opportunistically update the other time fields for other inodes in
5088 * the same inode table block.
5089 */
5090 static void ext4_update_other_inodes_time(struct super_block *sb,
5091 unsigned long orig_ino, char *buf)
5092 {
5093 unsigned long ino;
5094 int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
5095 int inode_size = EXT4_INODE_SIZE(sb);
5096
5097 /*
5098 * Calculate the first inode in the inode table block. Inode
5099 * numbers are one-based. That is, the first inode in a block
5100 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
5101 */
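/* e.g. orig_ino == 42, inodes_per_block == 16: (41 & ~15) + 1 == 33 */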
5102 ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
5103 rcu_read_lock();
5104 for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
5105 if (ino == orig_ino)
5106 continue;
5107 __ext4_update_other_inode_time(sb, orig_ino, ino,
5108 (struct ext4_inode *)buf);
5109 }
5110 rcu_read_unlock();
5111 }
5112
5113 /*
5114 * Post the struct inode info into an on-disk inode location in the
5115 * buffer-cache. This gobbles the caller's reference to the
5116 * buffer_head in the inode location struct.
5117 *
5118 * The caller must have write access to iloc->bh.
5119 */
5120 static int ext4_do_update_inode(handle_t *handle,
5121 struct inode *inode,
5122 struct ext4_iloc *iloc)
5123 {
5124 struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
5125 struct ext4_inode_info *ei = EXT4_I(inode);
5126 struct buffer_head *bh = iloc->bh;
5127 struct super_block *sb = inode->i_sb;
5128 int err;
5129 int need_datasync = 0, set_large_file = 0;
5130
5131 spin_lock(&ei->i_raw_lock);
5132
5133 /*
5134 * For fields not tracked in the in-memory inode, initialise them
5135 * to zero for new inodes.
5136 */
5137 if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
5138 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
5139
5140 if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode))
5141 need_datasync = 1;
5142 if (ei->i_disksize > 0x7fffffffULL) {
5143 if (!ext4_has_feature_large_file(sb) ||
5144 EXT4_SB(sb)->s_es->s_rev_level == cpu_to_le32(EXT4_GOOD_OLD_REV))
5145 set_large_file = 1;
5146 }
5147
5148 err = ext4_fill_raw_inode(inode, raw_inode);
5149 spin_unlock(&ei->i_raw_lock);
5150 if (err) {
5151 EXT4_ERROR_INODE(inode, "corrupted inode contents");
5152 goto out_brelse;
5153 }
5154
5155 if (inode->i_sb->s_flags & SB_LAZYTIME)
5156 ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
5157 bh->b_data);
5158
5159 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
5160 err = ext4_handle_dirty_metadata(handle, NULL, bh);
5161 if (err)
5162 goto out_error;
5163 ext4_clear_inode_state(inode, EXT4_STATE_NEW);
5164 if (set_large_file) {
5165 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
5166 err = ext4_journal_get_write_access(handle, sb,
5167 EXT4_SB(sb)->s_sbh,
5168 EXT4_JTR_NONE);
5169 if (err)
5170 goto out_error;
5171 lock_buffer(EXT4_SB(sb)->s_sbh);
5172 ext4_set_feature_large_file(sb);
5173 ext4_superblock_csum_set(sb);
5174 unlock_buffer(EXT4_SB(sb)->s_sbh);
5175 ext4_handle_sync(handle);
5176 err = ext4_handle_dirty_metadata(handle, NULL,
5177 EXT4_SB(sb)->s_sbh);
5178 }
5179 ext4_update_inode_fsync_trans(handle, inode, need_datasync);
5180 out_error:
5181 ext4_std_error(inode->i_sb, err);
5182 out_brelse:
5183 brelse(bh);
5184 return err;
5185 }
5186
5187 /*
5188 * ext4_write_inode()
5189 *
5190 * We are called from a few places:
5191 *
5192 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
5193 * Here, there will be no transaction running. We wait for any running
5194 * transaction to commit.
5195 *
5196 * - Within flush work (sys_sync(), kupdate and such).
5197 * We wait on commit, if told to.
5198 *
5199 * - Within iput_final() -> write_inode_now()
5200 * We wait on commit, if told to.
5201 *
5202 * In all cases it is actually safe for us to return without doing anything,
5203 * because the inode has been copied into a raw inode buffer in
5204 * ext4_mark_inode_dirty(). This is a correctness thing for WB_SYNC_ALL
5205 * writeback.
5206 *
5207 * Note that we are absolutely dependent upon all inode dirtiers doing the
5208 * right thing: they *must* call mark_inode_dirty() after dirtying info in
5209 * which we are interested.
5210 *
5211 * It would be a bug for them to not do this. The code:
5212 *
5213 * mark_inode_dirty(inode)
5214 * stuff();
5215 * inode->i_size = expr;
5216 *
5217 * is in error because write_inode() could occur while `stuff()' is running,
5218 * and the new i_size will be lost. Plus the inode will no longer be on the
5219 * superblock's dirty inode list.
5220 */
5221 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
5222 {
5223 int err;
5224
5225 if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
5226 return 0;
5227
5228 if (unlikely(ext4_forced_shutdown(inode->i_sb)))
5229 return -EIO;
5230
5231 if (EXT4_SB(inode->i_sb)->s_journal) {
5232 if (ext4_journal_current_handle()) {
5233 ext4_debug("called recursively, non-PF_MEMALLOC!\n");
5234 dump_stack();
5235 return -EIO;
5236 }
5237
5238 /*
5239 * No need to force transaction in WB_SYNC_NONE mode. Also
5240 * ext4_sync_fs() will force the commit after everything is
5241 * written.
5242 */
5243 if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
5244 return 0;
5245
5246 err = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
5247 EXT4_I(inode)->i_sync_tid);
5248 } else {
5249 struct ext4_iloc iloc;
5250
5251 err = __ext4_get_inode_loc_noinmem(inode, &iloc);
5252 if (err)
5253 return err;
5254 /*
5255 * sync(2) will flush the whole buffer cache. No need to do
5256 * it here separately for each inode.
5257 */
5258 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
5259 sync_dirty_buffer(iloc.bh);
5260 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
5261 ext4_error_inode_block(inode, iloc.bh->b_blocknr, EIO,
5262 "IO error syncing inode");
5263 err = -EIO;
5264 }
5265 brelse(iloc.bh);
5266 }
5267 return err;
5268 }
5269
5270 /*
5271 * In data=journal mode ext4_journalled_invalidate_folio() may fail to invalidate
5272 * buffers that are attached to a folio straddling i_size and are undergoing
5273 * commit. In that case we have to wait for commit to finish and try again.
5274 */
5275 static void ext4_wait_for_tail_page_commit(struct inode *inode)
5276 {
5277 unsigned offset;
5278 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
5279 tid_t commit_tid = 0;
5280 int ret;
5281
5282 offset = inode->i_size & (PAGE_SIZE - 1);
5283 /*
5284 * If the folio is fully truncated, we don't need to wait for any commit
5285 * (and we even should not as __ext4_journalled_invalidate_folio() may
5286 * strip all buffers from the folio but keep the folio dirty which can then
5287 * confuse e.g. concurrent ext4_writepages() seeing dirty folio without
5288 * buffers). Also we don't need to wait for any commit if all buffers in
5289 * the folio remain valid. This is most beneficial for the common case of
5290 * blocksize == PAGESIZE.
5291 */
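/* e.g. with 4k pages and 1k blocks, only offsets 1..3072 need the wait */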
5292 if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
5293 return;
5294 while (1) {
5295 struct folio *folio = filemap_lock_folio(inode->i_mapping,
5296 inode->i_size >> PAGE_SHIFT);
5297 if (IS_ERR(folio))
5298 return;
5299 ret = __ext4_journalled_invalidate_folio(folio, offset,
5300 folio_size(folio) - offset);
5301 folio_unlock(folio);
5302 folio_put(folio);
5303 if (ret != -EBUSY)
5304 return;
5305 commit_tid = 0;
5306 read_lock(&journal->j_state_lock);
5307 if (journal->j_committing_transaction)
5308 commit_tid = journal->j_committing_transaction->t_tid;
5309 read_unlock(&journal->j_state_lock);
5310 if (commit_tid)
5311 jbd2_log_wait_commit(journal, commit_tid);
5312 }
5313 }
5314
5315 /*
5316 * ext4_setattr()
5317 *
5318 * Called from notify_change.
5319 *
5320 * We want to trap VFS attempts to truncate the file as soon as
5321 * possible. In particular, we want to make sure that when the VFS
5322 * shrinks i_size, we put the inode on the orphan list and modify
5323 * i_disksize immediately, so that during the subsequent flushing of
5324 * dirty pages and freeing of disk blocks, we can guarantee that any
5325 * commit will leave the blocks being flushed in an unused state on
5326 * disk. (On recovery, the inode will get truncated and the blocks will
5327 * be freed, so we have a strong guarantee that no future commit will
5328 * leave these blocks visible to the user.)
5329 *
5330 * Another thing we have to ensure is that if we are in ordered mode
5331 * and the inode is still attached to the committing transaction, we
5332 * must start writeout of all the dirty pages which are being truncated.
5333 * This way we are sure that all the data written in the previous
5334 * transaction are already on disk (truncate waits for pages under
5335 * writeback).
5336 *
5337 * Called with inode->i_rwsem down.
5338 */
5339 int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
5340 struct iattr *attr)
5341 {
5342 struct inode *inode = d_inode(dentry);
5343 int error, rc = 0;
5344 int orphan = 0;
5345 const unsigned int ia_valid = attr->ia_valid;
5346 bool inc_ivers = true;
5347
5348 if (unlikely(ext4_forced_shutdown(inode->i_sb)))
5349 return -EIO;
5350
5351 if (unlikely(IS_IMMUTABLE(inode)))
5352 return -EPERM;
5353
5354 if (unlikely(IS_APPEND(inode) &&
5355 (ia_valid & (ATTR_MODE | ATTR_UID |
5356 ATTR_GID | ATTR_TIMES_SET))))
5357 return -EPERM;
5358
5359 error = setattr_prepare(idmap, dentry, attr);
5360 if (error)
5361 return error;
5362
5363 error = fscrypt_prepare_setattr(dentry, attr);
5364 if (error)
5365 return error;
5366
5367 error = fsverity_prepare_setattr(dentry, attr);
5368 if (error)
5369 return error;
5370
5371 if (is_quota_modification(idmap, inode, attr)) {
5372 error = dquot_initialize(inode);
5373 if (error)
5374 return error;
5375 }
5376
5377 if (i_uid_needs_update(idmap, attr, inode) ||
5378 i_gid_needs_update(idmap, attr, inode)) {
5379 handle_t *handle;
5380
5381 /* (user+group)*(old+new) structure, inode write (sb,
5382 * inode block, ? - but truncate inode update has it) */
5383 handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
5384 (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
5385 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
5386 if (IS_ERR(handle)) {
5387 error = PTR_ERR(handle);
5388 goto err_out;
5389 }
5390
5391 /* dquot_transfer() calls back ext4_get_inode_usage() which
5392 * counts xattr inode references.
5393 */
5394 down_read(&EXT4_I(inode)->xattr_sem);
5395 error = dquot_transfer(idmap, inode, attr);
5396 up_read(&EXT4_I(inode)->xattr_sem);
5397
5398 if (error) {
5399 ext4_journal_stop(handle);
5400 return error;
5401 }
5402 /* Update corresponding info in inode so that everything is in
5403 * one transaction */
5404 i_uid_update(idmap, attr, inode);
5405 i_gid_update(idmap, attr, inode);
5406 error = ext4_mark_inode_dirty(handle, inode);
5407 ext4_journal_stop(handle);
5408 if (unlikely(error)) {
5409 return error;
5410 }
5411 }
5412
5413 if (attr->ia_valid & ATTR_SIZE) {
5414 handle_t *handle;
5415 loff_t oldsize = inode->i_size;
5416 loff_t old_disksize;
5417 int shrink = (attr->ia_size < inode->i_size);
5418
5419 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
5420 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5421
5422 if (attr->ia_size > sbi->s_bitmap_maxbytes) {
5423 return -EFBIG;
5424 }
5425 }
5426 if (!S_ISREG(inode->i_mode)) {
5427 return -EINVAL;
5428 }
5429
5430 if (attr->ia_size == inode->i_size)
5431 inc_ivers = false;
5432
5433 if (shrink) {
5434 if (ext4_should_order_data(inode)) {
5435 error = ext4_begin_ordered_truncate(inode,
5436 attr->ia_size);
5437 if (error)
5438 goto err_out;
5439 }
5440 /*
5441 * Blocks are going to be removed from the inode. Wait
5442 * for dio in flight.
5443 */
5444 inode_dio_wait(inode);
5445 }
5446
5447 filemap_invalidate_lock(inode->i_mapping);
5448
5449 rc = ext4_break_layouts(inode);
5450 if (rc) {
5451 filemap_invalidate_unlock(inode->i_mapping);
5452 goto err_out;
5453 }
5454
5455 if (attr->ia_size != inode->i_size) {
5456 handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
5457 if (IS_ERR(handle)) {
5458 error = PTR_ERR(handle);
5459 goto out_mmap_sem;
5460 }
5461 if (ext4_handle_valid(handle) && shrink) {
5462 error = ext4_orphan_add(handle, inode);
5463 orphan = 1;
5464 }
5465 /*
5466 * Update c/mtime on truncate up, ext4_truncate() will
5467 * update c/mtime in shrink case below
5468 */
5469 if (!shrink)
5470 inode_set_mtime_to_ts(inode,
5471 inode_set_ctime_current(inode));
5472
5473 if (shrink)
5474 ext4_fc_track_range(handle, inode,
5475 (attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
5476 inode->i_sb->s_blocksize_bits,
5477 EXT_MAX_BLOCKS - 1);
5478 else
5479 ext4_fc_track_range(
5480 handle, inode,
5481 (oldsize > 0 ? oldsize - 1 : oldsize) >>
5482 inode->i_sb->s_blocksize_bits,
5483 (attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
5484 inode->i_sb->s_blocksize_bits);
5485
5486 down_write(&EXT4_I(inode)->i_data_sem);
5487 old_disksize = EXT4_I(inode)->i_disksize;
5488 EXT4_I(inode)->i_disksize = attr->ia_size;
5489 rc = ext4_mark_inode_dirty(handle, inode);
5490 if (!error)
5491 error = rc;
5492 /*
5493 * We have to update i_size under i_data_sem together
5494 * with i_disksize to avoid races with writeback code
5495 * running ext4_wb_update_i_disksize().
5496 */
5497 if (!error)
5498 i_size_write(inode, attr->ia_size);
5499 else
5500 EXT4_I(inode)->i_disksize = old_disksize;
5501 up_write(&EXT4_I(inode)->i_data_sem);
5502 ext4_journal_stop(handle);
5503 if (error)
5504 goto out_mmap_sem;
5505 if (!shrink) {
5506 pagecache_isize_extended(inode, oldsize,
5507 inode->i_size);
5508 } else if (ext4_should_journal_data(inode)) {
5509 ext4_wait_for_tail_page_commit(inode);
5510 }
5511 }
5512
5513 /*
5514 * Truncate pagecache after we've waited for commit
5515 * in data=journal mode to make pages freeable.
5516 */
5517 truncate_pagecache(inode, inode->i_size);
5518 /*
5519 * Call ext4_truncate() even if i_size didn't change to
5520 * truncate possible preallocated blocks.
5521 */
5522 if (attr->ia_size <= oldsize) {
5523 rc = ext4_truncate(inode);
5524 if (rc)
5525 error = rc;
5526 }
5527 out_mmap_sem:
5528 filemap_invalidate_unlock(inode->i_mapping);
5529 }
5530
5531 if (!error) {
5532 if (inc_ivers)
5533 inode_inc_iversion(inode);
5534 setattr_copy(idmap, inode, attr);
5535 mark_inode_dirty(inode);
5536 }
5537
5538 /*
5539 * If the call to ext4_truncate failed to get a transaction handle at
5540 * all, we need to clean up the in-core orphan list manually.
5541 */
5542 if (orphan && inode->i_nlink)
5543 ext4_orphan_del(NULL, inode);
5544
5545 if (!error && (ia_valid & ATTR_MODE))
5546 rc = posix_acl_chmod(idmap, dentry, inode->i_mode);
5547
5548 err_out:
5549 if (error)
5550 ext4_std_error(inode->i_sb, error);
5551 if (!error)
5552 error = rc;
5553 return error;
5554 }
5555
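/*
 * Returns 0 if direct I/O is unsupported for this inode, 1 if the
 * iomap/bdev defaults apply, or the required alignment in bytes (the
 * filesystem block size) for encrypted files.
 */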
5556 u32 ext4_dio_alignment(struct inode *inode)
5557 {
5558 if (fsverity_active(inode))
5559 return 0;
5560 if (ext4_should_journal_data(inode))
5561 return 0;
5562 if (ext4_has_inline_data(inode))
5563 return 0;
5564 if (IS_ENCRYPTED(inode)) {
5565 if (!fscrypt_dio_supported(inode))
5566 return 0;
5567 return i_blocksize(inode);
5568 }
5569 return 1; /* use the iomap defaults */
5570 }
5571
5572 int ext4_getattr(struct mnt_idmap *idmap, const struct path *path,
5573 struct kstat *stat, u32 request_mask, unsigned int query_flags)
5574 {
5575 struct inode *inode = d_inode(path->dentry);
5576 struct ext4_inode *raw_inode;
5577 struct ext4_inode_info *ei = EXT4_I(inode);
5578 unsigned int flags;
5579
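/*
 * Note: raw_inode is never assigned below; EXT4_FITS_IN_INODE() only
 * uses the declared type of its first argument (via offsetof/sizeof),
 * not its value.
 */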
5580 if ((request_mask & STATX_BTIME) &&
5581 EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
5582 stat->result_mask |= STATX_BTIME;
5583 stat->btime.tv_sec = ei->i_crtime.tv_sec;
5584 stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
5585 }
5586
5587 /*
5588 * Return the DIO alignment restrictions if requested. We only return
5589 * this information when requested, since on encrypted files it might
5590 * take a fair bit of work to get if the file wasn't opened recently.
5591 */
5592 if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
5593 u32 dio_align = ext4_dio_alignment(inode);
5594
5595 stat->result_mask |= STATX_DIOALIGN;
5596 if (dio_align == 1) {
5597 struct block_device *bdev = inode->i_sb->s_bdev;
5598
5599 /* iomap defaults */
5600 stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
5601 stat->dio_offset_align = bdev_logical_block_size(bdev);
5602 } else {
5603 stat->dio_mem_align = dio_align;
5604 stat->dio_offset_align = dio_align;
5605 }
5606 }
5607
5608 flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
5609 if (flags & EXT4_APPEND_FL)
5610 stat->attributes |= STATX_ATTR_APPEND;
5611 if (flags & EXT4_COMPR_FL)
5612 stat->attributes |= STATX_ATTR_COMPRESSED;
5613 if (flags & EXT4_ENCRYPT_FL)
5614 stat->attributes |= STATX_ATTR_ENCRYPTED;
5615 if (flags & EXT4_IMMUTABLE_FL)
5616 stat->attributes |= STATX_ATTR_IMMUTABLE;
5617 if (flags & EXT4_NODUMP_FL)
5618 stat->attributes |= STATX_ATTR_NODUMP;
5619 if (flags & EXT4_VERITY_FL)
5620 stat->attributes |= STATX_ATTR_VERITY;
5621
5622 stat->attributes_mask |= (STATX_ATTR_APPEND |
5623 STATX_ATTR_COMPRESSED |
5624 STATX_ATTR_ENCRYPTED |
5625 STATX_ATTR_IMMUTABLE |
5626 STATX_ATTR_NODUMP |
5627 STATX_ATTR_VERITY);
5628
5629 generic_fillattr(idmap, request_mask, inode, stat);
5630 return 0;
5631 }
5632
5633 int ext4_file_getattr(struct mnt_idmap *idmap,
5634 const struct path *path, struct kstat *stat,
5635 u32 request_mask, unsigned int query_flags)
5636 {
5637 struct inode *inode = d_inode(path->dentry);
5638 u64 delalloc_blocks;
5639
5640 ext4_getattr(idmap, path, stat, request_mask, query_flags);
5641
5642 /*
5643 * If there is inline data in the inode, the inode will normally not
5644 * have data blocks allocated (it may have an external xattr block).
5645 * Report at least one sector for such files, so tools like tar, rsync,
5646 * others don't incorrectly think the file is completely sparse.
5647 */
5648 if (unlikely(ext4_has_inline_data(inode)))
5649 stat->blocks += (stat->size + 511) >> 9;
5650
5651 /*
5652 * We can't update i_blocks if the block allocation is delayed;
5653 * otherwise, in the case of a system crash before the real block
5654 * allocation is done, we would have i_blocks inconsistent with
5655 * the on-disk file blocks.
5656 * We always keep i_blocks updated together with the real
5657 * allocation. But so as not to confuse userspace, stat
5658 * will return blocks that include the delayed allocation
5659 * blocks for this file.
5660 */
5661 delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
5662 EXT4_I(inode)->i_reserved_data_blocks);
5663 stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
5664 return 0;
5665 }
5666
5667 static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
5668 int pextents)
5669 {
5670 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5671 return ext4_ind_trans_blocks(inode, lblocks);
5672 return ext4_ext_index_trans_blocks(inode, pextents);
5673 }
5674
5675 /*
5676 * Account for index blocks, block group bitmaps and block group
5677 * descriptor blocks if we modify data blocks and index blocks. In the
5678 * worst case, the index blocks are spread over different block groups.
5679 *
5680 * If data blocks are discontiguous, they may spread over different
5681 * block groups too. Even if they are contiguous, with flexbg they
5682 * could still cross a block group boundary.
5683 *
5684 * Also account for superblock, inode, quota and xattr blocks
5685 */
5686 static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
5687 int pextents)
5688 {
5689 ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
5690 int gdpblocks;
5691 int idxblocks;
5692 int ret;
5693
5694 /*
5695 * How many index blocks do we need to touch to map @lblocks logical
5696 * blocks to @pextents physical extents?
5697 */
5698 idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
5699
5700 ret = idxblocks;
5701
5702 /*
5703 * Now let's see how many group bitmaps and group descriptors need
5704 * to be accounted for
5705 */
5706 groups = idxblocks + pextents;
5707 gdpblocks = groups;
5708 if (groups > ngroups)
5709 groups = ngroups;
5710 if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
5711 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
5712
5713 /* bitmaps and block group descriptor blocks */
5714 ret += groups + gdpblocks;
5715
5716 /* Blocks for super block, inode, quota and xattr blocks */
5717 ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
5718
5719 return ret;
5720 }
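/*
 * Worked example (illustrative): with idxblocks == 3, pextents == 2 and
 * plenty of block groups, this reserves 3 index blocks + 5 bitmap
 * blocks + min(5, s_gdb_count) descriptor blocks +
 * EXT4_META_TRANS_BLOCKS(sb) credits.
 */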
5721
5722 /*
5723 * Calculate the total number of credits to reserve to fit
5724 * the modification of a single page into a single transaction,
5725 * which may include multiple chunks of block allocations.
5726 *
5727 * This could be called via ext4_write_begin()
5728 *
5729 * We need to consider the worst case, when
5730 * we allocate one new block per extent.
5731 */
5732 int ext4_writepage_trans_blocks(struct inode *inode)
5733 {
5734 int bpp = ext4_journal_blocks_per_page(inode);
5735 int ret;
5736
5737 ret = ext4_meta_trans_blocks(inode, bpp, bpp);
5738
5739 /* Account for data blocks for journalled mode */
5740 if (ext4_should_journal_data(inode))
5741 ret += bpp;
5742 return ret;
5743 }
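/*
 * For example (illustrative), with a journal and 4k pages: 4k blocks
 * give bpp == 1, 1k blocks give bpp == 4; data=journal then adds those
 * bpp data blocks on top of the metadata credits.
 */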
5744
5745 /*
5746 * Calculate the journal credits for a chunk of data modification.
5747 *
5748 * This is called from DIO, fallocate or whoever calls
5749 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
5750 *
5751 * Journal buffers for data blocks are not included here, as DIO
5752 * and fallocate do not need to journal data buffers.
5753 */
5754 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
5755 {
5756 return ext4_meta_trans_blocks(inode, nrblocks, 1);
5757 }
5758
5759 /*
5760 * The caller must have previously called ext4_reserve_inode_write().
5761 * Given this, we know that the caller already has write access to iloc->bh.
5762 */
5763 int ext4_mark_iloc_dirty(handle_t *handle,
5764 struct inode *inode, struct ext4_iloc *iloc)
5765 {
5766 int err = 0;
5767
5768 if (unlikely(ext4_forced_shutdown(inode->i_sb))) {
5769 put_bh(iloc->bh);
5770 return -EIO;
5771 }
5772 ext4_fc_track_inode(handle, inode);
5773
5774 /* the do_update_inode consumes one bh->b_count */
5775 get_bh(iloc->bh);
5776
5777 /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
5778 err = ext4_do_update_inode(handle, inode, iloc);
5779 put_bh(iloc->bh);
5780 return err;
5781 }
5782
5783 /*
5784 * On success, we end up with an outstanding reference count against
5785 * iloc->bh. This _must_ be cleaned up later.
5786 */
5787
5788 int
5789 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
5790 struct ext4_iloc *iloc)
5791 {
5792 int err;
5793
5794 if (unlikely(ext4_forced_shutdown(inode->i_sb)))
5795 return -EIO;
5796
5797 err = ext4_get_inode_loc(inode, iloc);
5798 if (!err) {
5799 BUFFER_TRACE(iloc->bh, "get_write_access");
5800 err = ext4_journal_get_write_access(handle, inode->i_sb,
5801 iloc->bh, EXT4_JTR_NONE);
5802 if (err) {
5803 brelse(iloc->bh);
5804 iloc->bh = NULL;
5805 }
5806 }
5807 ext4_std_error(inode->i_sb, err);
5808 return err;
5809 }
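/*
 * Illustrative sketch (not part of this file) of the canonical pairing,
 * assuming a handle with sufficient credits is already running:
 *
 *	struct ext4_iloc iloc;
 *	err = ext4_reserve_inode_write(handle, inode, &iloc);
 *	if (!err) {
 *		... update the in-core inode ...
 *		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 *	}
 *
 * This is exactly the pattern __ext4_mark_inode_dirty() uses below.
 */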
5810
5811 static int __ext4_expand_extra_isize(struct inode *inode,
5812 unsigned int new_extra_isize,
5813 struct ext4_iloc *iloc,
5814 handle_t *handle, int *no_expand)
5815 {
5816 struct ext4_inode *raw_inode;
5817 struct ext4_xattr_ibody_header *header;
5818 unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
5819 struct ext4_inode_info *ei = EXT4_I(inode);
5820 int error;
5821
5822 /* this was checked at iget time, but double check for good measure */
5823 if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
5824 (ei->i_extra_isize & 3)) {
5825 EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
5826 ei->i_extra_isize,
5827 EXT4_INODE_SIZE(inode->i_sb));
5828 return -EFSCORRUPTED;
5829 }
5830 if ((new_extra_isize < ei->i_extra_isize) ||
5831 (new_extra_isize < 4) ||
5832 (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
5833 return -EINVAL; /* Should never happen */
5834
5835 raw_inode = ext4_raw_inode(iloc);
5836
5837 header = IHDR(inode, raw_inode);
5838
5839 /* No extended attributes present */
5840 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
5841 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
5842 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
5843 EXT4_I(inode)->i_extra_isize, 0,
5844 new_extra_isize - EXT4_I(inode)->i_extra_isize);
5845 EXT4_I(inode)->i_extra_isize = new_extra_isize;
5846 return 0;
5847 }
5848
5849 /*
5850 * We may need to allocate an external xattr block, so we need quotas
5851 * initialized. We can be called here with various locks held, so we
5852 * cannot afford to initialize quotas ourselves. So just bail.
5853 */
5854 if (dquot_initialize_needed(inode))
5855 return -EAGAIN;
5856
5857 /* try to expand with EAs present */
5858 error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
5859 raw_inode, handle);
5860 if (error) {
5861 /*
5862 * Inode size expansion failed; don't try again
5863 */
5864 *no_expand = 1;
5865 }
5866
5867 return error;
5868 }
5869
5870 /*
5871 * Expand an inode by new_extra_isize bytes.
5872 * Returns 0 on success or negative error number on failure.
5873 */
5874 static int ext4_try_to_expand_extra_isize(struct inode *inode,
5875 unsigned int new_extra_isize,
5876 struct ext4_iloc iloc,
5877 handle_t *handle)
5878 {
5879 int no_expand;
5880 int error;
5881
5882 if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
5883 return -EOVERFLOW;
5884
5885 /*
5886 * In nojournal mode, we can immediately attempt to expand
5887 * the inode. When journaled, we first need to obtain extra
5888 * buffer credits since we may write into the EA block
5889 * with this same handle. If journal_extend fails, then it will
5890 * only result in a minor loss of functionality for that inode.
5891 * If this is felt to be critical, then e2fsck should be run to
5892 * force a large enough s_min_extra_isize.
5893 */
5894 if (ext4_journal_extend(handle,
5895 EXT4_DATA_TRANS_BLOCKS(inode->i_sb), 0) != 0)
5896 return -ENOSPC;
5897
5898 if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
5899 return -EBUSY;
5900
5901 error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
5902 handle, &no_expand);
5903 ext4_write_unlock_xattr(inode, &no_expand);
5904
5905 return error;
5906 }
5907
5908 int ext4_expand_extra_isize(struct inode *inode,
5909 unsigned int new_extra_isize,
5910 struct ext4_iloc *iloc)
5911 {
5912 handle_t *handle;
5913 int no_expand;
5914 int error, rc;
5915
5916 if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
5917 brelse(iloc->bh);
5918 return -EOVERFLOW;
5919 }
5920
5921 handle = ext4_journal_start(inode, EXT4_HT_INODE,
5922 EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
5923 if (IS_ERR(handle)) {
5924 error = PTR_ERR(handle);
5925 brelse(iloc->bh);
5926 return error;
5927 }
5928
5929 ext4_write_lock_xattr(inode, &no_expand);
5930
5931 BUFFER_TRACE(iloc->bh, "get_write_access");
5932 error = ext4_journal_get_write_access(handle, inode->i_sb, iloc->bh,
5933 EXT4_JTR_NONE);
5934 if (error) {
5935 brelse(iloc->bh);
5936 goto out_unlock;
5937 }
5938
5939 error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
5940 handle, &no_expand);
5941
5942 rc = ext4_mark_iloc_dirty(handle, inode, iloc);
5943 if (!error)
5944 error = rc;
5945
5946 out_unlock:
5947 ext4_write_unlock_xattr(inode, &no_expand);
5948 ext4_journal_stop(handle);
5949 return error;
5950 }
5951
5952 /*
5953 * What we do here is to mark the in-core inode as clean with respect to inode
5954 * dirtiness (it may still be data-dirty).
5955 * This means that the in-core inode may be reaped by prune_icache
5956 * without having to perform any I/O. This is a very good thing,
5957 * because *any* task may call prune_icache - even ones which
5958 * have a transaction open against a different journal.
5959 *
5960 * Is this cheating? Not really. Sure, we haven't written the
5961 * inode out, but prune_icache isn't a user-visible syncing function.
5962 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
5963 * we start and wait on commits.
5964 */
5965 int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
5966 const char *func, unsigned int line)
5967 {
5968 struct ext4_iloc iloc;
5969 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5970 int err;
5971
5972 might_sleep();
5973 trace_ext4_mark_inode_dirty(inode, _RET_IP_);
5974 err = ext4_reserve_inode_write(handle, inode, &iloc);
5975 if (err)
5976 goto out;
5977
5978 if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
5979 ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
5980 iloc, handle);
5981
5982 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
5983 out:
5984 if (unlikely(err))
5985 ext4_error_inode_err(inode, func, line, 0, err,
5986 "mark_inode_dirty error");
5987 return err;
5988 }
5989
5990 /*
5991 * ext4_dirty_inode() is called from __mark_inode_dirty()
5992 *
5993 * We're really interested in the case where a file is being extended.
5994 * i_size has been changed by generic_commit_write() and we thus need
5995 * to include the updated inode in the current transaction.
5996 *
5997 * Also, dquot_alloc_block() will always dirty the inode when blocks
5998 * are allocated to the file.
5999 *
6000 * If the inode is marked synchronous, we don't honour that here - doing
6001 * so would cause a commit on atime updates, which we don't bother doing.
6002 * We handle synchronous inodes at the highest possible level.
6003 */
6004 void ext4_dirty_inode(struct inode *inode, int flags)
6005 {
6006 handle_t *handle;
6007
6008 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
6009 if (IS_ERR(handle))
6010 return;
6011 ext4_mark_inode_dirty(handle, inode);
6012 ext4_journal_stop(handle);
6013 }
6014
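/*
 * Switch per-inode data journalling on (val != 0) or off. Reached when
 * the inode's journal-data flag is toggled (e.g. via the FS_IOC_SETFLAGS
 * ioctl path).
 */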
6015 int ext4_change_inode_journal_flag(struct inode *inode, int val)
6016 {
6017 journal_t *journal;
6018 handle_t *handle;
6019 int err;
6020 int alloc_ctx;
6021
6022 /*
6023 * We have to be very careful here: changing a data block's
6024 * journaling status dynamically is dangerous. If we write a
6025 * data block to the journal, change the status and then delete
6026 * that block, we risk forgetting to revoke the old log record
6027 * from the journal and so a subsequent replay can corrupt data.
6028 * So, first we make sure that the journal is empty and that
6029 * nobody is changing anything.
6030 */
6031
6032 journal = EXT4_JOURNAL(inode);
6033 if (!journal)
6034 return 0;
6035 if (is_journal_aborted(journal))
6036 return -EROFS;
6037
6038 /* Wait for all existing dio workers */
6039 inode_dio_wait(inode);
6040
6041 /*
6042 * Before flushing the journal and switching inode's aops, we have
6043 * to flush all dirty data the inode has. There can be outstanding
6044 * delayed allocations, there can be unwritten extents created by
6045 * fallocate or buffered writes in dioread_nolock mode covered by
6046 * dirty data which can be converted only after flushing the dirty
6047 * data (and journalled aops don't know how to handle these cases).
6048 */
6049 if (val) {
6050 filemap_invalidate_lock(inode->i_mapping);
6051 err = filemap_write_and_wait(inode->i_mapping);
6052 if (err < 0) {
6053 filemap_invalidate_unlock(inode->i_mapping);
6054 return err;
6055 }
6056 }
6057
6058 alloc_ctx = ext4_writepages_down_write(inode->i_sb);
6059 jbd2_journal_lock_updates(journal);
6060
6061 /*
6062 * OK, there are no updates running now, and all cached data is
6063 * synced to disk. We are now in a completely consistent state
6064 * which doesn't have anything in the journal, and we know that
6065 * no filesystem updates are running, so it is safe to modify
6066 * the inode's in-core data-journaling state flag now.
6067 */
6068
6069 if (val)
6070 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6071 else {
6072 err = jbd2_journal_flush(journal, 0);
6073 if (err < 0) {
6074 jbd2_journal_unlock_updates(journal);
6075 ext4_writepages_up_write(inode->i_sb, alloc_ctx);
6076 return err;
6077 }
6078 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6079 }
6080 ext4_set_aops(inode);
6081
6082 jbd2_journal_unlock_updates(journal);
6083 ext4_writepages_up_write(inode->i_sb, alloc_ctx);
6084
6085 if (val)
6086 filemap_invalidate_unlock(inode->i_mapping);
6087
6088 /* Finally we can mark the inode as dirty. */
6089
6090 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
6091 if (IS_ERR(handle))
6092 return PTR_ERR(handle);
6093
6094 ext4_fc_mark_ineligible(inode->i_sb,
6095 EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, handle);
6096 err = ext4_mark_inode_dirty(handle, inode);
6097 ext4_handle_sync(handle);
6098 ext4_journal_stop(handle);
6099 ext4_std_error(inode->i_sb, err);
6100
6101 return err;
6102 }
6103
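/*
 * Callback for ext4_walk_page_buffers(): returns true if @bh has no
 * on-disk mapping yet.
 */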
6104 static int ext4_bh_unmapped(handle_t *handle, struct inode *inode,
6105 struct buffer_head *bh)
6106 {
6107 return !buffer_mapped(bh);
6108 }
6109
6110 vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
6111 {
6112 struct vm_area_struct *vma = vmf->vma;
6113 struct folio *folio = page_folio(vmf->page);
6114 loff_t size;
6115 unsigned long len;
6116 int err;
6117 vm_fault_t ret;
6118 struct file *file = vma->vm_file;
6119 struct inode *inode = file_inode(file);
6120 struct address_space *mapping = inode->i_mapping;
6121 handle_t *handle;
6122 get_block_t *get_block;
6123 int retries = 0;
6124
6125 if (unlikely(IS_IMMUTABLE(inode)))
6126 return VM_FAULT_SIGBUS;
6127
6128 sb_start_pagefault(inode->i_sb);
6129 file_update_time(vma->vm_file);
6130
6131 filemap_invalidate_lock_shared(mapping);
6132
6133 err = ext4_convert_inline_data(inode);
6134 if (err)
6135 goto out_ret;
6136
6137 /*
6138 * On data journalling we skip straight to the transaction handle:
6139 * there's no delalloc; the page-truncated check happens later; the
6140 * early return w/ all buffers mapped (which calculates size/len) can't
6141 * be used; and there's no dioread_nolock, so only ext4_get_block.
6142 */
6143 if (ext4_should_journal_data(inode))
6144 goto retry_alloc;
6145
6146 /* Delalloc case is easy... */
6147 if (test_opt(inode->i_sb, DELALLOC) &&
6148 !ext4_nonda_switch(inode->i_sb)) {
6149 do {
6150 err = block_page_mkwrite(vma, vmf,
6151 ext4_da_get_block_prep);
6152 } while (err == -ENOSPC &&
6153 ext4_should_retry_alloc(inode->i_sb, &retries));
6154 goto out_ret;
6155 }
6156
6157 folio_lock(folio);
6158 size = i_size_read(inode);
6159 /* Page got truncated from under us? */
6160 if (folio->mapping != mapping || folio_pos(folio) > size) {
6161 folio_unlock(folio);
6162 ret = VM_FAULT_NOPAGE;
6163 goto out;
6164 }
6165
6166 len = folio_size(folio);
6167 if (folio_pos(folio) + len > size)
6168 len = size - folio_pos(folio);
6169 /*
6170 * Return if we have all the buffers mapped. This avoids the need to do
6171 * journal_start/journal_stop which can block and take a long time
6172 *
6173 * This cannot be done for data journalling, as we have to add the
6174 * inode to the transaction's list to writeprotect pages on commit.
6175 */
6176 if (folio_buffers(folio)) {
6177 if (!ext4_walk_page_buffers(NULL, inode, folio_buffers(folio),
6178 0, len, NULL,
6179 ext4_bh_unmapped)) {
6180 /* Wait so that we don't change page under IO */
6181 folio_wait_stable(folio);
6182 ret = VM_FAULT_LOCKED;
6183 goto out;
6184 }
6185 }
6186 folio_unlock(folio);
6187 /* OK, we need to fill the hole... */
6188 if (ext4_should_dioread_nolock(inode))
6189 get_block = ext4_get_block_unwritten;
6190 else
6191 get_block = ext4_get_block;
6192 retry_alloc:
6193 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
6194 ext4_writepage_trans_blocks(inode));
6195 if (IS_ERR(handle)) {
6196 ret = VM_FAULT_SIGBUS;
6197 goto out;
6198 }
6199 /*
6200 * Data journalling can't use block_page_mkwrite() because it
6201 * will set_buffer_dirty() before do_journal_get_write_access()
6202 * thus might hit warning messages for dirty metadata buffers.
6203 */
6204 if (!ext4_should_journal_data(inode)) {
6205 err = block_page_mkwrite(vma, vmf, get_block);
6206 } else {
6207 folio_lock(folio);
6208 size = i_size_read(inode);
6209 /* Page got truncated from under us? */
6210 if (folio->mapping != mapping || folio_pos(folio) > size) {
6211 ret = VM_FAULT_NOPAGE;
6212 goto out_error;
6213 }
6214
6215 len = folio_size(folio);
6216 if (folio_pos(folio) + len > size)
6217 len = size - folio_pos(folio);
6218
6219 err = __block_write_begin(folio, 0, len, ext4_get_block);
6220 if (!err) {
6221 ret = VM_FAULT_SIGBUS;
6222 if (ext4_journal_folio_buffers(handle, folio, len))
6223 goto out_error;
6224 } else {
6225 folio_unlock(folio);
6226 }
6227 }
6228 ext4_journal_stop(handle);
6229 if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
6230 goto retry_alloc;
6231 out_ret:
6232 ret = vmf_fs_error(err);
6233 out:
6234 filemap_invalidate_unlock_shared(mapping);
6235 sb_end_pagefault(inode->i_sb);
6236 return ret;
6237 out_error:
6238 folio_unlock(folio);
6239 ext4_journal_stop(handle);
6240 goto out;
6241 }
6242