1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * linux/fs/ext4/inode.c
4 *
5 * Copyright (C) 1992, 1993, 1994, 1995
6 * Remy Card (card@masi.ibp.fr)
7 * Laboratoire MASI - Institut Blaise Pascal
8 * Universite Pierre et Marie Curie (Paris VI)
9 *
10 * from
11 *
12 * linux/fs/minix/inode.c
13 *
14 * Copyright (C) 1991, 1992 Linus Torvalds
15 *
16 * 64-bit file support on 64-bit platforms by Jakub Jelinek
17 * (jj@sunsite.ms.mff.cuni.cz)
18 *
19 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
20 */
21
22 #include <linux/fs.h>
23 #include <linux/mount.h>
24 #include <linux/time.h>
25 #include <linux/highuid.h>
26 #include <linux/pagemap.h>
27 #include <linux/dax.h>
28 #include <linux/quotaops.h>
29 #include <linux/string.h>
30 #include <linux/buffer_head.h>
31 #include <linux/writeback.h>
32 #include <linux/pagevec.h>
33 #include <linux/mpage.h>
34 #include <linux/rmap.h>
35 #include <linux/namei.h>
36 #include <linux/uio.h>
37 #include <linux/bio.h>
38 #include <linux/workqueue.h>
39 #include <linux/kernel.h>
40 #include <linux/printk.h>
41 #include <linux/slab.h>
42 #include <linux/bitops.h>
43 #include <linux/iomap.h>
44 #include <linux/iversion.h>
45
46 #include "ext4_jbd2.h"
47 #include "xattr.h"
48 #include "acl.h"
49 #include "truncate.h"
50
51 #include <kunit/static_stub.h>
52
53 #include <trace/events/ext4.h>
54
55 static void ext4_journalled_zero_new_buffers(handle_t *handle,
56 struct inode *inode,
57 struct folio *folio,
58 unsigned from, unsigned to);
59
60 static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
61 struct ext4_inode_info *ei)
62 {
63 __u32 csum;
64 __u16 dummy_csum = 0;
65 int offset = offsetof(struct ext4_inode, i_checksum_lo);
66 unsigned int csum_size = sizeof(dummy_csum);
67
68 csum = ext4_chksum(ei->i_csum_seed, (__u8 *)raw, offset);
69 csum = ext4_chksum(csum, (__u8 *)&dummy_csum, csum_size);
70 offset += csum_size;
71 csum = ext4_chksum(csum, (__u8 *)raw + offset,
72 EXT4_GOOD_OLD_INODE_SIZE - offset);
73
74 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
75 offset = offsetof(struct ext4_inode, i_checksum_hi);
76 csum = ext4_chksum(csum, (__u8 *)raw + EXT4_GOOD_OLD_INODE_SIZE,
77 offset - EXT4_GOOD_OLD_INODE_SIZE);
78 if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
79 csum = ext4_chksum(csum, (__u8 *)&dummy_csum,
80 csum_size);
81 offset += csum_size;
82 }
83 csum = ext4_chksum(csum, (__u8 *)raw + offset,
84 EXT4_INODE_SIZE(inode->i_sb) - offset);
85 }
86
87 return csum;
88 }
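
/*
 * Worked example (for illustration): on a 256-byte inode with room for
 * i_checksum_hi, a computed csum of 0xA1B2C3D4 is stored by
 * ext4_inode_csum_set() below as i_checksum_lo = 0xC3D4 and
 * i_checksum_hi = 0xA1B2. On a 128-byte inode only the low 16 bits
 * survive, which is why ext4_inode_csum_verify() masks the calculated
 * value with 0xFFFF in that case.
 */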
89
90 static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
91 struct ext4_inode_info *ei)
92 {
93 __u32 provided, calculated;
94
95 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
96 cpu_to_le32(EXT4_OS_LINUX) ||
97 !ext4_has_feature_metadata_csum(inode->i_sb))
98 return 1;
99
100 provided = le16_to_cpu(raw->i_checksum_lo);
101 calculated = ext4_inode_csum(inode, raw, ei);
102 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
103 EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
104 provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
105 else
106 calculated &= 0xFFFF;
107
108 return provided == calculated;
109 }
110
111 void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
112 struct ext4_inode_info *ei)
113 {
114 __u32 csum;
115
116 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
117 cpu_to_le32(EXT4_OS_LINUX) ||
118 !ext4_has_feature_metadata_csum(inode->i_sb))
119 return;
120
121 csum = ext4_inode_csum(inode, raw, ei);
122 raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
123 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
124 EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
125 raw->i_checksum_hi = cpu_to_le16(csum >> 16);
126 }
127
128 static inline int ext4_begin_ordered_truncate(struct inode *inode,
129 loff_t new_size)
130 {
131 struct jbd2_inode *jinode = READ_ONCE(EXT4_I(inode)->jinode);
132
133 trace_ext4_begin_ordered_truncate(inode, new_size);
134 /*
135 * If jinode is zero, then we never opened the file for
136 * writing, so there's no need to call
137 * jbd2_journal_begin_ordered_truncate() since there are no
138 * outstanding writes that we need to flush.
139 */
140 if (!jinode)
141 return 0;
142 return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
143 jinode,
144 new_size);
145 }
146
147 /*
148 * Test whether an inode is a fast symlink.
149 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
150 */
151 int ext4_inode_is_fast_symlink(struct inode *inode)
152 {
153 if (!ext4_has_feature_ea_inode(inode->i_sb)) {
154 int ea_blocks = EXT4_I(inode)->i_file_acl ?
155 EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
156
157 if (ext4_has_inline_data(inode))
158 return 0;
159
160 return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
161 }
162 return S_ISLNK(inode->i_mode) && inode->i_size &&
163 (inode->i_size < EXT4_N_BLOCKS * 4);
164 }
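
/*
 * For illustration: i_data consists of EXT4_N_BLOCKS (15) 32-bit slots,
 * i.e. 60 bytes, so a target such as "dir/target" fits inline and keeps
 * i_size below EXT4_N_BLOCKS * 4; a longer target is written to a data
 * block and the symlink is no longer "fast".
 */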
165
166 /*
167 * Called at the last iput() if i_nlink is zero.
168 */
169 void ext4_evict_inode(struct inode *inode)
170 {
171 handle_t *handle;
172 int err;
173 /*
174 * Credits for final inode cleanup and freeing:
175 * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
176 * (xattr block freeing), bitmap, group descriptor (inode freeing)
177 */
178 int extra_credits = 6;
179 struct ext4_xattr_inode_array *ea_inode_array = NULL;
180 bool freeze_protected = false;
181
182 trace_ext4_evict_inode(inode);
183
184 dax_break_layout_final(inode);
185
186 if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
187 ext4_evict_ea_inode(inode);
188 if (inode->i_nlink) {
189 /*
190 * Any dirty pages left at this point will be lost, and the
191 * user could later see stale data.
192 */
193 if (unlikely(!ext4_emergency_state(inode->i_sb) &&
194 mapping_tagged(&inode->i_data, PAGECACHE_TAG_DIRTY)))
195 ext4_warning_inode(inode, "data will be lost");
196
197 truncate_inode_pages_final(&inode->i_data);
198 /* Avoid mballoc special inode which has no proper iops */
199 if (!EXT4_SB(inode->i_sb)->s_journal)
200 mmb_sync(&EXT4_I(inode)->i_metadata_bhs);
201 goto no_delete;
202 }
203
204 if (is_bad_inode(inode))
205 goto no_delete;
206 dquot_initialize(inode);
207
208 if (ext4_should_order_data(inode))
209 ext4_begin_ordered_truncate(inode, 0);
210 truncate_inode_pages_final(&inode->i_data);
211
212 /*
213 * For inodes with journalled data, transaction commit could have
214 * dirtied the inode. And for inodes with dioread_nolock, unwritten
215 * extents converting worker could merge extents and also have dirtied
216 * the inode. Flush worker is ignoring it because of I_FREEING flag but
217 * we still need to remove the inode from the writeback lists.
218 */
219 inode_io_list_del(inode);
220
221 /*
222 * Protect us against freezing - iput() caller didn't have to have any
223 * protection against it. When we are in a running transaction though,
224 * we are already protected against freezing and we cannot grab further
225 * protection due to lock ordering constraints.
226 */
227 if (!ext4_journal_current_handle()) {
228 sb_start_intwrite(inode->i_sb);
229 freeze_protected = true;
230 }
231
232 if (!IS_NOQUOTA(inode))
233 extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);
234
235 /*
236 * Block bitmap, group descriptor, and inode are accounted in both
237 * ext4_blocks_for_truncate() and extra_credits. So subtract 3.
238 */
239 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
240 ext4_blocks_for_truncate(inode) + extra_credits - 3);
241 if (IS_ERR(handle)) {
242 ext4_std_error(inode->i_sb, PTR_ERR(handle));
243 /*
244 * If we're going to skip the normal cleanup, we still need to
245 * make sure that the in-core orphan linked list is properly
246 * cleaned up.
247 */
248 ext4_orphan_del(NULL, inode);
249 if (freeze_protected)
250 sb_end_intwrite(inode->i_sb);
251 goto no_delete;
252 }
253
254 if (IS_SYNC(inode))
255 ext4_handle_sync(handle);
256
257 /*
258 * Set inode->i_size to 0 before calling ext4_truncate(). We need
259 * special handling of symlinks here because i_size is used to
260 * determine whether ext4_inode_info->i_data contains symlink data or
261 * block mappings. Setting i_size to 0 will remove its fast symlink
262 * status. Erase i_data so that it becomes a valid empty block map.
263 */
264 if (ext4_inode_is_fast_symlink(inode))
265 memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
266 inode->i_size = 0;
267 err = ext4_mark_inode_dirty(handle, inode);
268 if (err) {
269 ext4_warning(inode->i_sb,
270 "couldn't mark inode dirty (err %d)", err);
271 goto stop_handle;
272 }
273 if (inode->i_blocks) {
274 err = ext4_truncate(inode);
275 if (err) {
276 ext4_error_err(inode->i_sb, -err,
277 "couldn't truncate inode %llu (err %d)",
278 inode->i_ino, err);
279 goto stop_handle;
280 }
281 }
282
283 /* Remove xattr references. */
284 err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
285 extra_credits);
286 if (err) {
287 ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
288 stop_handle:
289 ext4_journal_stop(handle);
290 ext4_orphan_del(NULL, inode);
291 if (freeze_protected)
292 sb_end_intwrite(inode->i_sb);
293 ext4_xattr_inode_array_free(ea_inode_array);
294 goto no_delete;
295 }
296
297 /*
298 * Kill off the orphan record which ext4_truncate created.
299 * AKPM: I think this can be inside the above `if'.
300 * Note that ext4_orphan_del() has to be able to cope with the
301 * deletion of a non-existent orphan - this is because we don't
302 * know if ext4_truncate() actually created an orphan record.
303 * (Well, we could do this if we need to, but heck - it works)
304 */
305 ext4_orphan_del(handle, inode);
306 EXT4_I(inode)->i_dtime = (__u32)ktime_get_real_seconds();
307
308 /*
309 * One subtle ordering requirement: if anything has gone wrong
310 * (transaction abort, IO errors, whatever), then we can still
311 * do these next steps (the fs will already have been marked as
312 * having errors), but we can't free the inode if the mark_dirty
313 * fails.
314 */
315 if (ext4_mark_inode_dirty(handle, inode))
316 /* If that failed, just do the required in-core inode clear. */
317 ext4_clear_inode(inode);
318 else
319 ext4_free_inode(handle, inode);
320 ext4_journal_stop(handle);
321 if (freeze_protected)
322 sb_end_intwrite(inode->i_sb);
323 ext4_xattr_inode_array_free(ea_inode_array);
324 return;
325 no_delete:
326 /*
327 * Catch the case where something else accidentally dirties the
328 * evicting inode, which could cause use-after-free issues later.
329 */
330 WARN_ON_ONCE(!list_empty_careful(&inode->i_io_list));
331
332 if (!list_empty(&EXT4_I(inode)->i_fc_list))
333 ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
334 ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
335 }
336
337 #ifdef CONFIG_QUOTA
338 qsize_t *ext4_get_reserved_space(struct inode *inode)
339 {
340 return &EXT4_I(inode)->i_reserved_quota;
341 }
342 #endif
343
344 /*
345 * Called with i_data_sem down, which is important since we can call
346 * ext4_discard_preallocations() from here.
347 */
348 void ext4_da_update_reserve_space(struct inode *inode,
349 int used, int quota_claim)
350 {
351 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
352 struct ext4_inode_info *ei = EXT4_I(inode);
353
354 spin_lock(&ei->i_block_reservation_lock);
355 trace_ext4_da_update_reserve_space(inode, used, quota_claim);
356 if (unlikely(used > ei->i_reserved_data_blocks)) {
357 ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
358 "with only %d reserved data blocks",
359 __func__, inode->i_ino, used,
360 ei->i_reserved_data_blocks);
361 WARN_ON(1);
362 used = ei->i_reserved_data_blocks;
363 }
364
365 /* Update per-inode reservations */
366 ei->i_reserved_data_blocks -= used;
367 percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);
368
369 spin_unlock(&ei->i_block_reservation_lock);
370
371 /* Update quota subsystem for data blocks */
372 if (quota_claim)
373 dquot_claim_block(inode, EXT4_C2B(sbi, used));
374 else {
375 /*
376 * We did fallocate at an offset that was already delayed
377 * allocated, so on delayed allocation writeback we should
378 * not re-claim the quota for fallocated blocks.
379 */
380 dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
381 }
382
383 /*
384 * If we have done all the pending block allocations and if
385 * there aren't any writers on the inode, we can discard the
386 * inode's preallocations.
387 */
388 if ((ei->i_reserved_data_blocks == 0) &&
389 !inode_is_open_for_write(inode))
390 ext4_discard_preallocations(inode);
391 }
392
393 static int __check_block_validity(struct inode *inode, const char *func,
394 unsigned int line,
395 struct ext4_map_blocks *map)
396 {
397 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
398
399 if (journal && inode == journal->j_inode)
400 return 0;
401
402 if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
403 ext4_error_inode(inode, func, line, map->m_pblk,
404 "lblock %lu mapped to illegal pblock %llu "
405 "(length %d)", (unsigned long) map->m_lblk,
406 map->m_pblk, map->m_len);
407 return -EFSCORRUPTED;
408 }
409 return 0;
410 }
411
412 int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
413 ext4_lblk_t len)
414 {
415 int ret;
416
417 KUNIT_STATIC_STUB_REDIRECT(ext4_issue_zeroout, inode, lblk, pblk, len);
418
419 if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
420 return fscrypt_zeroout_range(inode, lblk, pblk, len);
424
425 ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
426 if (ret > 0)
427 ret = 0;
428
429 return ret;
430 }
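
/*
 * Note on units: both pblk and len above are in filesystem blocks;
 * sb_issue_zeroout() converts them to 512-byte sectors for the block
 * layer using the superblock's block size.
 */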
431
432 /*
433 * For generic regular files, when updating the extent tree, Ext4 should
434 * hold the i_rwsem and invalidate_lock exclusively. This ensures
435 * exclusion against concurrent page faults, as well as reads and writes.
436 */
437 #ifdef CONFIG_EXT4_DEBUG
438 void ext4_check_map_extents_env(struct inode *inode)
439 {
440 if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
441 return;
442
443 if (!S_ISREG(inode->i_mode) ||
444 IS_NOQUOTA(inode) || IS_VERITY(inode) ||
445 is_special_ino(inode->i_sb, inode->i_ino) ||
446 (inode_state_read_once(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) ||
447 ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE) ||
448 ext4_verity_in_progress(inode))
449 return;
450
451 WARN_ON_ONCE(!inode_is_locked(inode) &&
452 !rwsem_is_locked(&inode->i_mapping->invalidate_lock));
453 }
454 #else
455 void ext4_check_map_extents_env(struct inode *inode) {}
456 #endif
457
458 #define check_block_validity(inode, map) \
459 __check_block_validity((inode), __func__, __LINE__, (map))
460
461 #ifdef ES_AGGRESSIVE_TEST
462 static void ext4_map_blocks_es_recheck(handle_t *handle,
463 struct inode *inode,
464 struct ext4_map_blocks *es_map,
465 struct ext4_map_blocks *map,
466 int flags)
467 {
468 int retval;
469
470 map->m_flags = 0;
471 /*
472 * There is a race window in which the result may differ,
473 * e.g. xfstests #223 with dioread_nolock enabled. The reason
474 * is that we look up the block mapping in the extent status
475 * tree without taking i_data_sem, so in the meantime the
476 * unwritten extent could have been converted.
477 */
478 down_read(&EXT4_I(inode)->i_data_sem);
479 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
480 retval = ext4_ext_map_blocks(handle, inode, map, 0);
481 } else {
482 retval = ext4_ind_map_blocks(handle, inode, map, 0);
483 }
484 up_read((&EXT4_I(inode)->i_data_sem));
485
486 /*
487 * We don't check m_len because extents may be collapsed in the
488 * status tree, so the lengths might not be equal.
489 */
490 if (es_map->m_lblk != map->m_lblk ||
491 es_map->m_flags != map->m_flags ||
492 es_map->m_pblk != map->m_pblk) {
493 printk("ES cache assertion failed for inode: %llu "
494 "es_cached ex [%d/%d/%llu/%x] != "
495 "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
496 inode->i_ino, es_map->m_lblk, es_map->m_len,
497 es_map->m_pblk, es_map->m_flags, map->m_lblk,
498 map->m_len, map->m_pblk, map->m_flags,
499 retval, flags);
500 }
501 }
502 #endif /* ES_AGGRESSIVE_TEST */
503
504 static int ext4_map_query_blocks_next_in_leaf(handle_t *handle,
505 struct inode *inode, struct ext4_map_blocks *map,
506 unsigned int orig_mlen)
507 {
508 struct ext4_map_blocks map2;
509 unsigned int status, status2;
510 int retval;
511
512 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
513 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
514
515 WARN_ON_ONCE(!(map->m_flags & EXT4_MAP_QUERY_LAST_IN_LEAF));
516 WARN_ON_ONCE(orig_mlen <= map->m_len);
517
518 /* Prepare map2 for lookup in next leaf block */
519 map2.m_lblk = map->m_lblk + map->m_len;
520 map2.m_len = orig_mlen - map->m_len;
521 map2.m_flags = 0;
522 retval = ext4_ext_map_blocks(handle, inode, &map2, 0);
523
524 if (retval <= 0) {
525 ext4_es_cache_extent(inode, map->m_lblk, map->m_len,
526 map->m_pblk, status);
527 return map->m_len;
528 }
529
530 if (unlikely(retval != map2.m_len)) {
531 ext4_warning(inode->i_sb,
532 "ES len assertion failed for inode "
533 "%llu: retval %d != map->m_len %d",
534 inode->i_ino, retval, map2.m_len);
535 WARN_ON(1);
536 }
537
538 status2 = map2.m_flags & EXT4_MAP_UNWRITTEN ?
539 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
540
541 /*
542 * If map2 is contiguous with map, then let's insert it as a single
543 * extent in es cache and return the combined length of both the maps.
544 */
545 if (map->m_pblk + map->m_len == map2.m_pblk &&
546 status == status2) {
547 ext4_es_cache_extent(inode, map->m_lblk,
548 map->m_len + map2.m_len, map->m_pblk,
549 status);
550 map->m_len += map2.m_len;
551 } else {
552 ext4_es_cache_extent(inode, map->m_lblk, map->m_len,
553 map->m_pblk, status);
554 }
555
556 return map->m_len;
557 }
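
/*
 * Worked example (for illustration): a query for lblk 100 with
 * orig_mlen 8 finds only 3 blocks at pblk 500 before the leaf ends.
 * map2 then looks up lblk 103 for the remaining 5 blocks; if that
 * lookup returns pblk 503 with the same written/unwritten status, a
 * single 8-block extent is cached and 8 is returned, otherwise only
 * the original 3-block extent is cached.
 */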
558
559 int ext4_map_query_blocks(handle_t *handle, struct inode *inode,
560 struct ext4_map_blocks *map, int flags)
561 {
562 unsigned int status;
563 int retval;
564 unsigned int orig_mlen = map->m_len;
565
566 flags &= EXT4_EX_QUERY_FILTER;
567 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
568 retval = ext4_ext_map_blocks(handle, inode, map, flags);
569 else
570 retval = ext4_ind_map_blocks(handle, inode, map, flags);
571 if (retval < 0)
572 return retval;
573
574 /* A hole? */
575 if (retval == 0)
576 goto out;
577
578 if (unlikely(retval != map->m_len)) {
579 ext4_warning(inode->i_sb,
580 "ES len assertion failed for inode "
581 "%llu: retval %d != map->m_len %d",
582 inode->i_ino, retval, map->m_len);
583 WARN_ON(1);
584 }
585
586 /*
587 * No need to query next in leaf:
588 * - if returned extent is not last in leaf or
589 * - if the last in leaf is the full requested range
590 */
591 if (!(map->m_flags & EXT4_MAP_QUERY_LAST_IN_LEAF) ||
592 map->m_len == orig_mlen) {
593 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
594 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
595 ext4_es_cache_extent(inode, map->m_lblk, map->m_len,
596 map->m_pblk, status);
597 } else {
598 retval = ext4_map_query_blocks_next_in_leaf(handle, inode, map,
599 orig_mlen);
600 }
601 out:
602 map->m_seq = READ_ONCE(EXT4_I(inode)->i_es_seq);
603 return retval;
604 }
605
606 int ext4_map_create_blocks(handle_t *handle, struct inode *inode,
607 struct ext4_map_blocks *map, int flags)
608 {
609 unsigned int status;
610 int err, retval = 0;
611
612 /*
613 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
614 * indicate that the blocks and quota have already been
615 * checked when the data was copied into the page cache.
616 */
617 if (map->m_flags & EXT4_MAP_DELAYED)
618 flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
619
620 /*
621 * Here we clear m_flags because after allocating a new extent,
622 * they will be set again.
623 */
624 map->m_flags &= ~EXT4_MAP_FLAGS;
625
626 /*
627 * We need to check the extents flag here because a migration
628 * could have changed the inode type in the meantime.
629 */
630 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
631 retval = ext4_ext_map_blocks(handle, inode, map, flags);
632 } else {
633 retval = ext4_ind_map_blocks(handle, inode, map, flags);
634
635 /*
636 * We allocated new blocks which will result in i_data's
637 * format changing. Force the migration to fail by clearing
638 * the migrate flag.
639 */
640 if (retval > 0 && map->m_flags & EXT4_MAP_NEW)
641 ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
642 }
643 if (retval <= 0)
644 return retval;
645
646 if (unlikely(retval != map->m_len)) {
647 ext4_warning(inode->i_sb,
648 "ES len assertion failed for inode %llu: "
649 "retval %d != map->m_len %d",
650 inode->i_ino, retval, map->m_len);
651 WARN_ON(1);
652 }
653
654 /*
655 * We have to zeroout blocks before inserting them into extent
656 * status tree. Otherwise someone could look them up there and
657 * use them before they are really zeroed. We also have to
658 * unmap metadata before zeroing as otherwise writeback can
659 * overwrite zeros with stale data from block device.
660 */
661 if (flags & EXT4_GET_BLOCKS_ZERO &&
662 map->m_flags & EXT4_MAP_MAPPED && map->m_flags & EXT4_MAP_NEW) {
663 err = ext4_issue_zeroout(inode, map->m_lblk, map->m_pblk,
664 map->m_len);
665 if (err)
666 return err;
667 }
668
669 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
670 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
671 ext4_es_insert_extent(inode, map->m_lblk, map->m_len, map->m_pblk,
672 status, flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE);
673 map->m_seq = READ_ONCE(EXT4_I(inode)->i_es_seq);
674
675 return retval;
676 }
677
678 /*
679 * The ext4_map_blocks() function tries to look up the requested blocks,
680 * and returns if the blocks are already mapped.
681 *
682 * Otherwise it takes the write lock of i_data_sem, allocates
683 * blocks, stores the allocated blocks in the result buffer head,
684 * and marks it mapped.
685 *
686 * If the file is extent based, it calls ext4_ext_map_blocks();
687 * otherwise it calls ext4_ind_map_blocks() to handle
688 * indirect-mapped files.
689 *
690 * On success, it returns the number of blocks being mapped or allocated.
691 * If flags doesn't contain EXT4_GET_BLOCKS_CREATE the blocks are
692 * pre-allocated and unwritten, the resulting @map is marked as unwritten.
693 * If the flags contain EXT4_GET_BLOCKS_CREATE, it will mark @map as mapped.
694 *
695 * It returns 0 if a plain lookup failed (blocks have not been allocated);
696 * in that case, @map is returned as unmapped but we still fill in
697 * map->m_len to indicate the length of the hole starting at map->m_lblk.
698 *
699 * It returns the error in case of allocation failure.
700 */
701 int ext4_map_blocks(handle_t *handle, struct inode *inode,
702 struct ext4_map_blocks *map, int flags)
703 {
704 struct extent_status es;
705 int retval;
706 int ret = 0;
707 unsigned int orig_mlen = map->m_len;
708 #ifdef ES_AGGRESSIVE_TEST
709 struct ext4_map_blocks orig_map;
710
711 memcpy(&orig_map, map, sizeof(*map));
712 #endif
713
714 map->m_flags = 0;
715 ext_debug(inode, "flag 0x%x, max_blocks %u, logical block %lu\n",
716 flags, map->m_len, (unsigned long) map->m_lblk);
717
718 /*
719 * ext4_map_blocks returns an int, and m_len is an unsigned int
720 */
721 if (unlikely(map->m_len > INT_MAX))
722 map->m_len = INT_MAX;
723
724 /* We can only handle block numbers less than EXT_MAX_BLOCKS */
725 if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
726 return -EFSCORRUPTED;
727
728 /*
729 * Callers from the context of data submission are the only exceptions
730 * for regular files that do not hold the i_rwsem or invalidate_lock.
731 * However, caching unrelated ranges is not permitted.
732 */
733 if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
734 WARN_ON_ONCE(!(flags & EXT4_EX_NOCACHE));
735 else
736 ext4_check_map_extents_env(inode);
737
738 /* Lookup extent status tree firstly */
739 if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es, &map->m_seq)) {
740 if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
741 map->m_pblk = ext4_es_pblock(&es) +
742 map->m_lblk - es.es_lblk;
743 map->m_flags |= ext4_es_is_written(&es) ?
744 EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
745 retval = es.es_len - (map->m_lblk - es.es_lblk);
746 if (retval > map->m_len)
747 retval = map->m_len;
748 map->m_len = retval;
749 } else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
750 map->m_pblk = 0;
751 map->m_flags |= ext4_es_is_delayed(&es) ?
752 EXT4_MAP_DELAYED : 0;
753 retval = es.es_len - (map->m_lblk - es.es_lblk);
754 if (retval > map->m_len)
755 retval = map->m_len;
756 map->m_len = retval;
757 retval = 0;
758 } else {
759 BUG();
760 }
761
762 if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
763 return retval;
764 #ifdef ES_AGGRESSIVE_TEST
765 ext4_map_blocks_es_recheck(handle, inode, map,
766 &orig_map, flags);
767 #endif
768 if (!(flags & EXT4_GET_BLOCKS_QUERY_LAST_IN_LEAF) ||
769 orig_mlen == map->m_len)
770 goto found;
771
772 map->m_len = orig_mlen;
773 }
774 /*
775 * In the cached no-wait query mode, there is nothing more we can
776 * do if we cannot find the extent in the cache.
777 */
778 if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
779 return 0;
780
781 /*
782 * Try to see if we can get the block without requesting a new
783 * file system block.
784 */
785 down_read(&EXT4_I(inode)->i_data_sem);
786 retval = ext4_map_query_blocks(handle, inode, map, flags);
787 up_read((&EXT4_I(inode)->i_data_sem));
788
789 found:
790 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
791 ret = check_block_validity(inode, map);
792 if (ret != 0)
793 return ret;
794 }
795
796 /* If it is only a block(s) look up */
797 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
798 return retval;
799
800 /*
801 * Return if the blocks have already been allocated.
802 *
803 * Note that if blocks have been preallocated,
804 * ext4_ext_map_blocks() returns with the buffer head unmapped.
805 */
806 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
807 /*
808 * If we need to convert extent to unwritten
809 * we continue and do the actual work in
810 * ext4_ext_map_blocks()
811 */
812 if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
813 return retval;
814
815
816 ext4_fc_track_inode(handle, inode);
817 /*
818 * Allocating new blocks and/or writing to an unwritten extent
819 * will possibly result in updating i_data, so we take
820 * the write lock of i_data_sem and call get_block()
821 * with the create == 1 flag.
822 */
823 down_write(&EXT4_I(inode)->i_data_sem);
824 retval = ext4_map_create_blocks(handle, inode, map, flags);
825 up_write((&EXT4_I(inode)->i_data_sem));
826
827 if (retval < 0)
828 ext_debug(inode, "failed with err %d\n", retval);
829 if (retval <= 0)
830 return retval;
831
832 if (map->m_flags & EXT4_MAP_MAPPED) {
833 ret = check_block_validity(inode, map);
834 if (ret != 0)
835 return ret;
836
837 /*
838 * Inodes with freshly allocated blocks where contents will be
839 * visible after transaction commit must be on transaction's
840 * ordered data list.
841 */
842 if (map->m_flags & EXT4_MAP_NEW &&
843 !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
844 !(flags & EXT4_GET_BLOCKS_ZERO) &&
845 !ext4_is_quota_file(inode) &&
846 ext4_should_order_data(inode)) {
847 loff_t start_byte = EXT4_LBLK_TO_B(inode, map->m_lblk);
848 loff_t length = EXT4_LBLK_TO_B(inode, map->m_len);
849
850 if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
851 ret = ext4_jbd2_inode_add_wait(handle, inode,
852 start_byte, length);
853 else
854 ret = ext4_jbd2_inode_add_write(handle, inode,
855 start_byte, length);
856 if (ret)
857 return ret;
858 }
859 }
860 ext4_fc_track_range(handle, inode, map->m_lblk, map->m_lblk +
861 map->m_len - 1);
862 return retval;
863 }
864
865 /*
866 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
867 * we have to be careful as someone else may be manipulating b_state as well.
868 */
869 static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
870 {
871 unsigned long old_state;
872 unsigned long new_state;
873
874 flags &= EXT4_MAP_FLAGS;
875
876 /* Dummy buffer_head? Set non-atomically. */
877 if (!bh->b_folio) {
878 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
879 return;
880 }
881 /*
882 * Someone else may be modifying b_state. Be careful! This is ugly but
883 * once we get rid of using bh as a container for mapping information
884 * to pass to / from get_block functions, this can go away.
885 */
886 old_state = READ_ONCE(bh->b_state);
887 do {
888 new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
889 } while (unlikely(!try_cmpxchg(&bh->b_state, &old_state, new_state)));
890 }
891
892 /*
893 * Make sure that the current journal transaction has enough credits to map
894 * one extent. Return -EAGAIN if it cannot extend the current running
895 * transaction.
896 */
897 static inline int ext4_journal_ensure_extent_credits(handle_t *handle,
898 struct inode *inode)
899 {
900 int credits;
901 int ret;
902
903 /* Called from ext4_da_write_begin() which has no handle started? */
904 if (!handle)
905 return 0;
906
907 credits = ext4_chunk_trans_blocks(inode, 1);
908 ret = __ext4_journal_ensure_credits(handle, credits, credits, 0);
909 return ret <= 0 ? ret : -EAGAIN;
910 }
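
/*
 * A -EAGAIN from here propagates out of ext4_block_write_begin() and
 * makes ext4_write_begin() stop the current handle and restart the
 * whole operation with a fresh transaction (see the retry_journal
 * label there).
 */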
911
912 static int _ext4_get_block(struct inode *inode, sector_t iblock,
913 struct buffer_head *bh, int flags)
914 {
915 struct ext4_map_blocks map;
916 int ret = 0;
917
918 if (ext4_has_inline_data(inode))
919 return -ERANGE;
920
921 map.m_lblk = iblock;
922 map.m_len = bh->b_size >> inode->i_blkbits;
923
924 ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
925 flags);
926 if (ret > 0) {
927 map_bh(bh, inode->i_sb, map.m_pblk);
928 ext4_update_bh_state(bh, map.m_flags);
929 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
930 ret = 0;
931 } else if (ret == 0) {
932 /* hole case, need to fill in bh->b_size */
933 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
934 }
935 return ret;
936 }
937
938 int ext4_get_block(struct inode *inode, sector_t iblock,
939 struct buffer_head *bh, int create)
940 {
941 return _ext4_get_block(inode, iblock, bh,
942 create ? EXT4_GET_BLOCKS_CREATE : 0);
943 }
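
/*
 * Example of the get_block contract as implemented above: a lookup that
 * finds N contiguous mapped blocks maps the bh and grows bh->b_size to
 * N * blocksize so that callers can consume the whole extent at once;
 * for a hole, only bh->b_size is updated to reflect the hole's length.
 */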
944
945 /*
946 * Get block function used when preparing for buffered write if we require
947 * creating an unwritten extent if blocks haven't been allocated. The extent
948 * will be converted to written after the IO is complete.
949 */
950 int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
951 struct buffer_head *bh_result, int create)
952 {
953 int ret = 0;
954
955 ext4_debug("ext4_get_block_unwritten: inode %llu, create flag %d\n",
956 inode->i_ino, create);
957 ret = _ext4_get_block(inode, iblock, bh_result,
958 EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
959
960 /*
961 * If the buffer is marked unwritten, mark it as new to make sure it is
962 * zeroed out correctly in case of partial writes. Otherwise, there is
963 * a chance of stale data getting exposed.
964 */
965 if (ret == 0 && buffer_unwritten(bh_result))
966 set_buffer_new(bh_result);
967
968 return ret;
969 }
970
971 /* Maximum number of blocks we map for direct IO at once. */
972 #define DIO_MAX_BLOCKS 4096
973
974 /*
975 * `handle' can be NULL if create is zero
976 */
977 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
978 ext4_lblk_t block, int map_flags)
979 {
980 struct ext4_map_blocks map;
981 struct buffer_head *bh;
982 int create = map_flags & EXT4_GET_BLOCKS_CREATE;
983 bool nowait = map_flags & EXT4_GET_BLOCKS_CACHED_NOWAIT;
984 int err;
985
986 ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
987 || handle != NULL || create == 0);
988 ASSERT(create == 0 || !nowait);
989
990 map.m_lblk = block;
991 map.m_len = 1;
992 err = ext4_map_blocks(handle, inode, &map, map_flags);
993
994 if (err == 0)
995 return create ? ERR_PTR(-ENOSPC) : NULL;
996 if (err < 0)
997 return ERR_PTR(err);
998
999 if (nowait)
1000 return sb_find_get_block(inode->i_sb, map.m_pblk);
1001
1002 /*
1003 * Since a bh can hold extra references, e.g. from an attached
1004 * journal_head, avoid using __GFP_MOVABLE here: a remaining
1005 * journal_head would make page migration fail.
1006 */
1007 bh = getblk_unmovable(inode->i_sb->s_bdev, map.m_pblk,
1008 inode->i_sb->s_blocksize);
1009
1010 if (unlikely(!bh))
1011 return ERR_PTR(-ENOMEM);
1012 if (map.m_flags & EXT4_MAP_NEW) {
1013 ASSERT(create != 0);
1014 ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
1015 || (handle != NULL));
1016
1017 /*
1018 * Now that we do not always journal data, we should
1019 * keep in mind whether this should always journal the
1020 * new buffer as metadata. For now, regular file
1021 * writes use ext4_get_block instead, so it's not a
1022 * problem.
1023 */
1024 lock_buffer(bh);
1025 BUFFER_TRACE(bh, "call get_create_access");
1026 err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
1027 EXT4_JTR_NONE);
1028 if (unlikely(err)) {
1029 unlock_buffer(bh);
1030 goto errout;
1031 }
1032 if (!buffer_uptodate(bh)) {
1033 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1034 set_buffer_uptodate(bh);
1035 }
1036 unlock_buffer(bh);
1037 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1038 err = ext4_handle_dirty_metadata(handle, inode, bh);
1039 if (unlikely(err))
1040 goto errout;
1041 } else
1042 BUFFER_TRACE(bh, "not a new buffer");
1043 return bh;
1044 errout:
1045 brelse(bh);
1046 return ERR_PTR(err);
1047 }
1048
1049 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1050 ext4_lblk_t block, int map_flags)
1051 {
1052 struct buffer_head *bh;
1053 int ret;
1054
1055 bh = ext4_getblk(handle, inode, block, map_flags);
1056 if (IS_ERR(bh))
1057 return bh;
1058 if (!bh || ext4_buffer_uptodate(bh))
1059 return bh;
1060
1061 ret = ext4_read_bh_lock(bh, REQ_META | REQ_PRIO, true);
1062 if (ret) {
1063 put_bh(bh);
1064 return ERR_PTR(ret);
1065 }
1066 return bh;
1067 }
1068
1069 /* Read a contiguous batch of blocks. */
1070 int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
1071 bool wait, struct buffer_head **bhs)
1072 {
1073 int i, err;
1074
1075 for (i = 0; i < bh_count; i++) {
1076 bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
1077 if (IS_ERR(bhs[i])) {
1078 err = PTR_ERR(bhs[i]);
1079 bh_count = i;
1080 goto out_brelse;
1081 }
1082 }
1083
1084 for (i = 0; i < bh_count; i++)
1085 /* Note that NULL bhs[i] is valid because of holes. */
1086 if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
1087 ext4_read_bh_lock(bhs[i], REQ_META | REQ_PRIO, false);
1088
1089 if (!wait)
1090 return 0;
1091
1092 for (i = 0; i < bh_count; i++)
1093 if (bhs[i])
1094 wait_on_buffer(bhs[i]);
1095
1096 for (i = 0; i < bh_count; i++) {
1097 if (bhs[i] && !buffer_uptodate(bhs[i])) {
1098 err = -EIO;
1099 goto out_brelse;
1100 }
1101 }
1102 return 0;
1103
1104 out_brelse:
1105 for (i = 0; i < bh_count; i++) {
1106 brelse(bhs[i]);
1107 bhs[i] = NULL;
1108 }
1109 return err;
1110 }
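
/*
 * The batch read above is two-phase: all not-uptodate buffers are
 * submitted via ext4_read_bh_lock() first, and only then (if @wait is
 * set) do we block in wait_on_buffer(), so the reads are in flight in
 * parallel rather than issued and awaited one bh at a time.
 */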
1111
1112 int ext4_walk_page_buffers(handle_t *handle, struct inode *inode,
1113 struct buffer_head *head,
1114 unsigned from,
1115 unsigned to,
1116 int *partial,
1117 int (*fn)(handle_t *handle, struct inode *inode,
1118 struct buffer_head *bh))
1119 {
1120 struct buffer_head *bh;
1121 unsigned block_start, block_end;
1122 unsigned blocksize = head->b_size;
1123 int err, ret = 0;
1124 struct buffer_head *next;
1125
1126 for (bh = head, block_start = 0;
1127 ret == 0 && (bh != head || !block_start);
1128 block_start = block_end, bh = next) {
1129 next = bh->b_this_page;
1130 block_end = block_start + blocksize;
1131 if (block_end <= from || block_start >= to) {
1132 if (partial && !buffer_uptodate(bh))
1133 *partial = 1;
1134 continue;
1135 }
1136 err = (*fn)(handle, inode, bh);
1137 if (!ret)
1138 ret = err;
1139 }
1140 return ret;
1141 }
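
/*
 * Typical usage (see the write paths below): walk the folio's buffers
 * with do_journal_get_write_access() before copying data in, and with
 * write_end_fn() afterwards, restricting the callback to buffers that
 * overlap the byte range [from, to) within the folio.
 */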
1142
1143 /*
1144 * Helper for handling dirtying of journalled data. We also mark the folio
1145 * as dirty so that the writeback code knows this folio (and inode) contains
1146 * dirty data. ext4_writepages() then commits the appropriate transaction to
1147 * make the data stable.
1148 */
1149 static int ext4_dirty_journalled_data(handle_t *handle, struct buffer_head *bh)
1150 {
1151 struct folio *folio = bh->b_folio;
1152 struct inode *inode = folio->mapping->host;
1153
1154 /* only regular files have a_ops */
1155 if (S_ISREG(inode->i_mode))
1156 folio_mark_dirty(folio);
1157 return ext4_handle_dirty_metadata(handle, NULL, bh);
1158 }
1159
1160 int do_journal_get_write_access(handle_t *handle, struct inode *inode,
1161 struct buffer_head *bh)
1162 {
1163 if (!buffer_mapped(bh) || buffer_freed(bh))
1164 return 0;
1165 BUFFER_TRACE(bh, "get write access");
1166 return ext4_journal_get_write_access(handle, inode->i_sb, bh,
1167 EXT4_JTR_NONE);
1168 }
1169
1170 int ext4_block_write_begin(handle_t *handle, struct folio *folio,
1171 loff_t pos, unsigned len,
1172 get_block_t *get_block)
1173 {
1174 unsigned int from = offset_in_folio(folio, pos);
1175 unsigned to = from + len;
1176 struct inode *inode = folio->mapping->host;
1177 unsigned block_start, block_end;
1178 sector_t block;
1179 int err = 0;
1180 unsigned int blocksize = i_blocksize(inode);
1181 struct buffer_head *bh, *head, *wait[2];
1182 int nr_wait = 0;
1183 int i;
1184 bool should_journal_data = ext4_should_journal_data(inode);
1185
1186 BUG_ON(!folio_test_locked(folio));
1187 BUG_ON(to > folio_size(folio));
1188 BUG_ON(from > to);
1189 WARN_ON_ONCE(blocksize > folio_size(folio));
1190
1191 head = folio_buffers(folio);
1192 if (!head)
1193 head = create_empty_buffers(folio, blocksize, 0);
1194 block = EXT4_PG_TO_LBLK(inode, folio->index);
1195
1196 for (bh = head, block_start = 0; bh != head || !block_start;
1197 block++, block_start = block_end, bh = bh->b_this_page) {
1198 block_end = block_start + blocksize;
1199 if (block_end <= from || block_start >= to) {
1200 if (folio_test_uptodate(folio)) {
1201 set_buffer_uptodate(bh);
1202 }
1203 continue;
1204 }
1205 if (WARN_ON_ONCE(buffer_new(bh)))
1206 clear_buffer_new(bh);
1207 if (!buffer_mapped(bh)) {
1208 WARN_ON(bh->b_size != blocksize);
1209 err = ext4_journal_ensure_extent_credits(handle, inode);
1210 if (!err)
1211 err = get_block(inode, block, bh, 1);
1212 if (err)
1213 break;
1214 if (buffer_new(bh)) {
1215 /*
1216 * We may be zeroing partial buffers or all new
1217 * buffers in case of failure. Prepare JBD2 for
1218 * that.
1219 */
1220 if (should_journal_data)
1221 do_journal_get_write_access(handle,
1222 inode, bh);
1223 if (folio_test_uptodate(folio)) {
1224 /*
1225 * Unlike __block_write_begin() we leave
1226 * dirtying of new uptodate buffers to
1227 * ->write_end() time or
1228 * folio_zero_new_buffers().
1229 */
1230 set_buffer_uptodate(bh);
1231 continue;
1232 }
1233 if (block_end > to || block_start < from)
1234 folio_zero_segments(folio, to,
1235 block_end,
1236 block_start, from);
1237 continue;
1238 }
1239 }
1240 if (folio_test_uptodate(folio)) {
1241 set_buffer_uptodate(bh);
1242 continue;
1243 }
1244 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1245 !buffer_unwritten(bh) &&
1246 (block_start < from || block_end > to)) {
1247 ext4_read_bh_lock(bh, 0, false);
1248 wait[nr_wait++] = bh;
1249 }
1250 }
1251 /*
1252 * If we issued read requests, let them complete.
1253 */
1254 for (i = 0; i < nr_wait; i++) {
1255 wait_on_buffer(wait[i]);
1256 if (!buffer_uptodate(wait[i]))
1257 err = -EIO;
1258 }
1259 if (unlikely(err)) {
1260 if (should_journal_data)
1261 ext4_journalled_zero_new_buffers(handle, inode, folio,
1262 from, to);
1263 else
1264 folio_zero_new_buffers(folio, from, to);
1265 } else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
1266 for (i = 0; i < nr_wait; i++) {
1267 int err2;
1268
1269 err2 = fscrypt_decrypt_pagecache_blocks(folio,
1270 blocksize, bh_offset(wait[i]));
1271 if (err2) {
1272 clear_buffer_uptodate(wait[i]);
1273 err = err2;
1274 }
1275 }
1276 }
1277
1278 return err;
1279 }
1280
1281 /*
1282 * To preserve ordering, it is essential that the hole instantiation and
1283 * the data write be encapsulated in a single transaction. We cannot
1284 * close off a transaction and start a new one between the ext4_get_block()
1285 * and the ext4_write_end(). So doing the jbd2_journal_start at the start of
1286 * ext4_write_begin() is the right place.
1287 */
1288 static int ext4_write_begin(const struct kiocb *iocb,
1289 struct address_space *mapping,
1290 loff_t pos, unsigned len,
1291 struct folio **foliop, void **fsdata)
1292 {
1293 struct inode *inode = mapping->host;
1294 int ret, needed_blocks;
1295 handle_t *handle;
1296 int retries = 0;
1297 struct folio *folio;
1298 pgoff_t index;
1299 unsigned from, to;
1300
1301 ret = ext4_emergency_state(inode->i_sb);
1302 if (unlikely(ret))
1303 return ret;
1304
1305 trace_ext4_write_begin(inode, pos, len);
1306 /*
1307 * Reserve one more block for addition to the orphan list in
1308 * case we allocate blocks but the write fails for some reason.
1309 */
1310 needed_blocks = ext4_chunk_trans_extent(inode,
1311 ext4_journal_blocks_per_folio(inode)) + 1;
1312 index = pos >> PAGE_SHIFT;
1313
1314 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
1315 ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
1316 foliop);
1317 if (ret < 0)
1318 return ret;
1319 if (ret == 1)
1320 return 0;
1321 }
1322
1323 /*
1324 * write_begin_get_folio() can take a long time if the
1325 * system is thrashing due to memory pressure, or if the folio
1326 * is being written back. So grab it first before we start
1327 * the transaction handle. This also allows us to allocate
1328 * the folio (if needed) without using GFP_NOFS.
1329 */
1330 retry_grab:
1331 folio = write_begin_get_folio(iocb, mapping, index, len);
1332 if (IS_ERR(folio))
1333 return PTR_ERR(folio);
1334
1335 if (len > folio_next_pos(folio) - pos)
1336 len = folio_next_pos(folio) - pos;
1337
1338 from = offset_in_folio(folio, pos);
1339 to = from + len;
1340
1341 /*
1342 * As with the folio allocation, we preallocate buffer heads
1343 * before starting the handle.
1344 */
1345 if (!folio_buffers(folio))
1346 create_empty_buffers(folio, inode->i_sb->s_blocksize, 0);
1347
1348 folio_unlock(folio);
1349
1350 retry_journal:
1351 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
1352 if (IS_ERR(handle)) {
1353 folio_put(folio);
1354 return PTR_ERR(handle);
1355 }
1356
1357 folio_lock(folio);
1358 if (folio->mapping != mapping) {
1359 /* The folio got truncated from under us */
1360 folio_unlock(folio);
1361 folio_put(folio);
1362 ext4_journal_stop(handle);
1363 goto retry_grab;
1364 }
1365 /* In case writeback began while the folio was unlocked */
1366 folio_wait_stable(folio);
1367
1368 if (ext4_should_dioread_nolock(inode))
1369 ret = ext4_block_write_begin(handle, folio, pos, len,
1370 ext4_get_block_unwritten);
1371 else
1372 ret = ext4_block_write_begin(handle, folio, pos, len,
1373 ext4_get_block);
1374 if (!ret && ext4_should_journal_data(inode)) {
1375 ret = ext4_walk_page_buffers(handle, inode,
1376 folio_buffers(folio), from, to,
1377 NULL, do_journal_get_write_access);
1378 }
1379
1380 if (ret) {
1381 bool extended = (pos + len > inode->i_size) &&
1382 !ext4_verity_in_progress(inode);
1383
1384 folio_unlock(folio);
1385 /*
1386 * ext4_block_write_begin may have instantiated a few blocks
1387 * outside i_size. Trim these off again. Don't need
1388 * i_size_read because we hold i_rwsem.
1389 *
1390 * Add inode to orphan list in case we crash before
1391 * truncate finishes
1392 */
1393 if (extended && ext4_can_truncate(inode))
1394 ext4_orphan_add(handle, inode);
1395
1396 ext4_journal_stop(handle);
1397 if (extended) {
1398 ext4_truncate_failed_write(inode);
1399 /*
1400 * If truncate failed early the inode might
1401 * still be on the orphan list; we need to
1402 * make sure the inode is removed from the
1403 * orphan list in that case.
1404 */
1405 if (inode->i_nlink)
1406 ext4_orphan_del(NULL, inode);
1407 }
1408
1409 if (ret == -EAGAIN ||
1410 (ret == -ENOSPC &&
1411 ext4_should_retry_alloc(inode->i_sb, &retries)))
1412 goto retry_journal;
1413 folio_put(folio);
1414 return ret;
1415 }
1416 *foliop = folio;
1417 return ret;
1418 }
1419
1420 /* For write_end() in data=journal mode */
1421 static int write_end_fn(handle_t *handle, struct inode *inode,
1422 struct buffer_head *bh)
1423 {
1424 int ret;
1425 if (!buffer_mapped(bh) || buffer_freed(bh))
1426 return 0;
1427 set_buffer_uptodate(bh);
1428 ret = ext4_dirty_journalled_data(handle, bh);
1429 clear_buffer_meta(bh);
1430 clear_buffer_prio(bh);
1431 clear_buffer_new(bh);
1432 return ret;
1433 }
1434
1435 /*
1436 * We need to pick up the new inode size which generic_commit_write gave us.
1437 * `iocb` can be NULL - eg, when called from page_symlink().
1438 */
1439 static int ext4_write_end(const struct kiocb *iocb,
1440 struct address_space *mapping,
1441 loff_t pos, unsigned len, unsigned copied,
1442 struct folio *folio, void *fsdata)
1443 {
1444 handle_t *handle = ext4_journal_current_handle();
1445 struct inode *inode = mapping->host;
1446 loff_t old_size = inode->i_size;
1447 int ret = 0, ret2;
1448 int i_size_changed = 0;
1449 bool verity = ext4_verity_in_progress(inode);
1450
1451 trace_ext4_write_end(inode, pos, len, copied);
1452
1453 if (ext4_has_inline_data(inode) &&
1454 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
1455 return ext4_write_inline_data_end(inode, pos, len, copied,
1456 folio);
1457
1458 copied = block_write_end(pos, len, copied, folio);
1459 /*
1460 * it's important to update i_size while still holding folio lock:
1461 * page writeout could otherwise come in and zero beyond i_size.
1462 *
1463 * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
1464 * blocks are being written past EOF, so skip the i_size update.
1465 */
1466 if (!verity)
1467 i_size_changed = ext4_update_inode_size(inode, pos + copied);
1468 folio_unlock(folio);
1469 folio_put(folio);
1470
1471 if (old_size < pos && !verity)
1472 pagecache_isize_extended(inode, old_size, pos);
1473
1474 /*
1475 * Don't mark the inode dirty under folio lock. First, it unnecessarily
1476 * makes the holding time of folio lock longer. Second, it forces lock
1477 * ordering of folio lock and transaction start for journaling
1478 * filesystems.
1479 */
1480 if (i_size_changed)
1481 ret = ext4_mark_inode_dirty(handle, inode);
1482
1483 if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
1484 /* If we have allocated more blocks and copied
1485 * less, we will have blocks allocated outside
1486 * inode->i_size, so truncate them.
1487 */
1488 ext4_orphan_add(handle, inode);
1489
1490 ret2 = ext4_journal_stop(handle);
1491 if (!ret)
1492 ret = ret2;
1493
1494 if (pos + len > inode->i_size && !verity) {
1495 ext4_truncate_failed_write(inode);
1496 /*
1497 * If truncate failed early the inode might still be
1498 * on the orphan list; we need to make sure the inode
1499 * is removed from the orphan list in that case.
1500 */
1501 if (inode->i_nlink)
1502 ext4_orphan_del(NULL, inode);
1503 }
1504
1505 return ret ? ret : copied;
1506 }
1507
1508 /*
1509 * This is a private version of folio_zero_new_buffers() which doesn't
1510 * set the buffer to be dirty, since in data=journalled mode we need
1511 * to call ext4_dirty_journalled_data() instead.
1512 */
1513 static void ext4_journalled_zero_new_buffers(handle_t *handle,
1514 struct inode *inode,
1515 struct folio *folio,
1516 unsigned from, unsigned to)
1517 {
1518 unsigned int block_start = 0, block_end;
1519 struct buffer_head *head, *bh;
1520
1521 bh = head = folio_buffers(folio);
1522 do {
1523 block_end = block_start + bh->b_size;
1524 if (buffer_new(bh)) {
1525 if (block_end > from && block_start < to) {
1526 if (!folio_test_uptodate(folio)) {
1527 unsigned start, size;
1528
1529 start = max(from, block_start);
1530 size = min(to, block_end) - start;
1531
1532 folio_zero_range(folio, start, size);
1533 }
1534 clear_buffer_new(bh);
1535 write_end_fn(handle, inode, bh);
1536 }
1537 }
1538 block_start = block_end;
1539 bh = bh->b_this_page;
1540 } while (bh != head);
1541 }
1542
1543 static int ext4_journalled_write_end(const struct kiocb *iocb,
1544 struct address_space *mapping,
1545 loff_t pos, unsigned len, unsigned copied,
1546 struct folio *folio, void *fsdata)
1547 {
1548 handle_t *handle = ext4_journal_current_handle();
1549 struct inode *inode = mapping->host;
1550 loff_t old_size = inode->i_size;
1551 int ret = 0, ret2;
1552 int partial = 0;
1553 unsigned from, to;
1554 int size_changed = 0;
1555 bool verity = ext4_verity_in_progress(inode);
1556
1557 trace_ext4_journalled_write_end(inode, pos, len, copied);
1558 from = pos & (PAGE_SIZE - 1);
1559 to = from + len;
1560
1561 BUG_ON(!ext4_handle_valid(handle));
1562
1563 if (ext4_has_inline_data(inode))
1564 return ext4_write_inline_data_end(inode, pos, len, copied,
1565 folio);
1566
1567 if (unlikely(copied < len) && !folio_test_uptodate(folio)) {
1568 copied = 0;
1569 ext4_journalled_zero_new_buffers(handle, inode, folio,
1570 from, to);
1571 } else {
1572 if (unlikely(copied < len))
1573 ext4_journalled_zero_new_buffers(handle, inode, folio,
1574 from + copied, to);
1575 ret = ext4_walk_page_buffers(handle, inode,
1576 folio_buffers(folio),
1577 from, from + copied, &partial,
1578 write_end_fn);
1579 if (!partial)
1580 folio_mark_uptodate(folio);
1581 }
1582 if (!verity)
1583 size_changed = ext4_update_inode_size(inode, pos + copied);
1584 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1585 folio_unlock(folio);
1586 folio_put(folio);
1587
1588 if (old_size < pos && !verity)
1589 pagecache_isize_extended(inode, old_size, pos);
1590
1591 if (size_changed) {
1592 ret2 = ext4_mark_inode_dirty(handle, inode);
1593 if (!ret)
1594 ret = ret2;
1595 }
1596
1597 if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
1598 /* If we have allocated more blocks and copied
1599 * less, we will have blocks allocated outside
1600 * inode->i_size, so truncate them.
1601 */
1602 ext4_orphan_add(handle, inode);
1603
1604 ret2 = ext4_journal_stop(handle);
1605 if (!ret)
1606 ret = ret2;
1607 if (pos + len > inode->i_size && !verity) {
1608 ext4_truncate_failed_write(inode);
1609 /*
1610 * If truncate failed early the inode might still be
1611 * on the orphan list; we need to make sure the inode
1612 * is removed from the orphan list in that case.
1613 */
1614 if (inode->i_nlink)
1615 ext4_orphan_del(NULL, inode);
1616 }
1617
1618 return ret ? ret : copied;
1619 }
1620
1621 /*
1622 * Reserve space for 'nr_resv' clusters
1623 */
1624 static int ext4_da_reserve_space(struct inode *inode, int nr_resv)
1625 {
1626 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1627 struct ext4_inode_info *ei = EXT4_I(inode);
1628 int ret;
1629
1630 /*
1631 * We will charge metadata quota at writeout time; this saves
1632 * us from metadata over-estimation, though we may go over by
1633 * a small amount in the end. Here we just reserve for data.
1634 */
1635 ret = dquot_reserve_block(inode, EXT4_C2B(sbi, nr_resv));
1636 if (ret)
1637 return ret;
1638
1639 spin_lock(&ei->i_block_reservation_lock);
1640 if (ext4_claim_free_clusters(sbi, nr_resv, 0)) {
1641 spin_unlock(&ei->i_block_reservation_lock);
1642 dquot_release_reservation_block(inode, EXT4_C2B(sbi, nr_resv));
1643 return -ENOSPC;
1644 }
1645 ei->i_reserved_data_blocks += nr_resv;
1646 trace_ext4_da_reserve_space(inode, nr_resv);
1647 spin_unlock(&ei->i_block_reservation_lock);
1648
1649 return 0; /* success */
1650 }
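
/*
 * Note the unit split above: the in-memory reservation counters are
 * kept in clusters, while quota is charged in blocks. For example, on
 * a bigalloc filesystem with 16 blocks per cluster, reserving
 * nr_resv = 2 clusters charges EXT4_C2B() = 32 blocks of quota.
 */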
1651
1652 void ext4_da_release_space(struct inode *inode, int to_free)
1653 {
1654 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1655 struct ext4_inode_info *ei = EXT4_I(inode);
1656
1657 if (!to_free)
1658 return; /* Nothing to release, exit */
1659
1660 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1661
1662 trace_ext4_da_release_space(inode, to_free);
1663 if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1664 /*
1665 * If there aren't enough reserved blocks, then the
1666 * counter is messed up somewhere. Since this function
1667 * is called from the folio invalidation path, it's
1668 * harmless to return without any action.
1669 */
1670 ext4_warning(inode->i_sb, "ext4_da_release_space: "
1671 "ino %llu, to_free %d with only %d reserved "
1672 "data blocks", inode->i_ino, to_free,
1673 ei->i_reserved_data_blocks);
1674 WARN_ON(1);
1675 to_free = ei->i_reserved_data_blocks;
1676 }
1677 ei->i_reserved_data_blocks -= to_free;
1678
1679 /* update fs dirty data blocks counter */
1680 percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
1681
1682 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1683
1684 dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
1685 }
1686
1687 /*
1688 * Delayed allocation stuff
1689 */
1690
1691 struct mpage_da_data {
1692 /* These are input fields for ext4_do_writepages() */
1693 struct inode *inode;
1694 struct writeback_control *wbc;
1695 unsigned int can_map:1; /* Can writepages call map blocks? */
1696
1697 /* These are internal state of ext4_do_writepages() */
1698 loff_t start_pos; /* The start pos to write */
1699 loff_t next_pos; /* Current pos to examine */
1700 loff_t end_pos; /* Last pos to examine */
1701
1702 /*
1703 * Extent to map - this can be after start_pos because that can be
1704 * fully mapped. We somewhat abuse m_flags to store whether the extent
1705 * is delalloc or unwritten.
1706 */
1707 struct ext4_map_blocks map;
1708 struct ext4_io_submit io_submit; /* IO submission data */
1709 unsigned int do_map:1;
1710 unsigned int scanned_until_end:1;
1711 unsigned int journalled_more_data:1;
1712 };
1713
1714 static void mpage_release_unused_pages(struct mpage_da_data *mpd,
1715 bool invalidate)
1716 {
1717 unsigned nr, i;
1718 pgoff_t index, end;
1719 struct folio_batch fbatch;
1720 struct inode *inode = mpd->inode;
1721 struct address_space *mapping = inode->i_mapping;
1722
1723 /* This is necessary when next_pos == 0. */
1724 if (mpd->start_pos >= mpd->next_pos)
1725 return;
1726
1727 mpd->scanned_until_end = 0;
1728 if (invalidate) {
1729 ext4_lblk_t start, last;
1730 start = EXT4_B_TO_LBLK(inode, mpd->start_pos);
1731 last = mpd->next_pos >> inode->i_blkbits;
1732
1733 /*
1734 * avoid racing with extent status tree scans made by
1735 * ext4_insert_delayed_block()
1736 */
1737 down_write(&EXT4_I(inode)->i_data_sem);
1738 ext4_es_remove_extent(inode, start, last - start);
1739 up_write(&EXT4_I(inode)->i_data_sem);
1740 }
1741
1742 folio_batch_init(&fbatch);
1743 index = mpd->start_pos >> PAGE_SHIFT;
1744 end = mpd->next_pos >> PAGE_SHIFT;
1745 while (index < end) {
1746 nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
1747 if (nr == 0)
1748 break;
1749 for (i = 0; i < nr; i++) {
1750 struct folio *folio = fbatch.folios[i];
1751
1752 if (folio_pos(folio) < mpd->start_pos)
1753 continue;
1754 if (folio_next_index(folio) > end)
1755 continue;
1756 BUG_ON(!folio_test_locked(folio));
1757 BUG_ON(folio_test_writeback(folio));
1758 if (invalidate) {
1759 if (folio_mapped(folio)) {
1760 folio_clear_dirty_for_io(folio);
1761 /*
1762 * Unmap folio from page
1763 * tables to prevent
1764 * subsequent accesses through
1765 * stale PTEs. This ensures
1766 * future accesses trigger new
1767 * page faults rather than
1768 * reusing the invalidated
1769 * folio.
1770 */
1771 unmap_mapping_pages(folio->mapping,
1772 folio->index,
1773 folio_nr_pages(folio), false);
1774 }
1775 block_invalidate_folio(folio, 0,
1776 folio_size(folio));
1777 folio_clear_uptodate(folio);
1778 }
1779 folio_unlock(folio);
1780 }
1781 folio_batch_release(&fbatch);
1782 }
1783 }
1784
1785 static void ext4_print_free_blocks(struct inode *inode)
1786 {
1787 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1788 struct super_block *sb = inode->i_sb;
1789 struct ext4_inode_info *ei = EXT4_I(inode);
1790
1791 ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
1792 EXT4_C2B(EXT4_SB(inode->i_sb),
1793 ext4_count_free_clusters(sb)));
1794 ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
1795 ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
1796 (long long) EXT4_C2B(EXT4_SB(sb),
1797 percpu_counter_sum(&sbi->s_freeclusters_counter)));
1798 ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
1799 (long long) EXT4_C2B(EXT4_SB(sb),
1800 percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
1801 ext4_msg(sb, KERN_CRIT, "Block reservation details");
1802 ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
1803 ei->i_reserved_data_blocks);
1805 }
1806
1807 /*
1808 * Check whether the cluster containing lblk has been allocated or has
1809 * delalloc reservation.
1810 *
1811 * Returns 0 if the cluster doesn't have either, 1 if it has delalloc
1812 * reservation, 2 if it's already been allocated, negative error code on
1813 * failure.
1814 */
1815 static int ext4_clu_alloc_state(struct inode *inode, ext4_lblk_t lblk)
1816 {
1817 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1818 int ret;
1819
1820 /* Has delalloc reservation? */
1821 if (ext4_es_scan_clu(inode, &ext4_es_is_delayed, lblk))
1822 return 1;
1823
1824 /* Already been allocated? */
1825 if (ext4_es_scan_clu(inode, &ext4_es_is_mapped, lblk))
1826 return 2;
1827 ret = ext4_clu_mapped(inode, EXT4_B2C(sbi, lblk));
1828 if (ret < 0)
1829 return ret;
1830 if (ret > 0)
1831 return 2;
1832
1833 return 0;
1834 }
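/*
 * A hedged usage sketch of the tristate above (the caller code is
 * illustrative, not from this file):
 *
 *	switch (ext4_clu_alloc_state(inode, lblk)) {
 *	case 0:		// cluster has neither reservation nor allocation
 *		break;	// caller must reserve a new cluster
 *	case 1:		// cluster already carries a delalloc reservation
 *		break;	// no new reservation needed
 *	case 2:		// cluster already allocated on disk
 *		break;	// account it as allocated, skip reservation
 *	default:	// negative errno from the extent lookup
 *		break;	// propagate the error
 *	}
 */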
1835
1836 /*
1837 * ext4_insert_delayed_blocks - adds multiple delayed blocks to the extents
1838 * status tree, incrementing the reserved
1839 * cluster/block count or making pending
1840 * reservations where needed
1841 *
1842 * @inode - file containing the newly added block
1843 * @lblk - start logical block to be added
1844 * @len - number of blocks to be added
1845 *
1846 * Returns 0 on success, negative error code on failure.
1847 */
1848 static int ext4_insert_delayed_blocks(struct inode *inode, ext4_lblk_t lblk,
1849 ext4_lblk_t len)
1850 {
1851 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1852 int ret;
1853 bool lclu_allocated = false;
1854 bool end_allocated = false;
1855 ext4_lblk_t resv_clu;
1856 ext4_lblk_t end = lblk + len - 1;
1857
1858 /*
1859 * If the cluster containing lblk or end is shared with a delayed,
1860 * written, or unwritten extent in a bigalloc file system, it's
1861 * already been accounted for and does not need to be reserved.
1862 * A pending reservation must be made for the cluster if it's
1863 * shared with a written or unwritten extent and doesn't already
1864 * have one. Written and unwritten extents can be purged from the
1865 * extents status tree if the system is under memory pressure, so
1866 * it's necessary to examine the extent tree if a search of the
1867 * extents status tree doesn't get a match.
1868 */
1869 if (sbi->s_cluster_ratio == 1) {
1870 ret = ext4_da_reserve_space(inode, len);
1871 if (ret != 0) /* ENOSPC */
1872 return ret;
1873 } else { /* bigalloc */
1874 resv_clu = EXT4_B2C(sbi, end) - EXT4_B2C(sbi, lblk) + 1;
1875
1876 ret = ext4_clu_alloc_state(inode, lblk);
1877 if (ret < 0)
1878 return ret;
1879 if (ret > 0) {
1880 resv_clu--;
1881 lclu_allocated = (ret == 2);
1882 }
1883
1884 if (EXT4_B2C(sbi, lblk) != EXT4_B2C(sbi, end)) {
1885 ret = ext4_clu_alloc_state(inode, end);
1886 if (ret < 0)
1887 return ret;
1888 if (ret > 0) {
1889 resv_clu--;
1890 end_allocated = (ret == 2);
1891 }
1892 }
1893
1894 if (resv_clu) {
1895 ret = ext4_da_reserve_space(inode, resv_clu);
1896 if (ret != 0) /* ENOSPC */
1897 return ret;
1898 }
1899 }
1900
1901 ext4_es_insert_delayed_extent(inode, lblk, len, lclu_allocated,
1902 end_allocated);
1903 return 0;
1904 }
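/*
 * Worked example for the bigalloc branch above (illustrative numbers):
 * with s_cluster_ratio == 16, a delayed range lblk = 30, len = 20 covers
 * blocks 30..49, i.e. clusters EXT4_B2C(30) = 1 through EXT4_B2C(49) = 3,
 * so resv_clu starts at 3 - 1 + 1 = 3. If the cluster of lblk is already
 * allocated (ret == 2) and the cluster of end holds a delalloc reservation
 * (ret == 1), resv_clu drops to 1 and only the interior cluster is newly
 * reserved, with lclu_allocated set so the extent status tree records the
 * allocated edge.
 */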
1905
1906 /*
1907 * Looks up the requested blocks and sets the delalloc extent map.
1908 * First try to look up the extent entry that contains the requested
1909 * blocks in the extent status tree without i_data_sem, then try to look
1910 * up the on-disk extent mapping with i_data_sem held in read mode, and
1911 * finally hold i_data_sem in write mode, look up again and add a
1912 * delalloc extent entry if we still couldn't find any extent. Pass out
1913 * the mapped extent through @map and return 0 on success.
1914 */
1915 static int ext4_da_map_blocks(struct inode *inode, struct ext4_map_blocks *map)
1916 {
1917 struct extent_status es;
1918 int retval;
1919 #ifdef ES_AGGRESSIVE_TEST
1920 struct ext4_map_blocks orig_map;
1921
1922 memcpy(&orig_map, map, sizeof(*map));
1923 #endif
1924
1925 map->m_flags = 0;
1926 ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
1927 (unsigned long) map->m_lblk);
1928
1929 ext4_check_map_extents_env(inode);
1930
1931 /* First, look up the extent status tree */
1932 if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es, NULL)) {
1933 map->m_len = min_t(unsigned int, map->m_len,
1934 es.es_len - (map->m_lblk - es.es_lblk));
1935
1936 if (ext4_es_is_hole(&es))
1937 goto add_delayed;
1938
1939 found:
1940 /*
1941 * Delayed extent could be allocated by fallocate.
1942 * So we need to check it.
1943 */
1944 if (ext4_es_is_delayed(&es)) {
1945 map->m_flags |= EXT4_MAP_DELAYED;
1946 return 0;
1947 }
1948
1949 map->m_pblk = ext4_es_pblock(&es) + map->m_lblk - es.es_lblk;
1950 if (ext4_es_is_written(&es))
1951 map->m_flags |= EXT4_MAP_MAPPED;
1952 else if (ext4_es_is_unwritten(&es))
1953 map->m_flags |= EXT4_MAP_UNWRITTEN;
1954 else
1955 BUG();
1956
1957 #ifdef ES_AGGRESSIVE_TEST
1958 ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1959 #endif
1960 return 0;
1961 }
1962
1963 /*
1964 * Try to see if we can get the block without requesting a new
1965 * file system block.
1966 */
1967 down_read(&EXT4_I(inode)->i_data_sem);
1968 if (ext4_has_inline_data(inode))
1969 retval = 0;
1970 else
1971 retval = ext4_map_query_blocks(NULL, inode, map, 0);
1972 up_read(&EXT4_I(inode)->i_data_sem);
1973 if (retval)
1974 return retval < 0 ? retval : 0;
1975
1976 add_delayed:
1977 down_write(&EXT4_I(inode)->i_data_sem);
1978 /*
1979 * Page fault path (ext4_page_mkwrite does not take i_rwsem)
1980 * and fallocate path (no folio lock) can race. Make sure we
1981 * lookup the extent status tree here again while i_data_sem
1982 * is held in write mode, before inserting a new da entry in
1983 * the extent status tree.
1984 */
1985 if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es, NULL)) {
1986 map->m_len = min_t(unsigned int, map->m_len,
1987 es.es_len - (map->m_lblk - es.es_lblk));
1988
1989 if (!ext4_es_is_hole(&es)) {
1990 up_write(&EXT4_I(inode)->i_data_sem);
1991 goto found;
1992 }
1993 } else if (!ext4_has_inline_data(inode)) {
1994 retval = ext4_map_query_blocks(NULL, inode, map, 0);
1995 if (retval) {
1996 up_write(&EXT4_I(inode)->i_data_sem);
1997 return retval < 0 ? retval : 0;
1998 }
1999 }
2000
2001 map->m_flags |= EXT4_MAP_DELAYED;
2002 retval = ext4_insert_delayed_blocks(inode, map->m_lblk, map->m_len);
2003 if (!retval)
2004 map->m_seq = READ_ONCE(EXT4_I(inode)->i_es_seq);
2005 up_write(&EXT4_I(inode)->i_data_sem);
2006
2007 return retval;
2008 }
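/*
 * The lookup ladder above, condensed (a sketch of the locking order, not
 * additional behaviour):
 *
 *	1) lockless extent status tree lookup	(fast path, no i_data_sem)
 *	2) i_data_sem held for read		(query the on-disk extent tree)
 *	3) i_data_sem held for write		(re-check, then insert delalloc)
 *
 * The re-check in step 3 is what closes the race with the page-fault and
 * fallocate paths mentioned in the comment above.
 */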
2009
2010 /*
2011 * This is a special get_block_t callback which is used by
2012 * ext4_da_write_begin(). It will either return a mapped block or
2013 * reserve space for a single block.
2014 *
2015 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
2016 * We also have b_blocknr = -1 and b_bdev initialized properly
2017 *
2018 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
2019 * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
2020 * initialized properly.
2021 */
2022 int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
2023 struct buffer_head *bh, int create)
2024 {
2025 struct ext4_map_blocks map;
2026 sector_t invalid_block = ~((sector_t) 0xffff);
2027 int ret = 0;
2028
2029 BUG_ON(create == 0);
2030 BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
2031
2032 if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
2033 invalid_block = ~0;
2034
2035 map.m_lblk = iblock;
2036 map.m_len = 1;
2037
2038 /*
2039 * First, we need to know whether the block is already allocated;
2040 * preallocated blocks are unmapped but should be treated
2041 * the same as allocated blocks.
2042 */
2043 ret = ext4_da_map_blocks(inode, &map);
2044 if (ret < 0)
2045 return ret;
2046
2047 if (map.m_flags & EXT4_MAP_DELAYED) {
2048 map_bh(bh, inode->i_sb, invalid_block);
2049 set_buffer_new(bh);
2050 set_buffer_delay(bh);
2051 return 0;
2052 }
2053
2054 map_bh(bh, inode->i_sb, map.m_pblk);
2055 ext4_update_bh_state(bh, map.m_flags);
2056
2057 if (buffer_unwritten(bh)) {
2058 /* A delayed write to unwritten bh should be marked
2059 * new and mapped. Mapped ensures that we don't do
2060 * get_block multiple times when we write to the same
2061 * offset and new ensures that we do proper zero out
2062 * for partial write.
2063 */
2064 set_buffer_new(bh);
2065 set_buffer_mapped(bh);
2066 }
2067 return 0;
2068 }
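/*
 * A sketch of the buffer_head states handed back above (summarising the
 * cases, nothing new): a delalloc hit leaves the bh new+mapped+delay with
 * b_blocknr == invalid_block as a sentinel; an unwritten extent leaves it
 * new+mapped+unwritten with a real physical block. A caller could branch
 * on those bits, e.g.:
 *
 *	if (buffer_delay(bh))
 *		;	// no physical block yet; allocated at writeback time
 *	else if (buffer_unwritten(bh))
 *		;	// preallocated block; converted after the write completes
 */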
2069
2070 static void mpage_folio_done(struct mpage_da_data *mpd, struct folio *folio)
2071 {
2072 mpd->start_pos += folio_size(folio);
2073 mpd->wbc->nr_to_write -= folio_nr_pages(folio);
2074 folio_unlock(folio);
2075 }
2076
2077 static int mpage_submit_folio(struct mpage_da_data *mpd, struct folio *folio)
2078 {
2079 size_t len;
2080 loff_t size;
2081 int err;
2082
2083 WARN_ON_ONCE(folio_pos(folio) != mpd->start_pos);
2084 folio_clear_dirty_for_io(folio);
2085 /*
2086 * We have to be very careful here! Nothing protects writeback path
2087 * against i_size changes and the page can be writeably mapped into
2088 * page tables. So an application can be growing i_size and writing
2089 * data through mmap while writeback runs. folio_clear_dirty_for_io()
2090 * write-protects our page in page tables and the page cannot get
2091 * written to again until we release folio lock. So only after
2092 * folio_clear_dirty_for_io() we are safe to sample i_size for
2093 * ext4_bio_write_folio() to zero-out tail of the written page. We rely
2094 * on the barrier provided by folio_test_clear_dirty() in
2095 * folio_clear_dirty_for_io() to make sure i_size is really sampled only
2096 * after page tables are updated.
2097 */
2098 size = i_size_read(mpd->inode);
2099 len = folio_size(folio);
2100 if (folio_pos(folio) + len > size &&
2101 !ext4_verity_in_progress(mpd->inode))
2102 len = size & (len - 1);
2103 err = ext4_bio_write_folio(&mpd->io_submit, folio, len);
2104
2105 return err;
2106 }
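/*
 * Worked example of the tail computation above: folio_size() is a power
 * of two, so "size & (len - 1)" is i_size modulo the folio size. With a
 * 4096-byte folio and i_size == 10000, the folio at pos 8192 crosses EOF
 * and len becomes 10000 & 4095 == 1808, so only the first 1808 bytes are
 * submitted and ext4_bio_write_folio() zeroes out the tail.
 */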
2107
2108 #define BH_FLAGS (BIT(BH_Unwritten) | BIT(BH_Delay))
2109
2110 /*
2111 * mballoc gives us at most this number of blocks...
2112 * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
2113 * The rest of mballoc seems to handle chunks up to full group size.
2114 */
2115 #define MAX_WRITEPAGES_EXTENT_LEN 2048
2116
2117 /*
2118 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
2119 *
2120 * @mpd - extent of blocks
2121 * @lblk - logical number of the block in the file
2122 * @bh - buffer head we want to add to the extent
2123 *
2124 * The function is used to collect contiguous blocks in the same state. If the
2125 * buffer doesn't require mapping for writeback and we haven't started the
2126 * extent of buffers to map yet, the function returns 'true' immediately - the
2127 * caller can write the buffer right away. Otherwise the function returns true
2128 * if the block has been added to the extent, false if the block couldn't be
2129 * added.
2130 */
2131 static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
2132 struct buffer_head *bh)
2133 {
2134 struct ext4_map_blocks *map = &mpd->map;
2135
2136 /* Buffer that doesn't need mapping for writeback? */
2137 if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
2138 (!buffer_delay(bh) && !buffer_unwritten(bh))) {
2139 /* So far no extent to map => we write the buffer right away */
2140 if (map->m_len == 0)
2141 return true;
2142 return false;
2143 }
2144
2145 /* First block in the extent? */
2146 if (map->m_len == 0) {
2147 /* We cannot map unless handle is started... */
2148 if (!mpd->do_map)
2149 return false;
2150 map->m_lblk = lblk;
2151 map->m_len = 1;
2152 map->m_flags = bh->b_state & BH_FLAGS;
2153 return true;
2154 }
2155
2156 /* Don't go larger than mballoc is willing to allocate */
2157 if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
2158 return false;
2159
2160 /* Can we merge the block to our big extent? */
2161 if (lblk == map->m_lblk + map->m_len &&
2162 (bh->b_state & BH_FLAGS) == map->m_flags) {
2163 map->m_len++;
2164 return true;
2165 }
2166 return false;
2167 }
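/*
 * A compact decision table for the helper above (restating the logic, for
 * reference):
 *
 *	clean or plainly mapped bh, no extent started	-> true  (write now)
 *	clean or plainly mapped bh, extent in progress	-> false (flush first)
 *	first delalloc/unwritten bh, handle started	-> true  (open extent)
 *	contiguous lblk and identical BH_FLAGS state	-> true  (m_len++)
 *	anything else					-> false (cannot extend)
 */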
2168
2169 /*
2170 * mpage_process_page_bufs - submit page buffers for IO or add them to extent
2171 *
2172 * @mpd - extent of blocks for mapping
2173 * @head - the first buffer in the page
2174 * @bh - buffer we should start processing from
2175 * @lblk - logical number of the block in the file corresponding to @bh
2176 *
2177 * Walk through page buffers from @bh up to @head (exclusive) and either submit
2178 * the page for IO if all buffers in this page were mapped and there's no
2179 * accumulated extent of buffers to map or add buffers in the page to the
2180 * extent of buffers to map. The function returns 1 if the caller can continue
2181 * by processing the next page, 0 if it should stop adding buffers to the
2182 * extent to map because we cannot extend it anymore. It can also return value
2183 * < 0 in case of error during IO submission.
2184 */
2185 static int mpage_process_page_bufs(struct mpage_da_data *mpd,
2186 struct buffer_head *head,
2187 struct buffer_head *bh,
2188 ext4_lblk_t lblk)
2189 {
2190 struct inode *inode = mpd->inode;
2191 int err;
2192 ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
2193 >> inode->i_blkbits;
2194
2195 if (ext4_verity_in_progress(inode))
2196 blocks = EXT_MAX_BLOCKS;
2197
2198 do {
2199 BUG_ON(buffer_locked(bh));
2200
2201 if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
2202 /* Found extent to map? */
2203 if (mpd->map.m_len)
2204 return 0;
2205 /* Buffer needs mapping and handle is not started? */
2206 if (!mpd->do_map)
2207 return 0;
2208 /* Everything mapped so far and we hit EOF */
2209 break;
2210 }
2211 } while (lblk++, (bh = bh->b_this_page) != head);
2212 /* So far everything mapped? Submit the page for IO. */
2213 if (mpd->map.m_len == 0) {
2214 err = mpage_submit_folio(mpd, head->b_folio);
2215 if (err < 0)
2216 return err;
2217 mpage_folio_done(mpd, head->b_folio);
2218 }
2219 if (lblk >= blocks) {
2220 mpd->scanned_until_end = 1;
2221 return 0;
2222 }
2223 return 1;
2224 }
2225
2226 /*
2227 * mpage_process_folio - update folio buffers corresponding to changed extent
2228 * and may submit a fully mapped page for IO
2229 * @mpd: description of extent to map, on return next extent to map
2230 * @folio: Contains these buffers.
2231 * @m_lblk: logical block mapping.
2232 * @m_pblk: corresponding physical mapping.
2233 * @map_bh: determines on return whether this page requires any further
2234 * mapping or not.
2235 *
2236 * Scan given folio buffers corresponding to changed extent and update buffer
2237 * state according to new extent state.
2238 * We map delalloc buffers to their physical location, clear unwritten bits.
2239 * If the given folio is not fully mapped, we update @mpd to the next extent in
2240 * the given folio that needs mapping & return @map_bh as true.
2241 */
2242 static int mpage_process_folio(struct mpage_da_data *mpd, struct folio *folio,
2243 ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk,
2244 bool *map_bh)
2245 {
2246 struct buffer_head *head, *bh;
2247 ext4_io_end_t *io_end = mpd->io_submit.io_end;
2248 ext4_lblk_t lblk = *m_lblk;
2249 ext4_fsblk_t pblock = *m_pblk;
2250 int err = 0;
2251 ssize_t io_end_size = 0;
2252 struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end);
2253
2254 bh = head = folio_buffers(folio);
2255 do {
2256 if (lblk < mpd->map.m_lblk)
2257 continue;
2258 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2259 /*
2260 * Buffer after end of mapped extent.
2261 * Find next buffer in the folio to map.
2262 */
2263 mpd->map.m_len = 0;
2264 mpd->map.m_flags = 0;
2265 io_end_vec->size += io_end_size;
2266
2267 err = mpage_process_page_bufs(mpd, head, bh, lblk);
2268 if (err > 0)
2269 err = 0;
2270 if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
2271 io_end_vec = ext4_alloc_io_end_vec(io_end);
2272 if (IS_ERR(io_end_vec)) {
2273 err = PTR_ERR(io_end_vec);
2274 goto out;
2275 }
2276 io_end_vec->offset = EXT4_LBLK_TO_B(mpd->inode,
2277 mpd->map.m_lblk);
2278 }
2279 *map_bh = true;
2280 goto out;
2281 }
2282 if (buffer_delay(bh)) {
2283 clear_buffer_delay(bh);
2284 bh->b_blocknr = pblock++;
2285 }
2286 clear_buffer_unwritten(bh);
2287 io_end_size += i_blocksize(mpd->inode);
2288 } while (lblk++, (bh = bh->b_this_page) != head);
2289
2290 io_end_vec->size += io_end_size;
2291 *map_bh = false;
2292 out:
2293 *m_lblk = lblk;
2294 *m_pblk = pblock;
2295 return err;
2296 }
2297
2298 /*
2299 * mpage_map_and_submit_buffers - update buffers corresponding to changed
2300 * extent and submit fully mapped pages for IO
2301 *
2302 * @mpd - description of extent to map, on return next extent to map
2303 *
2304 * Scan buffers corresponding to changed extent (we expect corresponding pages
2305 * to be already locked) and update buffer state according to new extent state.
2306 * We map delalloc buffers to their physical location, clear unwritten bits,
2307 * and mark buffers as uninit when we perform writes to unwritten extents
2308 * and do extent conversion after IO is finished. If the last page is not fully
2309 * mapped, we update @mpd to the next extent in the last page that needs
2310 * mapping. Otherwise we submit the page for IO.
2311 */
2312 static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2313 {
2314 struct folio_batch fbatch;
2315 unsigned nr, i;
2316 struct inode *inode = mpd->inode;
2317 pgoff_t start, end;
2318 ext4_lblk_t lblk;
2319 ext4_fsblk_t pblock;
2320 int err;
2321 bool map_bh = false;
2322
2323 start = EXT4_LBLK_TO_PG(inode, mpd->map.m_lblk);
2324 end = EXT4_LBLK_TO_PG(inode, mpd->map.m_lblk + mpd->map.m_len - 1);
2325 pblock = mpd->map.m_pblk;
2326
2327 folio_batch_init(&fbatch);
2328 while (start <= end) {
2329 nr = filemap_get_folios(inode->i_mapping, &start, end, &fbatch);
2330 if (nr == 0)
2331 break;
2332 for (i = 0; i < nr; i++) {
2333 struct folio *folio = fbatch.folios[i];
2334
2335 lblk = EXT4_PG_TO_LBLK(inode, folio->index);
2336 err = mpage_process_folio(mpd, folio, &lblk, &pblock,
2337 &map_bh);
2338 /*
2339 * If map_bh is true, the page may require further bh
2340 * mapping, or the page was submitted for IO. So we
2341 * return to the caller to do further extent mapping.
2342 */
2343 if (err < 0 || map_bh)
2344 goto out;
2345 /* Page fully mapped - let IO run! */
2346 err = mpage_submit_folio(mpd, folio);
2347 if (err < 0)
2348 goto out;
2349 mpage_folio_done(mpd, folio);
2350 }
2351 folio_batch_release(&fbatch);
2352 }
2353 /* Extent fully mapped and matches with page boundary. We are done. */
2354 mpd->map.m_len = 0;
2355 mpd->map.m_flags = 0;
2356 return 0;
2357 out:
2358 folio_batch_release(&fbatch);
2359 return err;
2360 }
2361
2362 static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2363 {
2364 struct inode *inode = mpd->inode;
2365 struct ext4_map_blocks *map = &mpd->map;
2366 int get_blocks_flags;
2367 int err, dioread_nolock;
2368
2369 /* Make sure transaction has enough credits for this extent */
2370 err = ext4_journal_ensure_extent_credits(handle, inode);
2371 if (err < 0)
2372 return err;
2373
2374 trace_ext4_da_write_pages_extent(inode, map);
2375 /*
2376 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
2377 * to convert an unwritten extent to be initialized (in the case
2378 * where we have written into one or more preallocated blocks). It is
2379 * possible that we're going to need more metadata blocks than
2380 * previously reserved. However we must not fail because we're in
2381 * writeback and there is nothing we can do about it so it might result
2382 * in data loss. So use reserved blocks to allocate metadata if
2383 * possible. In addition, do not cache any unrelated extents, as it
2384 * only holds the folio lock but does not hold the i_rwsem or
2385 * invalidate_lock, which could corrupt the extent status tree.
2386 */
2387 get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
2388 EXT4_GET_BLOCKS_METADATA_NOFAIL |
2389 EXT4_GET_BLOCKS_IO_SUBMIT |
2390 EXT4_EX_NOCACHE;
2391
2392 dioread_nolock = ext4_should_dioread_nolock(inode);
2393 if (dioread_nolock)
2394 get_blocks_flags |= EXT4_GET_BLOCKS_UNWRIT_EXT;
2395
2396 err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2397 if (err < 0)
2398 return err;
2399 if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
2400 if (!mpd->io_submit.io_end->handle &&
2401 ext4_handle_valid(handle)) {
2402 mpd->io_submit.io_end->handle = handle->h_rsv_handle;
2403 handle->h_rsv_handle = NULL;
2404 }
2405 ext4_set_io_unwritten_flag(mpd->io_submit.io_end);
2406 }
2407
2408 BUG_ON(map->m_len == 0);
2409 return 0;
2410 }
2411
2412 /*
2413 * This is used to submit mapped buffers in a single folio that is not fully
2414 * mapped for various reasons, such as insufficient space or journal credits.
2415 */
2416 static int mpage_submit_partial_folio(struct mpage_da_data *mpd)
2417 {
2418 struct inode *inode = mpd->inode;
2419 struct folio *folio;
2420 loff_t pos;
2421 int ret;
2422
2423 folio = filemap_get_folio(inode->i_mapping,
2424 mpd->start_pos >> PAGE_SHIFT);
2425 if (IS_ERR(folio))
2426 return PTR_ERR(folio);
2427 /*
2428 * The mapped position should be within the current processing folio
2429 * but must not be the folio start position.
2430 */
2431 pos = ((loff_t)mpd->map.m_lblk) << inode->i_blkbits;
2432 if (WARN_ON_ONCE((folio_pos(folio) == pos) ||
2433 !folio_contains(folio, pos >> PAGE_SHIFT)))
2434 return -EINVAL;
2435
2436 ret = mpage_submit_folio(mpd, folio);
2437 if (ret)
2438 goto out;
2439 /*
2440 * Update start_pos to prevent this folio from being released in
2441 * mpage_release_unused_pages(), it will be reset to the aligned folio
2442 * pos when this folio is written again in the next round. Additionally,
2443 * do not update wbc->nr_to_write here, as it will be updated once the
2444 * entire folio has finished processing.
2445 */
2446 mpd->start_pos = pos;
2447 out:
2448 folio_unlock(folio);
2449 folio_put(folio);
2450 return ret;
2451 }
2452
2453 /*
2454 * mpage_map_and_submit_extent - map extent starting at mpd->map.m_lblk
2455 * of length mpd->map.m_len and submit pages underlying it for IO
2456 *
2457 * @handle - handle for journal operations
2458 * @mpd - extent to map
2459 * @give_up_on_write - we set this to true iff there is a fatal error and there
2460 * is no hope of writing the data. The caller should discard
2461 * dirty pages to avoid infinite loops.
2462 *
2463 * The function maps the extent starting at mpd->map.m_lblk of length
2464 * mpd->map.m_len. If it is delayed, blocks are allocated; if it is
2465 * unwritten, we may need to convert it to initialized or split the
2466 * described range from a larger unwritten extent. Note that we need
2467 * not map all the described range since allocation can return fewer
2468 * blocks or the range may be covered by more unwritten extents. We
2469 * cannot map more because we are limited by reserved transaction
2470 * credits. On the other hand we always make sure that the last touched
2471 * page is fully mapped so that it can be written out (and thus forward progress is guaranteed). After mapping we submit all mapped pages for IO.
2472 */
2473 static int mpage_map_and_submit_extent(handle_t *handle,
2474 struct mpage_da_data *mpd,
2475 bool *give_up_on_write)
2476 {
2477 struct inode *inode = mpd->inode;
2478 struct ext4_map_blocks *map = &mpd->map;
2479 int err;
2480 loff_t disksize;
2481 int progress = 0;
2482 ext4_io_end_t *io_end = mpd->io_submit.io_end;
2483 struct ext4_io_end_vec *io_end_vec;
2484
2485 io_end_vec = ext4_alloc_io_end_vec(io_end);
2486 if (IS_ERR(io_end_vec))
2487 return PTR_ERR(io_end_vec);
2488 io_end_vec->offset = EXT4_LBLK_TO_B(inode, map->m_lblk);
2489 do {
2490 err = mpage_map_one_extent(handle, mpd);
2491 if (err < 0) {
2492 struct super_block *sb = inode->i_sb;
2493
2494 if (ext4_emergency_state(sb))
2495 goto invalidate_dirty_pages;
2496 /*
2497 * Let the upper layers retry transient errors.
2498 * In the case of ENOSPC, if ext4_count_free_clusters()
2499 * is non-zero, a commit should free up blocks.
2500 */
2501 if ((err == -ENOMEM) || (err == -EAGAIN) ||
2502 (err == -ENOSPC && ext4_count_free_clusters(sb))) {
2503 /*
2504 * We may have already allocated extents for
2505 * some bhs inside the folio, issue the
2506 * corresponding data to prevent stale data.
2507 */
2508 if (progress) {
2509 if (mpage_submit_partial_folio(mpd))
2510 goto invalidate_dirty_pages;
2511 goto update_disksize;
2512 }
2513 return err;
2514 }
2515 ext4_msg(sb, KERN_CRIT,
2516 "Delayed block allocation failed for "
2517 "inode %llu at logical offset %llu with"
2518 " max blocks %u with error %d",
2519 inode->i_ino,
2520 (unsigned long long)map->m_lblk,
2521 (unsigned)map->m_len, -err);
2522 ext4_msg(sb, KERN_CRIT,
2523 "This should not happen!! Data will "
2524 "be lost\n");
2525 if (err == -ENOSPC)
2526 ext4_print_free_blocks(inode);
2527 invalidate_dirty_pages:
2528 *give_up_on_write = true;
2529 return err;
2530 }
2531 progress = 1;
2532 /*
2533 * Update buffer state, submit mapped pages, and get us new
2534 * extent to map
2535 */
2536 err = mpage_map_and_submit_buffers(mpd);
2537 if (err < 0)
2538 goto update_disksize;
2539 } while (map->m_len);
2540
2541 update_disksize:
2542 /*
2543 * Update on-disk size after IO is submitted. Races with
2544 * truncate are avoided by checking i_size under i_data_sem.
2545 */
2546 disksize = mpd->start_pos;
2547 if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
2548 int err2;
2549 loff_t i_size;
2550
2551 down_write(&EXT4_I(inode)->i_data_sem);
2552 i_size = i_size_read(inode);
2553 if (disksize > i_size)
2554 disksize = i_size;
2555 if (disksize > EXT4_I(inode)->i_disksize)
2556 EXT4_I(inode)->i_disksize = disksize;
2557 up_write(&EXT4_I(inode)->i_data_sem);
2558 err2 = ext4_mark_inode_dirty(handle, inode);
2559 if (err2) {
2560 ext4_error_err(inode->i_sb, -err2,
2561 "Failed to mark inode %llu dirty",
2562 inode->i_ino);
2563 }
2564 if (!err)
2565 err = err2;
2566 }
2567 return err;
2568 }
2569
2570 static int ext4_journal_folio_buffers(handle_t *handle, struct folio *folio,
2571 size_t len)
2572 {
2573 struct buffer_head *page_bufs = folio_buffers(folio);
2574 struct inode *inode = folio->mapping->host;
2575 int ret, err;
2576
2577 ret = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
2578 NULL, do_journal_get_write_access);
2579 err = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
2580 NULL, write_end_fn);
2581 if (ret == 0)
2582 ret = err;
2583 err = ext4_jbd2_inode_add_write(handle, inode, folio_pos(folio), len);
2584 if (ret == 0)
2585 ret = err;
2586 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
2587
2588 return ret;
2589 }
2590
2591 static int mpage_journal_page_buffers(handle_t *handle,
2592 struct mpage_da_data *mpd,
2593 struct folio *folio)
2594 {
2595 struct inode *inode = mpd->inode;
2596 loff_t size = i_size_read(inode);
2597 size_t len = folio_size(folio);
2598
2599 folio_clear_checked(folio);
2600 mpd->wbc->nr_to_write -= folio_nr_pages(folio);
2601
2602 if (folio_pos(folio) + len > size &&
2603 !ext4_verity_in_progress(inode))
2604 len = size & (len - 1);
2605
2606 return ext4_journal_folio_buffers(handle, folio, len);
2607 }
2608
2609 /*
2610 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
2611 * needing mapping, submit mapped pages
2612 *
2613 * @mpd - where to look for pages
2614 *
2615 * Walk dirty pages in the mapping. If they are fully mapped, submit them for
2616 * IO immediately. If we cannot map blocks, we submit just already mapped
2617 * buffers in the page for IO and keep page dirty. When we can map blocks and
2618 * we find a page which isn't mapped we start accumulating extent of buffers
2619 * underlying these pages that needs mapping (formed by either delayed or
2620 * unwritten buffers). We also lock the pages containing these buffers. The
2621 * extent found is returned in @mpd structure (starting at
2622 * mpd->map.m_lblk with length mpd->map.m_len blocks).
2623 *
2624 * Note that this function can attach bios to one io_end structure which are
2625 * neither logically nor physically contiguous. Although it may seem like an
2626 * unnecessary complication, it is actually inevitable in blocksize < pagesize
2627 * case as we need to track IO to all buffers underlying a page in one io_end.
2628 */
2629 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2630 {
2631 struct address_space *mapping = mpd->inode->i_mapping;
2632 struct folio_batch fbatch;
2633 unsigned int nr_folios;
2634 pgoff_t index = mpd->start_pos >> PAGE_SHIFT;
2635 pgoff_t end = mpd->end_pos >> PAGE_SHIFT;
2636 xa_mark_t tag;
2637 int i, err = 0;
2638 ext4_lblk_t lblk;
2639 struct buffer_head *head;
2640 handle_t *handle = NULL;
2641 int bpp = ext4_journal_blocks_per_folio(mpd->inode);
2642
2643 tag = wbc_to_tag(mpd->wbc);
2644
2645 mpd->map.m_len = 0;
2646 mpd->next_pos = mpd->start_pos;
2647 if (ext4_should_journal_data(mpd->inode)) {
2648 handle = ext4_journal_start(mpd->inode, EXT4_HT_WRITE_PAGE,
2649 bpp);
2650 if (IS_ERR(handle))
2651 return PTR_ERR(handle);
2652 }
2653 folio_batch_init(&fbatch);
2654 while (index <= end) {
2655 nr_folios = filemap_get_folios_tag(mapping, &index, end,
2656 tag, &fbatch);
2657 if (nr_folios == 0)
2658 break;
2659
2660 for (i = 0; i < nr_folios; i++) {
2661 struct folio *folio = fbatch.folios[i];
2662
2663 /*
2664 * Accumulated enough dirty pages? This doesn't apply
2665 * to WB_SYNC_ALL mode. For integrity sync we have to
2666 * keep going because someone may be concurrently
2667 * dirtying pages, and we might have synced a lot of
2668 * newly appeared dirty pages, but have not synced all
2669 * of the old dirty pages.
2670 */
2671 if (mpd->wbc->sync_mode == WB_SYNC_NONE &&
2672 mpd->wbc->nr_to_write <=
2673 EXT4_LBLK_TO_PG(mpd->inode, mpd->map.m_len))
2674 goto out;
2675
2676 /* If we can't merge this page, we are done. */
2677 if (mpd->map.m_len > 0 &&
2678 mpd->next_pos != folio_pos(folio))
2679 goto out;
2680
2681 if (handle) {
2682 err = ext4_journal_ensure_credits(handle, bpp,
2683 0);
2684 if (err < 0)
2685 goto out;
2686 }
2687
2688 folio_lock(folio);
2689 /*
2690 * If the page is no longer dirty, or its mapping no
2691 * longer corresponds to inode we are writing (which
2692 * means it has been truncated or invalidated), or the
2693 * page is already under writeback and we are not doing
2694 * a data integrity writeback, skip the page
2695 */
2696 if (!folio_test_dirty(folio) ||
2697 (folio_test_writeback(folio) &&
2698 (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
2699 unlikely(folio->mapping != mapping)) {
2700 folio_unlock(folio);
2701 continue;
2702 }
2703
2704 folio_wait_writeback(folio);
2705 BUG_ON(folio_test_writeback(folio));
2706
2707 /*
2708 * Should never happen but for buggy code in
2709 * other subsystems that call
2710 * set_page_dirty() without properly warning
2711 * the file system first. See [1] for more
2712 * information.
2713 *
2714 * [1] https://lore.kernel.org/linux-mm/20180103100430.GE4911@quack2.suse.cz
2715 */
2716 if (!folio_buffers(folio)) {
2717 ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", folio->index);
2718 folio_clear_dirty(folio);
2719 folio_unlock(folio);
2720 continue;
2721 }
2722
2723 if (mpd->map.m_len == 0)
2724 mpd->start_pos = folio_pos(folio);
2725 mpd->next_pos = folio_next_pos(folio);
2726 /*
2727 * Writeout when we cannot modify metadata is simple.
2728 * Just submit the page. For data=journal mode we
2729 * first handle writeout of the page for checkpoint and
2730 * only after that handle delayed page dirtying. This
2731 * makes sure current data is checkpointed to the final
2732 * location before possibly journalling it again which
2733 * is desirable when the page is frequently dirtied
2734 * through a pin.
2735 */
2736 if (!mpd->can_map) {
2737 err = mpage_submit_folio(mpd, folio);
2738 if (err < 0)
2739 goto out;
2740 /* Pending dirtying of journalled data? */
2741 if (folio_test_checked(folio)) {
2742 err = mpage_journal_page_buffers(handle,
2743 mpd, folio);
2744 if (err < 0)
2745 goto out;
2746 mpd->journalled_more_data = 1;
2747 }
2748 mpage_folio_done(mpd, folio);
2749 } else {
2750 /* Add all dirty buffers to mpd */
2751 lblk = EXT4_PG_TO_LBLK(mpd->inode, folio->index);
2752 head = folio_buffers(folio);
2753 err = mpage_process_page_bufs(mpd, head, head,
2754 lblk);
2755 if (err <= 0)
2756 goto out;
2757 err = 0;
2758 }
2759 }
2760 folio_batch_release(&fbatch);
2761 cond_resched();
2762 }
2763 mpd->scanned_until_end = 1;
2764 if (handle)
2765 ext4_journal_stop(handle);
2766 return 0;
2767 out:
2768 folio_batch_release(&fbatch);
2769 if (handle)
2770 ext4_journal_stop(handle);
2771 return err;
2772 }
2773
2774 static int ext4_do_writepages(struct mpage_da_data *mpd)
2775 {
2776 struct writeback_control *wbc = mpd->wbc;
2777 pgoff_t writeback_index = 0;
2778 long nr_to_write = wbc->nr_to_write;
2779 int range_whole = 0;
2780 int cycled = 1;
2781 handle_t *handle = NULL;
2782 struct inode *inode = mpd->inode;
2783 struct address_space *mapping = inode->i_mapping;
2784 int needed_blocks, rsv_blocks = 0, ret = 0;
2785 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2786 struct blk_plug plug;
2787 bool give_up_on_write = false;
2788
2789 trace_ext4_writepages(inode, wbc);
2790
2791 /*
2792 * No pages to write? This is mainly a kludge to avoid starting
2793 * a transaction for special inodes like journal inode on last iput()
2794 * because that could violate lock ordering on umount
2795 */
2796 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2797 goto out_writepages;
2798
2799 /*
2800 * If the filesystem has aborted, it is read-only, so return
2801 * right away instead of dumping stack traces later on that
2802 * will obscure the real source of the problem. We test
2803 * fs shutdown state instead of sb->s_flags' SB_RDONLY because
2804 * the latter could be true if the filesystem is mounted
2805 * read-only, and in that case, ext4_writepages should
2806 * *never* be called, so if that ever happens, we would want
2807 * the stack trace.
2808 */
2809 ret = ext4_emergency_state(mapping->host->i_sb);
2810 if (unlikely(ret))
2811 goto out_writepages;
2812
2813 /*
2814 * If we have inline data and arrive here, it means that
2815 * we will soon create the block for the 1st page, so
2816 * we'd better clear the inline data here.
2817 */
2818 if (ext4_has_inline_data(inode)) {
2819 /* Just inode will be modified... */
2820 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2821 if (IS_ERR(handle)) {
2822 ret = PTR_ERR(handle);
2823 goto out_writepages;
2824 }
2825 BUG_ON(ext4_test_inode_state(inode,
2826 EXT4_STATE_MAY_INLINE_DATA));
2827 ext4_destroy_inline_data(handle, inode);
2828 ext4_journal_stop(handle);
2829 }
2830
2831 /*
2832 * data=journal mode does not do delalloc so we just need to writeout /
2833 * journal already mapped buffers. On the other hand we need to commit
2834 * transaction to make data stable. We expect all the data to be
2835 * already in the journal (the only exception are DMA pinned pages
2836 * dirtied behind our back) so we commit transaction here and run the
2837 * writeback loop to checkpoint them. The checkpointing is not actually
2838 * necessary to make data persistent *but* quite a few places (extent
2839 * shifting operations, fsverity, ...) depend on being able to drop
2840 * pagecache pages after calling filemap_write_and_wait() and for that
2841 * checkpointing needs to happen.
2842 */
2843 if (ext4_should_journal_data(inode)) {
2844 mpd->can_map = 0;
2845 if (wbc->sync_mode == WB_SYNC_ALL)
2846 ext4_fc_commit(sbi->s_journal,
2847 EXT4_I(inode)->i_datasync_tid);
2848 }
2849 mpd->journalled_more_data = 0;
2850
2851 if (ext4_should_dioread_nolock(inode)) {
2852 int bpf = ext4_journal_blocks_per_folio(inode);
2853 /*
2854 * We may need to convert up to one extent per block in
2855 * the folio and we may dirty the inode.
2856 */
2857 rsv_blocks = 1 + ext4_ext_index_trans_blocks(inode, bpf);
2858 }
2859
2860 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2861 range_whole = 1;
2862
2863 if (wbc->range_cyclic) {
2864 writeback_index = mapping->writeback_index;
2865 if (writeback_index)
2866 cycled = 0;
2867 mpd->start_pos = writeback_index << PAGE_SHIFT;
2868 mpd->end_pos = LLONG_MAX;
2869 } else {
2870 mpd->start_pos = wbc->range_start;
2871 mpd->end_pos = wbc->range_end;
2872 }
2873
2874 ext4_io_submit_init(&mpd->io_submit, wbc);
2875 retry:
2876 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2877 tag_pages_for_writeback(mapping, mpd->start_pos >> PAGE_SHIFT,
2878 mpd->end_pos >> PAGE_SHIFT);
2879 blk_start_plug(&plug);
2880
2881 /*
2882 * First writeback pages that don't need mapping - we can avoid
2883 * starting a transaction unnecessarily and also avoid being blocked
2884 * in the block layer on device congestion while having a transaction
2885 * started.
2886 */
2887 mpd->do_map = 0;
2888 mpd->scanned_until_end = 0;
2889 mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2890 if (!mpd->io_submit.io_end) {
2891 ret = -ENOMEM;
2892 goto unplug;
2893 }
2894 ret = mpage_prepare_extent_to_map(mpd);
2895 /* Unlock pages we didn't use */
2896 mpage_release_unused_pages(mpd, false);
2897 /* Submit prepared bio */
2898 ext4_io_submit(&mpd->io_submit);
2899 ext4_put_io_end_defer(mpd->io_submit.io_end);
2900 mpd->io_submit.io_end = NULL;
2901 if (ret < 0)
2902 goto unplug;
2903
2904 while (!mpd->scanned_until_end && wbc->nr_to_write > 0) {
2905 /* For each extent of pages we use new io_end */
2906 mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2907 if (!mpd->io_submit.io_end) {
2908 ret = -ENOMEM;
2909 break;
2910 }
2911
2912 WARN_ON_ONCE(!mpd->can_map);
2913 /*
2914 * We have two constraints: we find one extent to map and we
2915 * must always write out a whole page (makes a difference when
2916 * blocksize < pagesize) so that we don't block on IO when we
2917 * try to write out the rest of the page. Journalled mode is
2918 * not supported by delalloc.
2919 */
2920 BUG_ON(ext4_should_journal_data(inode));
2921 /*
2922 * Calculate the number of credits needed to reserve for one
2923 * extent of up to MAX_WRITEPAGES_EXTENT_LEN blocks. It will
2924 * attempt to extend the transaction or start a new iteration
2925 * if the reserved credits are insufficient.
2926 */
2927 needed_blocks = ext4_chunk_trans_blocks(inode,
2928 MAX_WRITEPAGES_EXTENT_LEN);
2929 /* start a new transaction */
2930 handle = ext4_journal_start_with_reserve(inode,
2931 EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
2932 if (IS_ERR(handle)) {
2933 ret = PTR_ERR(handle);
2934 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2935 "%ld pages, ino %llu; err %d", __func__,
2936 wbc->nr_to_write, inode->i_ino, ret);
2937 /* Release allocated io_end */
2938 ext4_put_io_end(mpd->io_submit.io_end);
2939 mpd->io_submit.io_end = NULL;
2940 break;
2941 }
2942 mpd->do_map = 1;
2943
2944 trace_ext4_da_write_folios_start(inode, mpd->start_pos,
2945 mpd->next_pos, wbc);
2946 ret = mpage_prepare_extent_to_map(mpd);
2947 if (!ret && mpd->map.m_len)
2948 ret = mpage_map_and_submit_extent(handle, mpd,
2949 &give_up_on_write);
2950 /*
2951 * Caution: If the handle is synchronous,
2952 * ext4_journal_stop() can wait for transaction commit
2953 * to finish which may depend on writeback of pages to
2954 * complete or on page lock to be released. In that
2955 * case, we have to wait until after we have
2956 * submitted all the IO, released page locks we hold,
2957 * and dropped io_end reference (for extent conversion
2958 * to be able to complete) before stopping the handle.
2959 */
2960 if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
2961 ext4_journal_stop(handle);
2962 handle = NULL;
2963 mpd->do_map = 0;
2964 }
2965 /* Unlock pages we didn't use */
2966 mpage_release_unused_pages(mpd, give_up_on_write);
2967 /* Submit prepared bio */
2968 ext4_io_submit(&mpd->io_submit);
2969
2970 /*
2971 * Drop our io_end reference we got from init. We have
2972 * to be careful and use deferred io_end finishing if
2973 * we are still holding the transaction as we can
2974 * release the last reference to io_end which may end
2975 * up doing unwritten extent conversion.
2976 */
2977 if (handle) {
2978 ext4_put_io_end_defer(mpd->io_submit.io_end);
2979 ext4_journal_stop(handle);
2980 } else
2981 ext4_put_io_end(mpd->io_submit.io_end);
2982 mpd->io_submit.io_end = NULL;
2983 trace_ext4_da_write_folios_end(inode, mpd->start_pos,
2984 mpd->next_pos, wbc, ret);
2985
2986 if (ret == -ENOSPC && sbi->s_journal) {
2987 /*
2988 * Commit the transaction which would
2989 * free blocks released in the transaction
2990 * and try again
2991 */
2992 jbd2_journal_force_commit_nested(sbi->s_journal);
2993 ret = 0;
2994 continue;
2995 }
2996 if (ret == -EAGAIN)
2997 ret = 0;
2998 /* Fatal error - ENOMEM, EIO... */
2999 if (ret)
3000 break;
3001 }
3002 unplug:
3003 blk_finish_plug(&plug);
3004 if (!ret && !cycled && wbc->nr_to_write > 0) {
3005 cycled = 1;
3006 mpd->end_pos = (writeback_index << PAGE_SHIFT) - 1;
3007 mpd->start_pos = 0;
3008 goto retry;
3009 }
3010
3011 /* Update index */
3012 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
3013 /*
3014 * Set the writeback_index so that range_cyclic
3015 * mode will write it back later
3016 */
3017 mapping->writeback_index = mpd->start_pos >> PAGE_SHIFT;
3018
3019 out_writepages:
3020 trace_ext4_writepages_result(inode, wbc, ret,
3021 nr_to_write - wbc->nr_to_write);
3022 return ret;
3023 }
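/*
 * Illustrative range_cyclic walk for the function above: if
 * mapping->writeback_index was 100 on entry, the first pass scans from
 * pos 100 << PAGE_SHIFT to LLONG_MAX; if that pass finishes cleanly with
 * budget left, the retry pass wraps around and scans pos 0 to
 * (100 << PAGE_SHIFT) - 1, so the whole mapping is covered exactly once.
 */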
3024
3025 static int ext4_writepages(struct address_space *mapping,
3026 struct writeback_control *wbc)
3027 {
3028 struct super_block *sb = mapping->host->i_sb;
3029 struct mpage_da_data mpd = {
3030 .inode = mapping->host,
3031 .wbc = wbc,
3032 .can_map = 1,
3033 };
3034 int ret;
3035 int alloc_ctx;
3036
3037 ret = ext4_emergency_state(sb);
3038 if (unlikely(ret))
3039 return ret;
3040
3041 alloc_ctx = ext4_writepages_down_read(sb);
3042 ret = ext4_do_writepages(&mpd);
3043 /*
3044 * For data=journal writeback we could have come across pages marked
3045 * for delayed dirtying (PageChecked) which were just added to the
3046 * running transaction. Try once more to get them to stable storage.
3047 */
3048 if (!ret && mpd.journalled_more_data)
3049 ret = ext4_do_writepages(&mpd);
3050 ext4_writepages_up_read(sb, alloc_ctx);
3051
3052 return ret;
3053 }
3054
3055 int ext4_normal_submit_inode_data_buffers(struct jbd2_inode *jinode)
3056 {
3057 loff_t range_start, range_end;
3058 struct writeback_control wbc = {
3059 .sync_mode = WB_SYNC_ALL,
3060 .nr_to_write = LONG_MAX,
3061 };
3062 struct mpage_da_data mpd = {
3063 .inode = jinode->i_vfs_inode,
3064 .wbc = &wbc,
3065 .can_map = 0,
3066 };
3067
3068 if (!jbd2_jinode_get_dirty_range(jinode, &range_start, &range_end))
3069 return 0;
3070
3071 wbc.range_start = range_start;
3072 wbc.range_end = range_end;
3073
3074 return ext4_do_writepages(&mpd);
3075 }
3076
3077 static int ext4_dax_writepages(struct address_space *mapping,
3078 struct writeback_control *wbc)
3079 {
3080 int ret;
3081 long nr_to_write = wbc->nr_to_write;
3082 struct inode *inode = mapping->host;
3083 int alloc_ctx;
3084
3085 ret = ext4_emergency_state(inode->i_sb);
3086 if (unlikely(ret))
3087 return ret;
3088
3089 alloc_ctx = ext4_writepages_down_read(inode->i_sb);
3090 trace_ext4_writepages(inode, wbc);
3091
3092 ret = dax_writeback_mapping_range(mapping,
3093 EXT4_SB(inode->i_sb)->s_daxdev, wbc);
3094 trace_ext4_writepages_result(inode, wbc, ret,
3095 nr_to_write - wbc->nr_to_write);
3096 ext4_writepages_up_read(inode->i_sb, alloc_ctx);
3097 return ret;
3098 }
3099
3100 static int ext4_nonda_switch(struct super_block *sb)
3101 {
3102 s64 free_clusters, dirty_clusters;
3103 struct ext4_sb_info *sbi = EXT4_SB(sb);
3104
3105 /*
3106 * Switch to non-delalloc mode if we are running low
3107 * on free blocks. The free block accounting via percpu
3108 * counters can get slightly wrong with percpu_counter_batch getting
3109 * accumulated on each CPU without updating the global counters.
3110 * Delalloc needs an accurate free block accounting, so switch
3111 * to non-delalloc when we are near the error range.
3112 */
3113 free_clusters =
3114 percpu_counter_read_positive(&sbi->s_freeclusters_counter);
3115 dirty_clusters =
3116 percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
3117 /*
3118 * Start pushing delalloc when 1/2 of free blocks are dirty.
3119 */
3120 if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
3121 try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
3122
3123 if (2 * free_clusters < 3 * dirty_clusters ||
3124 free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
3125 /*
3126 * free block count is less than 150% of dirty blocks
3127 * or free blocks is less than watermark
3128 */
3129 return 1;
3130 }
3131 return 0;
3132 }
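/*
 * Worked example for the thresholds above (illustrative numbers): with
 * free_clusters == 1000, background writeback is kicked once
 * dirty_clusters exceeds 500 (free < 2 * dirty), and the switch to
 * nodelalloc happens once dirty_clusters exceeds ~666 (2 * 1000 <
 * 3 * dirty) or when free drops below dirty + EXT4_FREECLUSTERS_WATERMARK.
 */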
3133
3134 static int ext4_da_write_begin(const struct kiocb *iocb,
3135 struct address_space *mapping,
3136 loff_t pos, unsigned len,
3137 struct folio **foliop, void **fsdata)
3138 {
3139 int ret, retries = 0;
3140 struct folio *folio;
3141 pgoff_t index;
3142 struct inode *inode = mapping->host;
3143
3144 ret = ext4_emergency_state(inode->i_sb);
3145 if (unlikely(ret))
3146 return ret;
3147
3148 index = pos >> PAGE_SHIFT;
3149
3150 if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) {
3151 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
3152 return ext4_write_begin(iocb, mapping, pos,
3153 len, foliop, fsdata);
3154 }
3155 *fsdata = (void *)0;
3156 trace_ext4_da_write_begin(inode, pos, len);
3157
3158 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
3159 ret = ext4_generic_write_inline_data(mapping, inode, pos, len,
3160 foliop, fsdata, true);
3161 if (ret < 0)
3162 return ret;
3163 if (ret == 1)
3164 return 0;
3165 }
3166
3167 retry:
3168 folio = write_begin_get_folio(iocb, mapping, index, len);
3169 if (IS_ERR(folio))
3170 return PTR_ERR(folio);
3171
3172 if (len > folio_next_pos(folio) - pos)
3173 len = folio_next_pos(folio) - pos;
3174
3175 ret = ext4_block_write_begin(NULL, folio, pos, len,
3176 ext4_da_get_block_prep);
3177 if (ret < 0) {
3178 folio_unlock(folio);
3179 folio_put(folio);
3180 /*
3181 * ext4_block_write_begin may have instantiated a few blocks
3182 * outside i_size. Trim these off again. Don't need
3183 * i_size_read because we hold inode lock.
3184 */
3185 if (pos + len > inode->i_size)
3186 ext4_truncate_failed_write(inode);
3187
3188 if (ret == -ENOSPC &&
3189 ext4_should_retry_alloc(inode->i_sb, &retries))
3190 goto retry;
3191 return ret;
3192 }
3193
3194 *foliop = folio;
3195 return ret;
3196 }
3197
3198 /*
3199 * Check if we should update i_disksize
3200 * when writing to the end of the file without requiring block allocation
3201 */
3202 static int ext4_da_should_update_i_disksize(struct folio *folio,
3203 unsigned long offset)
3204 {
3205 struct buffer_head *bh;
3206 struct inode *inode = folio->mapping->host;
3207 unsigned int idx;
3208 int i;
3209
3210 bh = folio_buffers(folio);
3211 idx = offset >> inode->i_blkbits;
3212
3213 for (i = 0; i < idx; i++)
3214 bh = bh->b_this_page;
3215
3216 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
3217 return 0;
3218 return 1;
3219 }
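/*
 * Worked example for the buffer walk above (illustrative geometry): with
 * 1KiB blocks in a 4KiB folio, offset == 2500 gives idx = 2500 >> 10 = 2,
 * so the loop steps from the first buffer_head to the third, covering
 * bytes 2048..3071, and only that buffer's mapped/delay/unwritten state
 * decides whether i_disksize is updated.
 */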
3220
3221 static int ext4_da_do_write_end(struct address_space *mapping,
3222 loff_t pos, unsigned len, unsigned copied,
3223 struct folio *folio)
3224 {
3225 struct inode *inode = mapping->host;
3226 loff_t old_size = inode->i_size;
3227 bool disksize_changed = false;
3228 loff_t new_i_size;
3229 handle_t *handle;
3230
3231 if (unlikely(!folio_buffers(folio))) {
3232 folio_unlock(folio);
3233 folio_put(folio);
3234 return -EIO;
3235 }
3236 /*
3237 * block_write_end() will mark the inode as dirty with I_DIRTY_PAGES
3238 * flag, which is all that's needed to trigger page writeback.
3239 */
3240 copied = block_write_end(pos, len, copied, folio);
3241 new_i_size = pos + copied;
3242
3243 /*
3244 * It's important to update i_size while still holding folio lock,
3245 * because folio writeout could otherwise come in and zero beyond
3246 * i_size.
3247 *
3248 * Since we are holding inode lock, we are sure i_disksize <=
3249 * i_size. We also know that if i_disksize < i_size, there are
3250 * delalloc writes pending in the range up to i_size. If the end of
3251 * the current write is <= i_size, there's no need to touch
3252 * i_disksize since writeback will push i_disksize up to i_size
3253 * eventually. If the end of the current write is > i_size and
3254 * inside an allocated block which ext4_da_should_update_i_disksize()
3255 * checked, we need to update i_disksize here, since the
3256 * ext4_writepages() paths that do not allocate blocks never update it.
3257 */
3258 if (new_i_size > inode->i_size) {
3259 unsigned long end;
3260
3261 i_size_write(inode, new_i_size);
3262 end = offset_in_folio(folio, new_i_size - 1);
3263 if (copied && ext4_da_should_update_i_disksize(folio, end)) {
3264 ext4_update_i_disksize(inode, new_i_size);
3265 disksize_changed = true;
3266 }
3267 }
3268
3269 folio_unlock(folio);
3270 folio_put(folio);
3271
3272 if (pos > old_size)
3273 pagecache_isize_extended(inode, old_size, pos);
3274
3275 if (!disksize_changed)
3276 return copied;
3277
3278 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
3279 if (IS_ERR(handle))
3280 return PTR_ERR(handle);
3281 ext4_mark_inode_dirty(handle, inode);
3282 ext4_journal_stop(handle);
3283
3284 return copied;
3285 }
3286
3287 static int ext4_da_write_end(const struct kiocb *iocb,
3288 struct address_space *mapping,
3289 loff_t pos, unsigned len, unsigned copied,
3290 struct folio *folio, void *fsdata)
3291 {
3292 struct inode *inode = mapping->host;
3293 int write_mode = (int)(unsigned long)fsdata;
3294
3295 if (write_mode == FALL_BACK_TO_NONDELALLOC)
3296 return ext4_write_end(iocb, mapping, pos,
3297 len, copied, folio, fsdata);
3298
3299 trace_ext4_da_write_end(inode, pos, len, copied);
3300
3301 if (write_mode != CONVERT_INLINE_DATA &&
3302 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
3303 ext4_has_inline_data(inode))
3304 return ext4_write_inline_data_end(inode, pos, len, copied,
3305 folio);
3306
3307 if (unlikely(copied < len) && !folio_test_uptodate(folio))
3308 copied = 0;
3309
3310 return ext4_da_do_write_end(mapping, pos, len, copied, folio);
3311 }
3312
3313 /*
3314 * Force all delayed allocation blocks to be allocated for a given inode.
3315 */
3316 int ext4_alloc_da_blocks(struct inode *inode)
3317 {
3318 trace_ext4_alloc_da_blocks(inode);
3319
3320 if (!EXT4_I(inode)->i_reserved_data_blocks)
3321 return 0;
3322
3323 /*
3324 * We do something simple for now. The filemap_flush() will
3325 * also start triggering a write of the data blocks, which is
3326 * not strictly speaking necessary. However, to do otherwise
3327 * would require replicating code paths in:
3328 *
3329 * ext4_writepages() ->
3330 * write_cache_pages() ---> (via passed in callback function)
3331 * __mpage_da_writepage() -->
3332 * mpage_add_bh_to_extent()
3333 * mpage_da_map_blocks()
3334 *
3335 * The problem is that write_cache_pages(), located in
3336 * mm/page-writeback.c, marks pages clean in preparation for
3337 * doing I/O, which is not desirable if we're not planning on
3338 * doing I/O at all.
3339 *
3340 * We could call write_cache_pages(), and then redirty all of
3341 * the pages by calling redirty_page_for_writepage() but that
3342 * would be ugly in the extreme. So instead we would need to
3343 * replicate parts of the code in the above functions,
3344 * simplifying them because we wouldn't actually intend to
3345 * write out the pages, but rather only collect contiguous
3346 * logical block extents, call the multi-block allocator, and
3347 * then update the buffer heads with the block allocations.
3348 *
3349 * For now, though, we'll cheat by calling filemap_flush(),
3350 * which will map the blocks, and start the I/O, but not
3351 * actually wait for the I/O to complete.
3352 */
3353 return filemap_flush(inode->i_mapping);
3354 }
3355
3356 /*
3357 * bmap() is special. It gets used by applications such as lilo and by
3358 * the swapper to find the on-disk block of a specific piece of data.
3359 *
3360 * Naturally, this is dangerous if the block concerned is still in the
3361 * journal. If somebody makes a swapfile on an ext4 data-journaling
3362 * filesystem and enables swap, then they may get a nasty shock when the
3363 * data getting swapped to that swapfile suddenly gets overwritten by
3364 * the original zeros written out previously to the journal and
3365 * awaiting writeback in the kernel's buffer cache.
3366 *
3367 * So, if we see any bmap calls here on a modified, data-journaled file,
3368 * take extra steps to flush any blocks which might be in the cache.
3369 */
3370 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3371 {
3372 struct inode *inode = mapping->host;
3373 sector_t ret = 0;
3374
3375 inode_lock_shared(inode);
3376 /*
3377 * We can get here for an inline file via the FIBMAP ioctl
3378 */
3379 if (ext4_has_inline_data(inode))
3380 goto out;
3381
3382 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
3383 (test_opt(inode->i_sb, DELALLOC) ||
3384 ext4_should_journal_data(inode))) {
3385 /*
3386 * With delalloc or journalled data we want to sync the file so
3387 * that we can make sure blocks are allocated for the file and
3388 * the data is in place for the user to see.
3389 */
3390 filemap_write_and_wait(mapping);
3391 }
3392
3393 ret = iomap_bmap(mapping, block, &ext4_iomap_ops);
3394
3395 out:
3396 inode_unlock_shared(inode);
3397 return ret;
3398 }
3399
3400 static void ext4_invalidate_folio(struct folio *folio, size_t offset,
3401 size_t length)
3402 {
3403 trace_ext4_invalidate_folio(folio, offset, length);
3404
3405 /* No journalling happens on data buffers when this function is used */
3406 WARN_ON(folio_buffers(folio) && buffer_jbd(folio_buffers(folio)));
3407
3408 block_invalidate_folio(folio, offset, length);
3409 }
3410
3411 static int __ext4_journalled_invalidate_folio(struct folio *folio,
3412 size_t offset, size_t length)
3413 {
3414 journal_t *journal = EXT4_JOURNAL(folio->mapping->host);
3415
3416 trace_ext4_journalled_invalidate_folio(folio, offset, length);
3417
3418 /*
3419 * If it's a full truncate we just forget about the pending dirtying
3420 */
3421 if (offset == 0 && length == folio_size(folio))
3422 folio_clear_checked(folio);
3423
3424 return jbd2_journal_invalidate_folio(journal, folio, offset, length);
3425 }
3426
3427 /* Wrapper for aops... */
3428 static void ext4_journalled_invalidate_folio(struct folio *folio,
3429 size_t offset,
3430 size_t length)
3431 {
3432 WARN_ON(__ext4_journalled_invalidate_folio(folio, offset, length) < 0);
3433 }
3434
3435 static bool ext4_release_folio(struct folio *folio, gfp_t wait)
3436 {
3437 struct inode *inode = folio->mapping->host;
3438 journal_t *journal = EXT4_JOURNAL(inode);
3439
3440 trace_ext4_release_folio(inode, folio);
3441
3442 /* Page has dirty journalled data -> cannot release */
3443 if (folio_test_checked(folio))
3444 return false;
3445 if (journal)
3446 return jbd2_journal_try_to_free_buffers(journal, folio);
3447 else
3448 return try_to_free_buffers(folio);
3449 }
3450
3451 static bool ext4_inode_datasync_dirty(struct inode *inode)
3452 {
3453 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
3454
3455 if (journal) {
3456 if (jbd2_transaction_committed(journal,
3457 EXT4_I(inode)->i_datasync_tid))
3458 return false;
3459 if (test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT))
3460 return !list_empty(&EXT4_I(inode)->i_fc_list);
3461 return true;
3462 }
3463
3464 /* Any metadata buffers to write? */
3465 if (mmb_has_buffers(&EXT4_I(inode)->i_metadata_bhs))
3466 return true;
3467 return inode_state_read_once(inode) & I_DIRTY_DATASYNC;
3468 }
3469
3470 static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
3471 struct ext4_map_blocks *map, loff_t offset,
3472 loff_t length, unsigned int flags)
3473 {
3474 u8 blkbits = inode->i_blkbits;
3475
3476 /*
3477 * Writes that span EOF might trigger an I/O size update on completion,
3478 * so consider them to be dirty for the purpose of O_DSYNC, even if
3479 * there are no other metadata changes being made or pending.
3480 */
3481 iomap->flags = 0;
3482 if (ext4_inode_datasync_dirty(inode) ||
3483 offset + length > i_size_read(inode))
3484 iomap->flags |= IOMAP_F_DIRTY;
3485
3486 if (map->m_flags & EXT4_MAP_NEW)
3487 iomap->flags |= IOMAP_F_NEW;
3488
3489 /* HW-offload atomics are always used */
3490 if (flags & IOMAP_ATOMIC)
3491 iomap->flags |= IOMAP_F_ATOMIC_BIO;
3492
3493 if (flags & IOMAP_DAX)
3494 iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
3495 else
3496 iomap->bdev = inode->i_sb->s_bdev;
3497 iomap->offset = EXT4_LBLK_TO_B(inode, map->m_lblk);
3498 iomap->length = EXT4_LBLK_TO_B(inode, map->m_len);
3499
3500 if ((map->m_flags & EXT4_MAP_MAPPED) &&
3501 !ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3502 iomap->flags |= IOMAP_F_MERGED;
3503
3504 /*
3505 * Flags passed to ext4_map_blocks() for direct I/O writes can result
3506 * in m_flags having both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN bits
3507 * set. In order for any allocated unwritten extents to be converted
3508 * into written extents correctly within the ->end_io() handler, we
3509 * need to ensure that the iomap->type is set appropriately. That is
3510 * why we check whether the EXT4_MAP_UNWRITTEN bit has been set first.
3512 */
3513 if (map->m_flags & EXT4_MAP_UNWRITTEN) {
3514 iomap->type = IOMAP_UNWRITTEN;
3515 iomap->addr = (u64) map->m_pblk << blkbits;
3516 if (flags & IOMAP_DAX)
3517 iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
3518 } else if (map->m_flags & EXT4_MAP_MAPPED) {
3519 iomap->type = IOMAP_MAPPED;
3520 iomap->addr = (u64) map->m_pblk << blkbits;
3521 if (flags & IOMAP_DAX)
3522 iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
3523 } else if (map->m_flags & EXT4_MAP_DELAYED) {
3524 iomap->type = IOMAP_DELALLOC;
3525 iomap->addr = IOMAP_NULL_ADDR;
3526 } else {
3527 iomap->type = IOMAP_HOLE;
3528 iomap->addr = IOMAP_NULL_ADDR;
3529 }
3530 }
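/*
 * Summary of the m_flags -> iomap translation above. The order of the
 * checks matters because direct I/O writes can leave both
 * EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN set:
 *
 *	EXT4_MAP_UNWRITTEN	-> IOMAP_UNWRITTEN, addr = m_pblk
 *	EXT4_MAP_MAPPED		-> IOMAP_MAPPED,    addr = m_pblk
 *	EXT4_MAP_DELAYED	-> IOMAP_DELALLOC,  addr = IOMAP_NULL_ADDR
 *	(none of the above)	-> IOMAP_HOLE,      addr = IOMAP_NULL_ADDR
 */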
3531
3532 static int ext4_map_blocks_atomic_write_slow(handle_t *handle,
3533 struct inode *inode, struct ext4_map_blocks *map)
3534 {
3535 ext4_lblk_t m_lblk = map->m_lblk;
3536 unsigned int m_len = map->m_len;
3537 unsigned int mapped_len = 0, m_flags = 0;
3538 ext4_fsblk_t next_pblk = 0;
3539 bool check_next_pblk = false;
3540 int ret = 0;
3541
3542 WARN_ON_ONCE(!ext4_has_feature_bigalloc(inode->i_sb));
3543
3544 /*
3545 * This is a slow path in case of mixed mapping. We use
3546 * EXT4_GET_BLOCKS_CREATE_ZERO flag here to make sure we get a single
3547 * contiguous mapping. This will ensure any unwritten or hole
3548 * regions within the requested range are zeroed out and we return
3549 * a single contiguous mapped extent.
3550 */
3551 m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
3552
3553 do {
3554 ret = ext4_map_blocks(handle, inode, map, m_flags);
3555 if (ret < 0 && ret != -ENOSPC)
3556 goto out_err;
3557 /*
3558 * This should never happen, but let's return an error code to
3559 * avoid an infinite loop here.
3560 */
3561 if (ret == 0) {
3562 ret = -EFSCORRUPTED;
3563 ext4_warning_inode(inode,
3564 "ext4_map_blocks() couldn't allocate blocks m_flags: 0x%x, ret:%d",
3565 m_flags, ret);
3566 goto out_err;
3567 }
3568 /*
3569 * With bigalloc we should never get ENOSPC or discontiguous
3570 * physical extents.
3571 */
3572 if ((check_next_pblk && next_pblk != map->m_pblk) ||
3573 ret == -ENOSPC) {
3574 ext4_warning_inode(inode,
3575 "Non-contiguous allocation detected: expected %llu, got %llu, "
3576 "or ext4_map_blocks() returned out of space ret: %d",
3577 next_pblk, map->m_pblk, ret);
3578 ret = -EFSCORRUPTED;
3579 goto out_err;
3580 }
3581 next_pblk = map->m_pblk + map->m_len;
3582 check_next_pblk = true;
3583
3584 mapped_len += map->m_len;
3585 map->m_lblk += map->m_len;
3586 map->m_len = m_len - mapped_len;
3587 } while (mapped_len < m_len);
3588
3589 /*
3590 * We might have done some work in the above loop, so we need to query
3591 * the start of the physical extent, based on the original m_lblk and
3592 * m_len. Let's also ensure we were able to allocate the required range
3593 * for the mixed mapping case.
3594 */
3595 map->m_lblk = m_lblk;
3596 map->m_len = m_len;
3597 map->m_flags = 0;
3598
3599 ret = ext4_map_blocks(handle, inode, map,
3600 EXT4_GET_BLOCKS_QUERY_LAST_IN_LEAF);
3601 if (ret != m_len) {
3602 ext4_warning_inode(inode,
3603 "allocation failed for atomic write request m_lblk:%u, m_len:%u, ret:%d\n",
3604 m_lblk, m_len, ret);
3605 ret = -EINVAL;
3606 }
3607 return ret;
3608
3609 out_err:
3610 /* reset map before returning an error */
3611 map->m_lblk = m_lblk;
3612 map->m_len = m_len;
3613 map->m_flags = 0;
3614 return ret;
3615 }
3616
3617 /*
3618 * ext4_map_blocks_atomic: Helper routine to ensure the entire requested
3619 * range in @map [lblk, lblk + len) is one single contiguous extent with no
3620 * mixed mappings.
3621 *
3622 * We first use m_flags passed to us by our caller (ext4_iomap_alloc()).
3623 * We only call EXT4_GET_BLOCKS_ZERO in the slow path, when the underlying
3624 * physical extent for the requested range does not have a single contiguous
3625 * mapping type (i.e. hole, mapped, or unwritten) throughout.
3626 * In that case we will loop over the requested range to allocate and zero out
3627 * the unwritten / holes in between, to get a single mapped extent from
3628 * [m_lblk, m_lblk + m_len). Note that this is only possible because we know
3629 * this can only be called on a bigalloc enabled filesystem where the underlying
3630 * cluster is already allocated. This avoids allocating discontiguous extents
3631 * in the slow path due to multiple calls to ext4_map_blocks().
3632 * The slow path is mostly not performance critical, so it should be ok to
3633 * loop using ext4_map_blocks() with appropriate flags to allocate & zero the
3634 * underlying short holes/unwritten extents within the requested range.
3635 */
3636 static int ext4_map_blocks_atomic_write(handle_t *handle, struct inode *inode,
3637 struct ext4_map_blocks *map, int m_flags,
3638 bool *force_commit)
3639 {
3640 ext4_lblk_t m_lblk = map->m_lblk;
3641 unsigned int m_len = map->m_len;
3642 int ret = 0;
3643
3644 WARN_ON_ONCE(m_len > 1 && !ext4_has_feature_bigalloc(inode->i_sb));
3645
3646 ret = ext4_map_blocks(handle, inode, map, m_flags);
3647 if (ret < 0 || ret == m_len)
3648 goto out;
3649 /*
3650 * This is a mixed mapping case where we were not able to allocate
3651 * a single contiguous extent. In that case let's reset requested
3652 * mapping and call the slow path.
3653 */
3654 map->m_lblk = m_lblk;
3655 map->m_len = m_len;
3656 map->m_flags = 0;
3657
3658 /*
3659 * The slow path means we have a mixed mapping, which means we will
3660 * need to force a transaction commit.
3661 */
3662 *force_commit = true;
3663 return ext4_map_blocks_atomic_write_slow(handle, inode, map);
3664 out:
3665 return ret;
3666 }
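/*
 * Worked example of the fallback above: suppose an atomic write covers
 * four blocks of an already-allocated bigalloc cluster laid out as
 * [written][unwritten][hole][written]. The first ext4_map_blocks()
 * call maps only the first block (ret < m_len), so the map is reset,
 * *force_commit is set, and the slow path zeroes the unwritten and
 * hole blocks until the range is a single contiguous written extent.
 */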
3667
3668 static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
3669 unsigned int flags)
3670 {
3671 handle_t *handle;
3672 int ret, dio_credits, m_flags = 0, retries = 0;
3673 bool force_commit = false;
3674
3675 /*
3676 * Trim the mapping request to the maximum value that we can map at
3677 * once for direct I/O.
3678 */
3679 if (map->m_len > DIO_MAX_BLOCKS)
3680 map->m_len = DIO_MAX_BLOCKS;
3681
3682 /*
3683 * Journal credits estimation for atomic writes. We call
3684 * ext4_map_blocks() to find out if there could be a mixed mapping. If so,
3685 * assume the number of physical extents required can be m_len, i.e.
3686 * every alternate block can be an unwritten extent or a hole.
3687 */
3688 if (flags & IOMAP_ATOMIC) {
3689 unsigned int orig_mlen = map->m_len;
3690
3691 ret = ext4_map_blocks(NULL, inode, map, 0);
3692 if (ret < 0)
3693 return ret;
3694 if (map->m_len < orig_mlen) {
3695 map->m_len = orig_mlen;
3696 dio_credits = ext4_meta_trans_blocks(inode, orig_mlen,
3697 map->m_len);
3698 } else {
3699 dio_credits = ext4_chunk_trans_blocks(inode,
3700 map->m_len);
3701 }
3702 } else {
3703 dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
3704 }
3705
3706 retry:
3707 /*
3708 * Either we allocate blocks and don't get an unwritten extent, in
3709 * which case we have reserved enough credits; or the blocks are
3710 * already allocated and unwritten. In that case, the extent conversion
3711 * fits into the credits as well.
3712 */
3713 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
3714 if (IS_ERR(handle))
3715 return PTR_ERR(handle);
3716
3717 /*
3718 * DAX and direct I/O are the only two operations that are currently
3719 * supported with IOMAP_WRITE.
3720 */
3721 WARN_ON(!(flags & (IOMAP_DAX | IOMAP_DIRECT)));
3722 if (flags & IOMAP_DAX)
3723 m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
3724 /*
3725 * We use i_size instead of i_disksize here because delalloc writeback
3726 * can complete at any point during the I/O and subsequently push the
3727 * i_disksize out to i_size. This could be beyond where direct I/O is
3728 * happening and thus expose allocated blocks to direct I/O reads.
3729 */
3730 else if (EXT4_LBLK_TO_B(inode, map->m_lblk) >= i_size_read(inode))
3731 m_flags = EXT4_GET_BLOCKS_CREATE;
3732 else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3733 m_flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
3734
3735 if (flags & IOMAP_ATOMIC)
3736 ret = ext4_map_blocks_atomic_write(handle, inode, map, m_flags,
3737 &force_commit);
3738 else
3739 ret = ext4_map_blocks(handle, inode, map, m_flags);
3740
3741 /*
3742 * We cannot fill holes in indirect tree based inodes as that could
3743 * expose stale data in the case of a crash. Use the magic error code
3744 * to fallback to buffered I/O.
3745 */
3746 if (!m_flags && !ret)
3747 ret = -ENOTBLK;
3748
3749 ext4_journal_stop(handle);
3750 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3751 goto retry;
3752
3753 /*
3754 * Force commit the current transaction if the allocation spans a mixed
3755 * mapping range. This ensures any pending metadata updates (like
3756 * unwritten to written extents conversion) in this range are in
3757 * consistent state with the file data blocks, before performing the
3758 * actual write I/O. If the commit fails, the whole I/O must be aborted
3759 * to prevent any possible torn writes.
3760 */
3761 if (ret > 0 && force_commit) {
3762 int ret2;
3763
3764 ret2 = ext4_force_commit(inode->i_sb);
3765 if (ret2)
3766 return ret2;
3767 }
3768
3769 return ret;
3770 }
3771
3772
3773 static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
3774 unsigned flags, struct iomap *iomap, struct iomap *srcmap)
3775 {
3776 int ret;
3777 struct ext4_map_blocks map;
3778 u8 blkbits = inode->i_blkbits;
3779 unsigned int orig_mlen;
3780
3781 if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3782 return -EINVAL;
3783
3784 if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
3785 return -ERANGE;
3786
3787 /*
3788 * Calculate the first and last logical blocks respectively.
3789 */
3790 map.m_lblk = offset >> blkbits;
3791 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3792 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
3793 orig_mlen = map.m_len;
3794
3795 if (flags & IOMAP_WRITE) {
3796 /*
3797 * We check here if the blocks are already allocated, then we
3798 * don't need to start a journal txn and we can directly return
3799 * the mapping information. This could boost performance
3800 * especially in multi-threaded overwrite requests.
3801 */
3802 if (offset + length <= i_size_read(inode)) {
3803 ret = ext4_map_blocks(NULL, inode, &map, 0);
3804 /*
3805 * For DAX we convert extents to initialized ones before
3806 * copying the data, otherwise we do it after I/O so
3807 * there's no need to call into ext4_iomap_alloc().
3808 */
3809 if ((map.m_flags & EXT4_MAP_MAPPED) ||
3810 (!(flags & IOMAP_DAX) &&
3811 (map.m_flags & EXT4_MAP_UNWRITTEN))) {
3812 /*
3813 * For atomic writes the entire requested
3814 * length should be mapped.
3815 */
3816 if (ret == orig_mlen ||
3817 (!(flags & IOMAP_ATOMIC) && ret > 0))
3818 goto out;
3819 }
3820 map.m_len = orig_mlen;
3821 }
3822 ret = ext4_iomap_alloc(inode, &map, flags);
3823 } else {
3824 ret = ext4_map_blocks(NULL, inode, &map, 0);
3825 }
3826
3827 if (ret < 0)
3828 return ret;
3829 out:
3830 /*
3831 * When inline encryption is enabled, sometimes I/O to an encrypted file
3832 * has to be broken up to guarantee DUN contiguity. Handle this by
3833 * limiting the length of the mapping returned.
3834 */
3835 map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
3836
3837 /*
3838 * Before returning to iomap, let's ensure the allocated mapping
3839 * covers the entire requested length for atomic writes.
3840 */
3841 if (flags & IOMAP_ATOMIC) {
3842 if (map.m_len < (length >> blkbits)) {
3843 WARN_ON_ONCE(1);
3844 return -EINVAL;
3845 }
3846 }
3847 ext4_set_iomap(inode, iomap, &map, offset, length, flags);
3848
3849 return 0;
3850 }
3851
3852 const struct iomap_ops ext4_iomap_ops = {
3853 .iomap_begin = ext4_iomap_begin,
3854 };
3855
3856 static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
3857 loff_t length, unsigned int flags,
3858 struct iomap *iomap, struct iomap *srcmap)
3859 {
3860 int ret;
3861 struct ext4_map_blocks map;
3862 u8 blkbits = inode->i_blkbits;
3863
3864 if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3865 return -EINVAL;
3866
3867 if (ext4_has_inline_data(inode)) {
3868 ret = ext4_inline_data_iomap(inode, iomap);
3869 if (ret != -EAGAIN) {
3870 if (ret == 0 && offset >= iomap->length)
3871 ret = -ENOENT;
3872 return ret;
3873 }
3874 }
3875
3876 /*
3877 * Calculate the first and last logical block respectively.
3878 */
3879 map.m_lblk = offset >> blkbits;
3880 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3881 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
3882
3883 /*
3884 * Fiemap callers may call for an offset beyond s_bitmap_maxbytes.
3885 * Handle it here instead of querying ext4_map_blocks(),
3886 * since ext4_map_blocks() would warn about it and return
3887 * an -EIO error.
3888 */
3889 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
3890 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3891
3892 if (offset >= sbi->s_bitmap_maxbytes) {
3893 map.m_flags = 0;
3894 goto set_iomap;
3895 }
3896 }
3897
3898 ret = ext4_map_blocks(NULL, inode, &map, 0);
3899 if (ret < 0)
3900 return ret;
3901 set_iomap:
3902 ext4_set_iomap(inode, iomap, &map, offset, length, flags);
3903
3904 return 0;
3905 }
3906
3907 const struct iomap_ops ext4_iomap_report_ops = {
3908 .iomap_begin = ext4_iomap_begin_report,
3909 };
3910
3911 /*
3912 * For data=journal mode, folio should be marked dirty only when it was
3913 * writeably mapped. When that happens, it was already attached to the
3914 * transaction and marked as jbddirty (we take care of this in
3915 * ext4_page_mkwrite()). On transaction commit, we writeprotect page mappings
3916 * so we should have nothing to do here, except for the case when someone
3917 * had the page pinned and dirtied the page through this pin (e.g. by doing
3918 * direct IO to it). In that case we'd need to attach buffers here to the
3919 * transaction but we cannot due to lock ordering. We cannot just dirty the
3920 * folio and leave attached buffers clean, because the buffers' dirty state is
3921 * "definitive". We cannot just set the buffers dirty or jbddirty because all
3922 * the journalling code will explode. So what we do is to mark the folio
3923 * "pending dirty" and next time ext4_writepages() is called, attach buffers
3924 * to the transaction appropriately.
3925 */
3926 static bool ext4_journalled_dirty_folio(struct address_space *mapping,
3927 struct folio *folio)
3928 {
3929 WARN_ON_ONCE(!folio_buffers(folio));
3930 if (folio_maybe_dma_pinned(folio))
3931 folio_set_checked(folio);
3932 return filemap_dirty_folio(mapping, folio);
3933 }
3934
3935 static bool ext4_dirty_folio(struct address_space *mapping, struct folio *folio)
3936 {
3937 WARN_ON_ONCE(!folio_test_locked(folio) && !folio_test_dirty(folio));
3938 WARN_ON_ONCE(!folio_buffers(folio));
3939 return block_dirty_folio(mapping, folio);
3940 }
3941
3942 static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
3943 struct file *file, sector_t *span)
3944 {
3945 return iomap_swapfile_activate(sis, file, span,
3946 &ext4_iomap_report_ops);
3947 }
3948
3949 static const struct address_space_operations ext4_aops = {
3950 .read_folio = ext4_read_folio,
3951 .readahead = ext4_readahead,
3952 .writepages = ext4_writepages,
3953 .write_begin = ext4_write_begin,
3954 .write_end = ext4_write_end,
3955 .dirty_folio = ext4_dirty_folio,
3956 .bmap = ext4_bmap,
3957 .invalidate_folio = ext4_invalidate_folio,
3958 .release_folio = ext4_release_folio,
3959 .migrate_folio = buffer_migrate_folio,
3960 .is_partially_uptodate = block_is_partially_uptodate,
3961 .error_remove_folio = generic_error_remove_folio,
3962 .swap_activate = ext4_iomap_swap_activate,
3963 };
3964
3965 static const struct address_space_operations ext4_journalled_aops = {
3966 .read_folio = ext4_read_folio,
3967 .readahead = ext4_readahead,
3968 .writepages = ext4_writepages,
3969 .write_begin = ext4_write_begin,
3970 .write_end = ext4_journalled_write_end,
3971 .dirty_folio = ext4_journalled_dirty_folio,
3972 .bmap = ext4_bmap,
3973 .invalidate_folio = ext4_journalled_invalidate_folio,
3974 .release_folio = ext4_release_folio,
3975 .migrate_folio = buffer_migrate_folio_norefs,
3976 .is_partially_uptodate = block_is_partially_uptodate,
3977 .error_remove_folio = generic_error_remove_folio,
3978 .swap_activate = ext4_iomap_swap_activate,
3979 };
3980
3981 static const struct address_space_operations ext4_da_aops = {
3982 .read_folio = ext4_read_folio,
3983 .readahead = ext4_readahead,
3984 .writepages = ext4_writepages,
3985 .write_begin = ext4_da_write_begin,
3986 .write_end = ext4_da_write_end,
3987 .dirty_folio = ext4_dirty_folio,
3988 .bmap = ext4_bmap,
3989 .invalidate_folio = ext4_invalidate_folio,
3990 .release_folio = ext4_release_folio,
3991 .migrate_folio = buffer_migrate_folio,
3992 .is_partially_uptodate = block_is_partially_uptodate,
3993 .error_remove_folio = generic_error_remove_folio,
3994 .swap_activate = ext4_iomap_swap_activate,
3995 };
3996
3997 static const struct address_space_operations ext4_dax_aops = {
3998 .writepages = ext4_dax_writepages,
3999 .dirty_folio = noop_dirty_folio,
4000 .bmap = ext4_bmap,
4001 .swap_activate = ext4_iomap_swap_activate,
4002 };
4003
4004 void ext4_set_aops(struct inode *inode)
4005 {
4006 switch (ext4_inode_journal_mode(inode)) {
4007 case EXT4_INODE_ORDERED_DATA_MODE:
4008 case EXT4_INODE_WRITEBACK_DATA_MODE:
4009 break;
4010 case EXT4_INODE_JOURNAL_DATA_MODE:
4011 inode->i_mapping->a_ops = &ext4_journalled_aops;
4012 return;
4013 default:
4014 BUG();
4015 }
4016 if (IS_DAX(inode))
4017 inode->i_mapping->a_ops = &ext4_dax_aops;
4018 else if (test_opt(inode->i_sb, DELALLOC))
4019 inode->i_mapping->a_ops = &ext4_da_aops;
4020 else
4021 inode->i_mapping->a_ops = &ext4_aops;
4022 }
4023
4024 /*
4025 * Here we can't skip an unwritten buffer even though it usually reads zero
4026 * because it might have data in pagecache (e.g., if called from ext4_zero_range,
4027 * ext4_punch_hole, etc) which needs to be properly zeroed out. Otherwise a
4028 * racing writeback can come later and flush the stale pagecache to disk.
4029 */
4030 static struct buffer_head *ext4_load_tail_bh(struct inode *inode, loff_t from)
4031 {
4032 unsigned int offset, blocksize, pos;
4033 ext4_lblk_t iblock;
4034 struct address_space *mapping = inode->i_mapping;
4035 struct buffer_head *bh;
4036 struct folio *folio;
4037 int err = 0;
4038
4039 folio = __filemap_get_folio(mapping, from >> PAGE_SHIFT,
4040 FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
4041 mapping_gfp_constraint(mapping, ~__GFP_FS));
4042 if (IS_ERR(folio))
4043 return ERR_CAST(folio);
4044
4045 blocksize = inode->i_sb->s_blocksize;
4046
4047 iblock = EXT4_PG_TO_LBLK(inode, folio->index);
4048
4049 bh = folio_buffers(folio);
4050 if (!bh)
4051 bh = create_empty_buffers(folio, blocksize, 0);
4052
4053 /* Find the buffer that contains "offset" */
4054 offset = offset_in_folio(folio, from);
4055 pos = blocksize;
4056 while (offset >= pos) {
4057 bh = bh->b_this_page;
4058 iblock++;
4059 pos += blocksize;
4060 }
4061 if (buffer_freed(bh)) {
4062 BUFFER_TRACE(bh, "freed: skip");
4063 goto unlock;
4064 }
4065 if (!buffer_mapped(bh)) {
4066 BUFFER_TRACE(bh, "unmapped");
4067 ext4_get_block(inode, iblock, bh, 0);
4068 /* unmapped? It's a hole - nothing to do */
4069 if (!buffer_mapped(bh)) {
4070 BUFFER_TRACE(bh, "still unmapped");
4071 goto unlock;
4072 }
4073 }
4074
4075 /* Ok, it's mapped. Make sure it's up-to-date */
4076 if (folio_test_uptodate(folio))
4077 set_buffer_uptodate(bh);
4078
4079 if (!buffer_uptodate(bh)) {
4080 err = ext4_read_bh_lock(bh, 0, true);
4081 if (err)
4082 goto unlock;
4083 if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
4084 /* We expect the key to be set. */
4085 BUG_ON(!fscrypt_has_encryption_key(inode));
4086 err = fscrypt_decrypt_pagecache_blocks(folio,
4087 blocksize,
4088 bh_offset(bh));
4089 if (err) {
4090 clear_buffer_uptodate(bh);
4091 goto unlock;
4092 }
4093 }
4094 }
4095 return bh;
4096
4097 unlock:
4098 folio_unlock(folio);
4099 folio_put(folio);
4100 return err ? ERR_PTR(err) : NULL;
4101 }
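/*
 * Rough worked example of the buffer walk above: with 1K blocks in a
 * 4K folio and from == 3000, offset_in_folio() yields 3000 and the
 * loop steps past the buffers covering [0, 1K) and [1K, 2K), stopping
 * at the buffer for [2K, 3K), the block that contains byte 3000
 * (iblock is advanced by 2 along the way).
 */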
4102
4103 static int ext4_block_do_zero_range(struct inode *inode, loff_t from,
4104 loff_t length, bool *did_zero,
4105 bool *zero_written)
4106 {
4107 struct buffer_head *bh;
4108 struct folio *folio;
4109
4110 bh = ext4_load_tail_bh(inode, from);
4111 if (IS_ERR_OR_NULL(bh))
4112 return PTR_ERR_OR_ZERO(bh);
4113
4114 folio = bh->b_folio;
4115 folio_zero_range(folio, offset_in_folio(folio, from), length);
4116 BUFFER_TRACE(bh, "zeroed end of block");
4117
4118 mark_buffer_dirty(bh);
4119 if (did_zero)
4120 *did_zero = true;
4121 if (zero_written && !buffer_unwritten(bh) && !buffer_delay(bh))
4122 *zero_written = true;
4123
4124 folio_unlock(folio);
4125 folio_put(folio);
4126 return 0;
4127 }
4128
4129 static int ext4_block_journalled_zero_range(struct inode *inode, loff_t from,
4130 loff_t length, bool *did_zero)
4131 {
4132 struct buffer_head *bh;
4133 struct folio *folio;
4134 handle_t *handle;
4135 int err;
4136
4137 handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
4138 if (IS_ERR(handle))
4139 return PTR_ERR(handle);
4140
4141 bh = ext4_load_tail_bh(inode, from);
4142 if (IS_ERR_OR_NULL(bh)) {
4143 err = PTR_ERR_OR_ZERO(bh);
4144 goto out_handle;
4145 }
4146 folio = bh->b_folio;
4147
4148 BUFFER_TRACE(bh, "get write access");
4149 err = ext4_journal_get_write_access(handle, inode->i_sb, bh,
4150 EXT4_JTR_NONE);
4151 if (err)
4152 goto out;
4153
4154 folio_zero_range(folio, offset_in_folio(folio, from), length);
4155 BUFFER_TRACE(bh, "zeroed end of block");
4156
4157 err = ext4_dirty_journalled_data(handle, bh);
4158 if (err)
4159 goto out;
4160
4161 if (did_zero)
4162 *did_zero = true;
4163 out:
4164 folio_unlock(folio);
4165 folio_put(folio);
4166 out_handle:
4167 ext4_journal_stop(handle);
4168 return err;
4169 }
4170
4171 /*
4172 * Zeros out a mapping of length 'length' starting from file offset
4173 * 'from'. The range to be zeroed must be contained within one block.
4174 * If the specified range exceeds the end of the block it will be
4175 * shortened to the end of the block that corresponds to 'from'.
4176 */
4177 static int ext4_block_zero_range(struct inode *inode,
4178 loff_t from, loff_t length, bool *did_zero,
4179 bool *zero_written)
4180 {
4181 unsigned blocksize = inode->i_sb->s_blocksize;
4182 unsigned int max = blocksize - (from & (blocksize - 1));
4183
4184 /*
4185 * correct length if it does not fall between
4186 * 'from' and the end of the block
4187 */
4188 if (length > max || length < 0)
4189 length = max;
4190
4191 if (IS_DAX(inode)) {
4192 return dax_zero_range(inode, from, length, did_zero,
4193 &ext4_iomap_ops);
4194 } else if (ext4_should_journal_data(inode)) {
4195 return ext4_block_journalled_zero_range(inode, from, length,
4196 did_zero);
4197 }
4198 return ext4_block_do_zero_range(inode, from, length, did_zero,
4199 zero_written);
4200 }
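/*
 * Worked example of the clamping above: with a 4096-byte block size
 * and from == 6000, (from & (blocksize - 1)) == 1904, so max == 2192.
 * Any length larger than that (or negative, since loff_t is signed)
 * is shortened so the zeroing never crosses the end of the block
 * containing 'from'.
 */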
4201
4202 /*
4203 * Zero out a mapping from file offset 'from' up to the end of the block
4204 * which corresponds to 'from' or to the given 'end' inside this block.
4205 * This is required when truncating up and when performing append writes. We need
4206 * to physically zero the tail end of that block so it doesn't yield old
4207 * data if the file is grown.
4208 */
4209 int ext4_block_zero_eof(struct inode *inode, loff_t from, loff_t end)
4210 {
4211 unsigned int blocksize = i_blocksize(inode);
4212 unsigned int offset;
4213 loff_t length = end - from;
4214 bool did_zero = false;
4215 bool zero_written = false;
4216 int err;
4217
4218 offset = from & (blocksize - 1);
4219 if (!offset || from >= end)
4220 return 0;
4221 /* If we are processing an encrypted inode during orphan list handling */
4222 if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
4223 return 0;
4224
4225 if (length > blocksize - offset)
4226 length = blocksize - offset;
4227
4228 err = ext4_block_zero_range(inode, from, length,
4229 &did_zero, &zero_written);
4230 if (err)
4231 return err;
4232 /*
4233 * It's necessary to order the zeroed data before updating i_disksize
4234 * when truncating up or performing an append write, because stale
4235 * on-disk data might otherwise be exposed by a concurrent post-EOF
4236 * mmap write during folio writeback.
4237 */
4238 if (ext4_should_order_data(inode) &&
4239 did_zero && zero_written && !IS_DAX(inode)) {
4240 handle_t *handle;
4241
4242 handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
4243 if (IS_ERR(handle))
4244 return PTR_ERR(handle);
4245
4246 err = ext4_jbd2_inode_add_write(handle, inode, from, length);
4247 ext4_journal_stop(handle);
4248 if (err)
4249 return err;
4250 }
4251
4252 return 0;
4253 }
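/*
 * Worked example: growing a file from i_size == 10000 with 4K blocks
 * gives offset == 10000 & 4095 == 1808, so bytes [10000, 12288) of the
 * tail block are zeroed. This is what prevents stale block contents
 * from appearing if the file is later extended over that block.
 */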
4254
4255 int ext4_zero_partial_blocks(struct inode *inode, loff_t lstart, loff_t length,
4256 bool *did_zero)
4257 {
4258 struct super_block *sb = inode->i_sb;
4259 unsigned partial_start, partial_end;
4260 ext4_fsblk_t start, end;
4261 loff_t byte_end = (lstart + length - 1);
4262 int err = 0;
4263
4264 partial_start = lstart & (sb->s_blocksize - 1);
4265 partial_end = byte_end & (sb->s_blocksize - 1);
4266
4267 start = lstart >> sb->s_blocksize_bits;
4268 end = byte_end >> sb->s_blocksize_bits;
4269
4270 /* Handle partial zero within the single block */
4271 if (start == end &&
4272 (partial_start || (partial_end != sb->s_blocksize - 1))) {
4273 err = ext4_block_zero_range(inode, lstart, length, did_zero,
4274 NULL);
4275 return err;
4276 }
4277 /* Handle partial zero out on the start of the range */
4278 if (partial_start) {
4279 err = ext4_block_zero_range(inode, lstart, sb->s_blocksize,
4280 did_zero, NULL);
4281 if (err)
4282 return err;
4283 }
4284 /* Handle partial zero out on the end of the range */
4285 if (partial_end != sb->s_blocksize - 1)
4286 err = ext4_block_zero_range(inode, byte_end - partial_end,
4287 partial_end + 1, did_zero, NULL);
4288 return err;
4289 }
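/*
 * Worked example with a 4K block size: lstart == 1000 and length ==
 * 10000 give byte_end == 10999, partial_start == 1000, partial_end ==
 * 2807, start == 0, end == 2. Since start != end, the first call
 * zeroes [1000, 4096) and the second zeroes [8192, 11000); the whole
 * blocks in between are left to the caller to free or convert.
 */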
4290
4291 int ext4_can_truncate(struct inode *inode)
4292 {
4293 if (S_ISREG(inode->i_mode))
4294 return 1;
4295 if (S_ISDIR(inode->i_mode))
4296 return 1;
4297 if (S_ISLNK(inode->i_mode))
4298 return !ext4_inode_is_fast_symlink(inode);
4299 return 0;
4300 }
4301
4302 /*
4303 * We have to make sure i_disksize gets properly updated before we truncate
4304 * page cache due to hole punching or zero range. Otherwise i_disksize update
4305 * can get lost as it may have been postponed to submission of writeback but
4306 * that will never happen if we remove the folio containing i_size from the
4307 * page cache. Also if we punch hole within i_size but above i_disksize,
4308 * following ext4_page_mkwrite() may mistakenly allocate written blocks over
4309 * the hole and thus introduce allocated blocks beyond i_disksize which is
4310 * not allowed (e2fsck would complain in case of crash).
4311 */
4312 int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
4313 loff_t len)
4314 {
4315 handle_t *handle;
4316 int ret;
4317
4318 loff_t size = i_size_read(inode);
4319
4320 WARN_ON(!inode_is_locked(inode));
4321 if (offset > size)
4322 return 0;
4323
4324 if (offset + len < size)
4325 size = offset + len;
4326 if (EXT4_I(inode)->i_disksize >= size)
4327 return 0;
4328
4329 handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
4330 if (IS_ERR(handle))
4331 return PTR_ERR(handle);
4332 ext4_update_i_disksize(inode, size);
4333 ret = ext4_mark_inode_dirty(handle, inode);
4334 ext4_journal_stop(handle);
4335
4336 return ret;
4337 }
4338
4339 static inline void ext4_truncate_folio(struct inode *inode,
4340 loff_t start, loff_t end)
4341 {
4342 unsigned long blocksize = i_blocksize(inode);
4343 struct folio *folio;
4344
4345 /* Nothing to be done if no complete block needs to be truncated. */
4346 if (round_up(start, blocksize) >= round_down(end, blocksize))
4347 return;
4348
4349 folio = filemap_lock_folio(inode->i_mapping, start >> PAGE_SHIFT);
4350 if (IS_ERR(folio))
4351 return;
4352
4353 if (folio_mkclean(folio))
4354 folio_mark_dirty(folio);
4355 folio_unlock(folio);
4356 folio_put(folio);
4357 }
4358
4359 int ext4_truncate_page_cache_block_range(struct inode *inode,
4360 loff_t start, loff_t end)
4361 {
4362 unsigned long blocksize = i_blocksize(inode);
4363 int ret;
4364
4365 /*
4366 * For journalled data we need to write (and checkpoint) pages
4367 * before discarding page cache to avoid inconsistent data on disk
4368 * in case of a crash before the freeing or unwritten-conversion
4369 * transaction is committed.
4370 */
4371 if (ext4_should_journal_data(inode)) {
4372 ret = filemap_write_and_wait_range(inode->i_mapping, start,
4373 end - 1);
4374 if (ret)
4375 return ret;
4376 goto truncate_pagecache;
4377 }
4378
4379 /*
4380 * If the block size is less than the page size, the file's mapped
4381 * blocks within one page could be freed or converted to unwritten.
4382 * So it's necessary to remove writable userspace mappings, and then
4383 * ext4_page_mkwrite() can be called during subsequent write access
4384 * to these partial folios.
4385 */
4386 if (!IS_ALIGNED(start | end, PAGE_SIZE) &&
4387 blocksize < PAGE_SIZE && start < inode->i_size) {
4388 loff_t page_boundary = round_up(start, PAGE_SIZE);
4389
4390 ext4_truncate_folio(inode, start, min(page_boundary, end));
4391 if (end > page_boundary)
4392 ext4_truncate_folio(inode,
4393 round_down(end, PAGE_SIZE), end);
4394 }
4395
4396 truncate_pagecache:
4397 truncate_pagecache_range(inode, start, end - 1);
4398 return 0;
4399 }
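/*
 * Example of the partial-folio handling above: with 1K blocks and 4K
 * pages, truncating blocks in [1024, 9216) calls ext4_truncate_folio()
 * on the folio containing [1024, 4096) and on the folio containing
 * [8192, 9216), so any later mmap write to the surviving parts of
 * those folios must fault through ext4_page_mkwrite() again.
 */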
4400
4401 static void ext4_wait_dax_page(struct inode *inode)
4402 {
4403 filemap_invalidate_unlock(inode->i_mapping);
4404 schedule();
4405 filemap_invalidate_lock(inode->i_mapping);
4406 }
4407
4408 int ext4_break_layouts(struct inode *inode)
4409 {
4410 if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock)))
4411 return -EINVAL;
4412
4413 return dax_break_layout_inode(inode, ext4_wait_dax_page);
4414 }
4415
4416 /*
4417 * ext4_punch_hole: punches a hole in a file by releasing the blocks
4418 * associated with the given offset and length
4419 *
4420 * @inode: File inode
4421 * @offset: The offset where the hole will begin
4422 * @len: The length of the hole
4423 *
4424 * Returns: 0 on success or negative on failure
4425 */
4426
4427 int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
4428 {
4429 struct inode *inode = file_inode(file);
4430 struct super_block *sb = inode->i_sb;
4431 ext4_lblk_t start_lblk, end_lblk;
4432 loff_t max_end = sb->s_maxbytes;
4433 loff_t end = offset + length;
4434 handle_t *handle;
4435 unsigned int credits;
4436 bool partial_zeroed = false;
4437 int ret;
4438
4439 trace_ext4_punch_hole(inode, offset, length, 0);
4440 WARN_ON_ONCE(!inode_is_locked(inode));
4441
4442 /*
4443 * For indirect-block based inodes, make sure that the hole ends
4444 * at least one block before the s_bitmap_maxbytes limit.
4445 */
4446 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4447 max_end = EXT4_SB(sb)->s_bitmap_maxbytes - sb->s_blocksize;
4448
4449 /* No need to punch hole beyond i_size */
4450 if (offset >= inode->i_size || offset >= max_end)
4451 return 0;
4452
4453 /*
4454 * If the hole extends beyond i_size, set the hole to end after
4455 * the block that contains i_size to save pointless tail block zeroing.
4456 */
4457 if (end >= inode->i_size)
4458 end = round_up(inode->i_size, sb->s_blocksize);
4459 if (end > max_end)
4460 end = max_end;
4461 length = end - offset;
4462
4463 ret = ext4_update_disksize_before_punch(inode, offset, length);
4464 if (ret)
4465 return ret;
4466
4467 /* Now release the pages and zero block aligned part of pages*/
4468 ret = ext4_truncate_page_cache_block_range(inode, offset, end);
4469 if (ret)
4470 return ret;
4471
4472 ret = ext4_zero_partial_blocks(inode, offset, length, &partial_zeroed);
4473 if (ret)
4474 return ret;
4475 if (((file->f_flags & O_SYNC) || IS_SYNC(inode)) && partial_zeroed) {
4476 ret = filemap_write_and_wait_range(inode->i_mapping, offset,
4477 end - 1);
4478 if (ret)
4479 return ret;
4480 }
4481
4482 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4483 credits = ext4_chunk_trans_extent(inode, 0);
4484 else
4485 credits = ext4_blocks_for_truncate(inode);
4486 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4487 if (IS_ERR(handle)) {
4488 ret = PTR_ERR(handle);
4489 ext4_std_error(sb, ret);
4490 return ret;
4491 }
4492
4493 /* If there are blocks to remove, do it */
4494 start_lblk = EXT4_B_TO_LBLK(inode, offset);
4495 end_lblk = end >> inode->i_blkbits;
4496
4497 if (end_lblk > start_lblk) {
4498 ext4_lblk_t hole_len = end_lblk - start_lblk;
4499
4500 ext4_fc_track_inode(handle, inode);
4501 ext4_check_map_extents_env(inode);
4502 down_write(&EXT4_I(inode)->i_data_sem);
4503 ext4_discard_preallocations(inode);
4504
4505 ext4_es_remove_extent(inode, start_lblk, hole_len);
4506
4507 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4508 ret = ext4_ext_remove_space(inode, start_lblk,
4509 end_lblk - 1);
4510 else
4511 ret = ext4_ind_remove_space(handle, inode, start_lblk,
4512 end_lblk);
4513 if (ret) {
4514 up_write(&EXT4_I(inode)->i_data_sem);
4515 goto out_handle;
4516 }
4517
4518 ext4_es_insert_extent(inode, start_lblk, hole_len, ~0,
4519 EXTENT_STATUS_HOLE, 0);
4520 up_write(&EXT4_I(inode)->i_data_sem);
4521 }
4522 ext4_fc_track_range(handle, inode, start_lblk, end_lblk);
4523
4524 ret = ext4_mark_inode_dirty(handle, inode);
4525 if (unlikely(ret))
4526 goto out_handle;
4527
4528 ext4_update_inode_fsync_trans(handle, inode, 1);
4529 if ((file->f_flags & O_SYNC) || IS_SYNC(inode))
4530 ext4_handle_sync(handle);
4531 out_handle:
4532 ext4_journal_stop(handle);
4533 return ret;
4534 }
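/*
 * Illustrative sketch, not part of this file: ext4_punch_hole() is
 * reached through fallocate(2). A hypothetical userspace caller:
 *
 *	int fd = open("/mnt/ext4/file", O_RDWR);
 *
 *	// Free 1 MiB of blocks at offset 4 MiB without changing i_size.
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  4 << 20, 1 << 20);
 *
 * FALLOC_FL_PUNCH_HOLE must be combined with FALLOC_FL_KEEP_SIZE, and
 * subsequent reads of the punched range return zeroes.
 */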
4535
4536 int ext4_inode_attach_jinode(struct inode *inode)
4537 {
4538 struct ext4_inode_info *ei = EXT4_I(inode);
4539 struct jbd2_inode *jinode;
4540
4541 if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
4542 return 0;
4543
4544 jinode = jbd2_alloc_inode(GFP_KERNEL);
4545 spin_lock(&inode->i_lock);
4546 if (!ei->jinode) {
4547 if (!jinode) {
4548 spin_unlock(&inode->i_lock);
4549 return -ENOMEM;
4550 }
4551 jbd2_journal_init_jbd_inode(jinode, inode);
4552 /*
4553 * Publish ->jinode only after it is fully initialized so that
4554 * readers never observe a partially initialized jbd2_inode.
4555 */
4556 smp_wmb();
4557 WRITE_ONCE(ei->jinode, jinode);
4558 jinode = NULL;
4559 }
4560 spin_unlock(&inode->i_lock);
4561 if (unlikely(jinode != NULL))
4562 jbd2_free_inode(jinode);
4563 return 0;
4564 }
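/*
 * Note on the publication pattern above (a standard one-time-init
 * sketch, not ext4 specific): the smp_wmb() orders the initialization
 * in jbd2_journal_init_jbd_inode() before the WRITE_ONCE() that makes
 * ->jinode visible, while lockless readers doing READ_ONCE(ei->jinode)
 * followed by a dereference are ordered by the address dependency, so
 * they never observe a partially initialized jbd2_inode.
 */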
4565
4566 /*
4567 * ext4_truncate()
4568 *
4569 * We block out ext4_get_block() block instantiations across the entire
4570 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
4571 * simultaneously on behalf of the same inode.
4572 *
4573 * As we work through the truncate and commit bits of it to the journal there
4574 * is one core, guiding principle: the file's tree must always be consistent on
4575 * disk. We must be able to restart the truncate after a crash.
4576 *
4577 * The file's tree may be transiently inconsistent in memory (although it
4578 * probably isn't), but whenever we close off and commit a journal transaction,
4579 * the contents of (the filesystem + the journal) must be consistent and
4580 * restartable. It's pretty simple, really: bottom up, right to left (although
4581 * left-to-right works OK too).
4582 *
4583 * Note that at recovery time, journal replay occurs *before* the restart of
4584 * truncate against the orphan inode list.
4585 *
4586 * The committed inode has the new, desired i_size (which is the same as
4587 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
4588 * that this inode's truncate did not complete and it will again call
4589 * ext4_truncate() to have another go. So there will be instantiated blocks
4590 * to the right of the truncation point in a crashed ext4 filesystem. But
4591 * that's fine - as long as they are linked from the inode, the post-crash
4592 * ext4_truncate() run will find them and release them.
4593 */
4594 int ext4_truncate(struct inode *inode)
4595 {
4596 struct ext4_inode_info *ei = EXT4_I(inode);
4597 unsigned int credits;
4598 int err = 0, err2;
4599 handle_t *handle;
4600
4601 /*
4602 * There is a possibility that we're either freeing the inode
4603 * or it's a completely new inode. In those cases we might not
4604 * have i_rwsem locked because it's not necessary.
4605 */
4606 if (!(inode_state_read_once(inode) & (I_NEW | I_FREEING)))
4607 WARN_ON(!inode_is_locked(inode));
4608 trace_ext4_truncate_enter(inode);
4609
4610 if (!ext4_can_truncate(inode))
4611 goto out_trace;
4612
4613 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
4614 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
4615
4616 if (ext4_has_inline_data(inode)) {
4617 int has_inline = 1;
4618
4619 err = ext4_inline_data_truncate(inode, &has_inline);
4620 if (err || has_inline)
4621 goto out_trace;
4622 }
4623
4624 /* If we zero out the tail of the page, we have to create a jinode for jbd2 */
4625 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
4626 err = ext4_inode_attach_jinode(inode);
4627 if (err)
4628 goto out_trace;
4629
4630 /* Zero to the end of the block containing i_size */
4631 err = ext4_block_zero_eof(inode, inode->i_size, LLONG_MAX);
4632 if (err)
4633 goto out_trace;
4634 }
4635
4636 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4637 credits = ext4_chunk_trans_extent(inode, 1);
4638 else
4639 credits = ext4_blocks_for_truncate(inode);
4640
4641 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4642 if (IS_ERR(handle)) {
4643 err = PTR_ERR(handle);
4644 goto out_trace;
4645 }
4646
4647 /*
4648 * We add the inode to the orphan list, so that if this
4649 * truncate spans multiple transactions, and we crash, we will
4650 * resume the truncate when the filesystem recovers. It also
4651 * marks the inode dirty, to catch the new size.
4652 *
4653 * Implication: the file must always be in a sane, consistent
4654 * truncatable state while each transaction commits.
4655 */
4656 err = ext4_orphan_add(handle, inode);
4657 if (err)
4658 goto out_stop;
4659
4660 ext4_fc_track_inode(handle, inode);
4661 ext4_check_map_extents_env(inode);
4662
4663 down_write(&EXT4_I(inode)->i_data_sem);
4664 ext4_discard_preallocations(inode);
4665
4666 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4667 err = ext4_ext_truncate(handle, inode);
4668 else
4669 ext4_ind_truncate(handle, inode);
4670
4671 up_write(&ei->i_data_sem);
4672 if (err)
4673 goto out_stop;
4674
4675 if (IS_SYNC(inode))
4676 ext4_handle_sync(handle);
4677
4678 out_stop:
4679 /*
4680 * If this was a simple ftruncate() and the file will remain alive,
4681 * then we need to clear up the orphan record which we created above.
4682 * However, if this was a real unlink then we were called by
4683 * ext4_evict_inode(), and we allow that function to clean up the
4684 * orphan info for us.
4685 */
4686 if (inode->i_nlink)
4687 ext4_orphan_del(handle, inode);
4688
4689 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
4690 err2 = ext4_mark_inode_dirty(handle, inode);
4691 if (unlikely(err2 && !err))
4692 err = err2;
4693 ext4_journal_stop(handle);
4694
4695 out_trace:
4696 trace_ext4_truncate_exit(inode);
4697 return err;
4698 }
4699
4700 static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
4701 {
4702 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4703 return inode_peek_iversion_raw(inode);
4704 else
4705 return inode_peek_iversion(inode);
4706 }
4707
4708 static int ext4_inode_blocks_set(struct ext4_inode *raw_inode,
4709 struct ext4_inode_info *ei)
4710 {
4711 struct inode *inode = &(ei->vfs_inode);
4712 u64 i_blocks = READ_ONCE(inode->i_blocks);
4713 struct super_block *sb = inode->i_sb;
4714
4715 if (i_blocks <= ~0U) {
4716 /*
4717 * i_blocks can be represented in a 32 bit variable
4718 * as a multiple of 512 bytes
4719 */
4720 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4721 raw_inode->i_blocks_high = 0;
4722 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4723 return 0;
4724 }
4725
4726 /*
4727 * This should never happen since sb->s_maxbytes should not have
4728 * allowed this; sb->s_maxbytes was set according to the huge_file
4729 * feature in ext4_fill_super().
4730 */
4731 if (!ext4_has_feature_huge_file(sb))
4732 return -EFSCORRUPTED;
4733
4734 if (i_blocks <= 0xffffffffffffULL) {
4735 /*
4736 * i_blocks can be represented in a 48 bit variable
4737 * as a multiple of 512 bytes
4738 */
4739 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4740 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4741 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4742 } else {
4743 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4744 /* i_blocks is stored in units of the file system block size */
4745 i_blocks = i_blocks >> (inode->i_blkbits - 9);
4746 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4747 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4748 }
4749 return 0;
4750 }
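/*
 * Worked example of the encodings above: i_blocks == 1ULL << 32 (2^32
 * 512-byte sectors, i.e. a 2 TiB file) no longer fits in 32 bits, so
 * with huge_file it is stored as i_blocks_lo == 0, i_blocks_high == 1.
 * Only when the count exceeds 48 bits is EXT4_INODE_HUGE_FILE set and
 * i_blocks stored in filesystem-block units instead of sectors, i.e.
 * shifted down by (i_blkbits - 9), which is 3 for 4K blocks.
 */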
4751
4752 static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode)
4753 {
4754 struct ext4_inode_info *ei = EXT4_I(inode);
4755 uid_t i_uid;
4756 gid_t i_gid;
4757 projid_t i_projid;
4758 int block;
4759 int err;
4760
4761 err = ext4_inode_blocks_set(raw_inode, ei);
4762
4763 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4764 i_uid = i_uid_read(inode);
4765 i_gid = i_gid_read(inode);
4766 i_projid = from_kprojid(&init_user_ns, ei->i_projid);
4767 if (!(test_opt(inode->i_sb, NO_UID32))) {
4768 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
4769 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
4770 /*
4771 * Fix up interoperability with old kernels. Otherwise,
4772 * old inodes get re-used with the upper 16 bits of the
4773 * uid/gid intact.
4774 */
4775 if (ei->i_dtime && !ext4_inode_orphan_tracked(inode)) {
4776 raw_inode->i_uid_high = 0;
4777 raw_inode->i_gid_high = 0;
4778 } else {
4779 raw_inode->i_uid_high =
4780 cpu_to_le16(high_16_bits(i_uid));
4781 raw_inode->i_gid_high =
4782 cpu_to_le16(high_16_bits(i_gid));
4783 }
4784 } else {
4785 raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
4786 raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
4787 raw_inode->i_uid_high = 0;
4788 raw_inode->i_gid_high = 0;
4789 }
4790 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4791
4792 EXT4_INODE_SET_CTIME(inode, raw_inode);
4793 EXT4_INODE_SET_MTIME(inode, raw_inode);
4794 EXT4_INODE_SET_ATIME(inode, raw_inode);
4795 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4796
4797 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4798 raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
4799 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
4800 raw_inode->i_file_acl_high =
4801 cpu_to_le16(ei->i_file_acl >> 32);
4802 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4803 ext4_isize_set(raw_inode, ei->i_disksize);
4804
4805 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4806 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4807 if (old_valid_dev(inode->i_rdev)) {
4808 raw_inode->i_block[0] =
4809 cpu_to_le32(old_encode_dev(inode->i_rdev));
4810 raw_inode->i_block[1] = 0;
4811 } else {
4812 raw_inode->i_block[0] = 0;
4813 raw_inode->i_block[1] =
4814 cpu_to_le32(new_encode_dev(inode->i_rdev));
4815 raw_inode->i_block[2] = 0;
4816 }
4817 } else if (!ext4_has_inline_data(inode)) {
4818 for (block = 0; block < EXT4_N_BLOCKS; block++)
4819 raw_inode->i_block[block] = ei->i_data[block];
4820 }
4821
4822 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4823 u64 ivers = ext4_inode_peek_iversion(inode);
4824
4825 raw_inode->i_disk_version = cpu_to_le32(ivers);
4826 if (ei->i_extra_isize) {
4827 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4828 raw_inode->i_version_hi =
4829 cpu_to_le32(ivers >> 32);
4830 raw_inode->i_extra_isize =
4831 cpu_to_le16(ei->i_extra_isize);
4832 }
4833 }
4834
4835 if (i_projid != EXT4_DEF_PROJID &&
4836 !ext4_has_feature_project(inode->i_sb))
4837 err = err ?: -EFSCORRUPTED;
4838
4839 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4840 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
4841 raw_inode->i_projid = cpu_to_le32(i_projid);
4842
4843 ext4_inode_csum_set(inode, raw_inode, ei);
4844 return err;
4845 }
4846
4847 /*
4848 * ext4_get_inode_loc returns with an extra refcount against the inode's
4849 * underlying buffer_head on success. If we pass 'inode' and it does not
4850 * have in-inode xattr, we have all inode data in memory that is needed
4851 * to recreate the on-disk version of this inode.
4852 */
4853 static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
4854 struct inode *inode, struct ext4_iloc *iloc,
4855 ext4_fsblk_t *ret_block)
4856 {
4857 struct ext4_group_desc *gdp;
4858 struct buffer_head *bh;
4859 ext4_fsblk_t block;
4860 struct blk_plug plug;
4861 int inodes_per_block, inode_offset;
4862
4863 iloc->bh = NULL;
4864 if (ino < EXT4_ROOT_INO ||
4865 ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
4866 return -EFSCORRUPTED;
4867
4868 iloc->block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
4869 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
4870 if (!gdp)
4871 return -EIO;
4872
4873 /*
4874 * Figure out the offset within the block group inode table
4875 */
4876 inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
4877 inode_offset = ((ino - 1) %
4878 EXT4_INODES_PER_GROUP(sb));
4879 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
4880
4881 block = ext4_inode_table(sb, gdp);
4882 if ((block <= le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) ||
4883 (block >= ext4_blocks_count(EXT4_SB(sb)->s_es))) {
4884 ext4_error(sb, "Invalid inode table block %llu in "
4885 "block_group %u", block, iloc->block_group);
4886 return -EFSCORRUPTED;
4887 }
4888 block += (inode_offset / inodes_per_block);
4889
4890 bh = sb_getblk(sb, block);
4891 if (unlikely(!bh))
4892 return -ENOMEM;
4893 if (ext4_buffer_uptodate(bh))
4894 goto has_buffer;
4895
4896 lock_buffer(bh);
4897 if (ext4_buffer_uptodate(bh)) {
4898 /* Someone brought it uptodate while we waited */
4899 unlock_buffer(bh);
4900 goto has_buffer;
4901 }
4902
4903 /*
4904 * If we have all information of the inode in memory and this
4905 * is the only valid inode in the block, we need not read the
4906 * block.
4907 */
4908 if (inode && !ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4909 struct buffer_head *bitmap_bh;
4910 int i, start;
4911
4912 start = inode_offset & ~(inodes_per_block - 1);
4913
4914 /* Is the inode bitmap in cache? */
4915 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
4916 if (unlikely(!bitmap_bh))
4917 goto make_io;
4918
4919 /*
4920 * If the inode bitmap isn't in cache then the
4921 * optimisation may end up performing two reads instead
4922 * of one, so skip it.
4923 */
4924 if (!buffer_uptodate(bitmap_bh)) {
4925 brelse(bitmap_bh);
4926 goto make_io;
4927 }
4928 for (i = start; i < start + inodes_per_block; i++) {
4929 if (i == inode_offset)
4930 continue;
4931 if (ext4_test_bit(i, bitmap_bh->b_data))
4932 break;
4933 }
4934 brelse(bitmap_bh);
4935 if (i == start + inodes_per_block) {
4936 struct ext4_inode *raw_inode =
4937 (struct ext4_inode *) (bh->b_data + iloc->offset);
4938
4939 /* all other inodes are free, so skip I/O */
4940 memset(bh->b_data, 0, bh->b_size);
4941 if (!ext4_test_inode_state(inode, EXT4_STATE_NEW))
4942 ext4_fill_raw_inode(inode, raw_inode);
4943 set_buffer_uptodate(bh);
4944 unlock_buffer(bh);
4945 goto has_buffer;
4946 }
4947 }
4948
4949 make_io:
4950 /*
4951 * If we need to do any I/O, try to pre-readahead extra
4952 * blocks from the inode table.
4953 */
4954 blk_start_plug(&plug);
4955 if (EXT4_SB(sb)->s_inode_readahead_blks) {
4956 ext4_fsblk_t b, end, table;
4957 unsigned num;
4958 __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;
4959
4960 table = ext4_inode_table(sb, gdp);
4961 /* s_inode_readahead_blks is always a power of 2 */
4962 b = block & ~((ext4_fsblk_t) ra_blks - 1);
4963 if (table > b)
4964 b = table;
4965 end = b + ra_blks;
4966 num = EXT4_INODES_PER_GROUP(sb);
4967 if (ext4_has_group_desc_csum(sb))
4968 num -= ext4_itable_unused_count(sb, gdp);
4969 table += num / inodes_per_block;
4970 if (end > table)
4971 end = table;
4972 while (b <= end)
4973 ext4_sb_breadahead_unmovable(sb, b++);
4974 }
4975
4976 /*
4977 * There are other valid inodes in the buffer, this inode
4978 * has in-inode xattrs, or we don't have this inode in memory.
4979 * Read the block from disk.
4980 */
4981 trace_ext4_load_inode(sb, ino);
4982 ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL,
4983 ext4_simulate_fail(sb, EXT4_SIM_INODE_EIO));
4984 blk_finish_plug(&plug);
4985 wait_on_buffer(bh);
4986 if (!buffer_uptodate(bh)) {
4987 if (ret_block)
4988 *ret_block = block;
4989 brelse(bh);
4990 return -EIO;
4991 }
4992 has_buffer:
4993 iloc->bh = bh;
4994 return 0;
4995 }
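/*
 * Worked example of the location math above, assuming 8192 inodes per
 * group, 256-byte inodes and a 4K block size (inodes_per_block == 16):
 * for ino == 20000, block_group == 19999 / 8192 == 2, inode_offset ==
 * 19999 % 8192 == 3615, iloc->offset == (3615 % 16) * 256 == 3840,
 * and the raw inode lives 3615 / 16 == 225 blocks into that group's
 * inode table.
 */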
4996
4997 static int __ext4_get_inode_loc_noinmem(struct inode *inode,
4998 struct ext4_iloc *iloc)
4999 {
5000 ext4_fsblk_t err_blk = 0;
5001 int ret;
5002
5003 ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, NULL, iloc,
5004 &err_blk);
5005
5006 if (ret == -EIO)
5007 ext4_error_inode_block(inode, err_blk, EIO,
5008 "unable to read itable block");
5009
5010 return ret;
5011 }
5012
5013 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
5014 {
5015 ext4_fsblk_t err_blk = 0;
5016 int ret;
5017
5018 ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, inode, iloc,
5019 &err_blk);
5020
5021 if (ret == -EIO)
5022 ext4_error_inode_block(inode, err_blk, EIO,
5023 "unable to read itable block");
5024
5025 return ret;
5026 }
5027
5028
5029 int ext4_get_fc_inode_loc(struct super_block *sb, unsigned long ino,
5030 struct ext4_iloc *iloc)
5031 {
5032 return __ext4_get_inode_loc(sb, ino, NULL, iloc, NULL);
5033 }
5034
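/*
 * Precedence below: dax=never wins over everything; the per-inode
 * constraints (non-regular file, journalled data, inline data, fscrypt,
 * verity, no DAX-capable bdev) each veto DAX; dax=always wins over the
 * per-inode DAX flag; otherwise the per-inode flag decides.
 */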
5035 static bool ext4_should_enable_dax(struct inode *inode)
5036 {
5037 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5038
5039 if (test_opt2(inode->i_sb, DAX_NEVER))
5040 return false;
5041 if (!S_ISREG(inode->i_mode))
5042 return false;
5043 if (ext4_should_journal_data(inode))
5044 return false;
5045 if (ext4_has_inline_data(inode))
5046 return false;
5047 if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
5048 return false;
5049 if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY))
5050 return false;
5051 if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags))
5052 return false;
5053 if (test_opt(inode->i_sb, DAX_ALWAYS))
5054 return true;
5055
5056 return ext4_test_inode_flag(inode, EXT4_INODE_DAX);
5057 }
5058
5059 void ext4_set_inode_flags(struct inode *inode, bool init)
5060 {
5061 unsigned int flags = EXT4_I(inode)->i_flags;
5062 unsigned int new_fl = 0;
5063
5064 WARN_ON_ONCE(IS_DAX(inode) && init);
5065
5066 if (flags & EXT4_SYNC_FL)
5067 new_fl |= S_SYNC;
5068 if (flags & EXT4_APPEND_FL)
5069 new_fl |= S_APPEND;
5070 if (flags & EXT4_IMMUTABLE_FL)
5071 new_fl |= S_IMMUTABLE;
5072 if (flags & EXT4_NOATIME_FL)
5073 new_fl |= S_NOATIME;
5074 if (flags & EXT4_DIRSYNC_FL)
5075 new_fl |= S_DIRSYNC;
5076
5077 /* Because of the way inode_set_flags() works we must preserve S_DAX
5078 * here if already set. */
5079 new_fl |= (inode->i_flags & S_DAX);
5080 if (init && ext4_should_enable_dax(inode))
5081 new_fl |= S_DAX;
5082
5083 if (flags & EXT4_ENCRYPT_FL)
5084 new_fl |= S_ENCRYPTED;
5085 if (flags & EXT4_CASEFOLD_FL)
5086 new_fl |= S_CASEFOLD;
5087 if (flags & EXT4_VERITY_FL)
5088 new_fl |= S_VERITY;
5089 inode_set_flags(inode, new_fl,
5090 S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX|
5091 S_ENCRYPTED|S_CASEFOLD|S_VERITY);
5092 }
5093
5094 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
5095 struct ext4_inode_info *ei)
5096 {
5097 blkcnt_t i_blocks;
5098 struct inode *inode = &(ei->vfs_inode);
5099 struct super_block *sb = inode->i_sb;
5100
5101 if (ext4_has_feature_huge_file(sb)) {
5102 /* we are using combined 48 bit field */
5103 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
5104 le32_to_cpu(raw_inode->i_blocks_lo);
5105 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
5106 /* i_blocks represent file system block size */
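/*
 * E.g. with 4k blocks (i_blkbits == 12), an on-disk i_blocks of 8 is
 * reported as 8 << (12 - 9) == 64 512-byte sectors.
 */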
5107 return i_blocks << (inode->i_blkbits - 9);
5108 } else {
5109 return i_blocks;
5110 }
5111 } else {
5112 return le32_to_cpu(raw_inode->i_blocks_lo);
5113 }
5114 }
5115
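/*
 * Look for in-inode extended attributes in the space past i_extra_isize:
 * if the xattr magic is present, validate the in-inode xattr area and
 * detect inline data stored as a system xattr.
 */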
5116 static inline int ext4_iget_extra_inode(struct inode *inode,
5117 struct ext4_inode *raw_inode,
5118 struct ext4_inode_info *ei)
5119 {
5120 __le32 *magic = (void *)raw_inode +
5121 EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
5122
5123 if (EXT4_INODE_HAS_XATTR_SPACE(inode) &&
5124 *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
5125 int err;
5126
5127 err = xattr_check_inode(inode, IHDR(inode, raw_inode),
5128 ITAIL(inode, raw_inode));
5129 if (err)
5130 return err;
5131
5132 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
5133 err = ext4_find_inline_data_nolock(inode);
5134 if (!err && ext4_has_inline_data(inode))
5135 ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
5136 return err;
5137 } else
5138 EXT4_I(inode)->i_inline_off = 0;
5139 return 0;
5140 }
5141
5142 int ext4_get_projid(struct inode *inode, kprojid_t *projid)
5143 {
5144 if (!ext4_has_feature_project(inode->i_sb))
5145 return -EOPNOTSUPP;
5146 *projid = EXT4_I(inode)->i_projid;
5147 return 0;
5148 }
5149
5150 /*
5151 * ext4 manages i_version itself for ea inodes: it stores the lower 32 bits
5152 * of the refcount in i_version, so use raw values if the inode has the
5153 * EXT4_EA_INODE_FL flag set.
5154 */
5155 static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
5156 {
5157 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
5158 inode_set_iversion_raw(inode, val);
5159 else
5160 inode_set_iversion_queried(inode, val);
5161 }
5162
5163 static int check_igot_inode(struct inode *inode, ext4_iget_flags flags,
5164 const char *function, unsigned int line)
5165 {
5166 const char *err_str;
5167
5168 if (flags & EXT4_IGET_EA_INODE) {
5169 if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
5170 err_str = "missing EA_INODE flag";
5171 goto error;
5172 }
5173 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
5174 EXT4_I(inode)->i_file_acl) {
5175 err_str = "ea_inode with extended attributes";
5176 goto error;
5177 }
5178 } else {
5179 if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
5180 /*
5181 * open_by_handle_at() could provide an old inode number
5182 * that has since been reused for an ea_inode; this does
5183 * not indicate filesystem corruption
5184 */
5185 if (flags & EXT4_IGET_HANDLE)
5186 return -ESTALE;
5187 err_str = "unexpected EA_INODE flag";
5188 goto error;
5189 }
5190 }
5191 if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) {
5192 err_str = "unexpected bad inode w/o EXT4_IGET_BAD";
5193 goto error;
5194 }
5195 return 0;
5196
5197 error:
5198 ext4_error_inode(inode, function, line, 0, "%s", err_str);
5199 return -EFSCORRUPTED;
5200 }
5201
5202 void ext4_set_inode_mapping_order(struct inode *inode)
5203 {
5204 struct super_block *sb = inode->i_sb;
5205 u16 min_order, max_order;
5206
5207 max_order = EXT4_SB(sb)->s_max_folio_order;
5208 if (!max_order)
5209 return;
5210
5211 min_order = EXT4_SB(sb)->s_min_folio_order;
5212 if (!min_order && !S_ISREG(inode->i_mode))
5213 return;
5214
5215 if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
5216 max_order = min_order;
5217
5218 mapping_set_folio_order_range(inode->i_mapping, min_order, max_order);
5219 }
5220
5221 struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
5222 ext4_iget_flags flags, const char *function,
5223 unsigned int line)
5224 {
5225 struct ext4_iloc iloc;
5226 struct ext4_inode *raw_inode;
5227 struct ext4_inode_info *ei;
5228 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
5229 struct inode *inode;
5230 journal_t *journal = EXT4_SB(sb)->s_journal;
5231 long ret;
5232 loff_t size;
5233 int block;
5234 uid_t i_uid;
5235 gid_t i_gid;
5236 projid_t i_projid;
5237
5238 if ((!(flags & EXT4_IGET_SPECIAL) && is_special_ino(sb, ino)) ||
5239 (ino < EXT4_ROOT_INO) ||
5240 (ino > le32_to_cpu(es->s_inodes_count))) {
5241 if (flags & EXT4_IGET_HANDLE)
5242 return ERR_PTR(-ESTALE);
5243 __ext4_error(sb, function, line, false, EFSCORRUPTED, 0,
5244 "inode #%lu: comm %s: iget: illegal inode #",
5245 ino, current->comm);
5246 return ERR_PTR(-EFSCORRUPTED);
5247 }
5248
5249 inode = iget_locked(sb, ino);
5250 if (!inode)
5251 return ERR_PTR(-ENOMEM);
5252 if (!(inode_state_read_once(inode) & I_NEW)) {
5253 ret = check_igot_inode(inode, flags, function, line);
5254 if (ret) {
5255 iput(inode);
5256 return ERR_PTR(ret);
5257 }
5258 return inode;
5259 }
5260
5261 ei = EXT4_I(inode);
5262 iloc.bh = NULL;
5263
5264 ret = __ext4_get_inode_loc_noinmem(inode, &iloc);
5265 if (ret < 0)
5266 goto bad_inode;
5267 raw_inode = ext4_raw_inode(&iloc);
5268
5269 if ((flags & EXT4_IGET_HANDLE) &&
5270 (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
5271 ret = -ESTALE;
5272 goto bad_inode;
5273 }
5274
5275 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
5276 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
5277 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
5278 EXT4_INODE_SIZE(inode->i_sb) ||
5279 (ei->i_extra_isize & 3)) {
5280 ext4_error_inode(inode, function, line, 0,
5281 "iget: bad extra_isize %u "
5282 "(inode size %u)",
5283 ei->i_extra_isize,
5284 EXT4_INODE_SIZE(inode->i_sb));
5285 ret = -EFSCORRUPTED;
5286 goto bad_inode;
5287 }
5288 } else
5289 ei->i_extra_isize = 0;
5290
5291 /* Precompute checksum seed for inode metadata */
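/* i.e. i_csum_seed = crc32c(crc32c(sbi->s_csum_seed, inum), gen) */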
5292 if (ext4_has_feature_metadata_csum(sb)) {
5293 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5294 __u32 csum;
5295 __le32 inum = cpu_to_le32(inode->i_ino);
5296 __le32 gen = raw_inode->i_generation;
5297 csum = ext4_chksum(sbi->s_csum_seed, (__u8 *)&inum,
5298 sizeof(inum));
5299 ei->i_csum_seed = ext4_chksum(csum, (__u8 *)&gen, sizeof(gen));
5300 }
5301
5302 if ((!ext4_inode_csum_verify(inode, raw_inode, ei) ||
5303 ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) &&
5304 (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))) {
5305 ext4_error_inode_err(inode, function, line, 0,
5306 EFSBADCRC, "iget: checksum invalid");
5307 ret = -EFSBADCRC;
5308 goto bad_inode;
5309 }
5310
5311 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
5312 i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
5313 i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
5314 if (ext4_has_feature_project(sb) &&
5315 EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
5316 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
5317 i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
5318 else
5319 i_projid = EXT4_DEF_PROJID;
5320
5321 if (!(test_opt(inode->i_sb, NO_UID32))) {
5322 i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
5323 i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
5324 }
5325 i_uid_write(inode, i_uid);
5326 i_gid_write(inode, i_gid);
5327 ei->i_projid = make_kprojid(&init_user_ns, i_projid);
5328 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
5329
5330 ei->i_inline_off = 0;
5331 ei->i_dir_start_lookup = 0;
5332 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
5333 /* We now have enough fields to check if the inode was active or not.
5334 * This is needed because nfsd might try to access dead inodes;
5335 * the test is the same one that e2fsck uses.
5336 * NeilBrown 1999oct15
5337 */
5338 if (inode->i_nlink == 0) {
5339 if ((inode->i_mode == 0 || flags & EXT4_IGET_SPECIAL ||
5340 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
5341 ino != EXT4_BOOT_LOADER_INO) {
5342 /* this inode is deleted or unallocated */
5343 if (flags & EXT4_IGET_SPECIAL) {
5344 ext4_error_inode(inode, function, line, 0,
5345 "iget: special inode unallocated");
5346 ret = -EFSCORRUPTED;
5347 } else
5348 ret = -ESTALE;
5349 goto bad_inode;
5350 }
5351 /* The only unlinked inodes we let through here have
5352 * valid i_mode and are being read by the orphan
5353 * recovery code: that's fine, we're about to complete
5354 * the process of deleting those.
5355 * OR it is the EXT4_BOOT_LOADER_INO which is
5356 * not initialized on a new filesystem. */
5357 }
5358 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
5359 ext4_set_inode_flags(inode, true);
5360 /* Detect invalid flag combination - can't have both inline data and extents */
5361 if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
5362 ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
5363 ext4_error_inode(inode, function, line, 0,
5364 "inode has both inline data and extents flags");
5365 ret = -EFSCORRUPTED;
5366 goto bad_inode;
5367 }
5368 inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
5369 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
5370 if (ext4_has_feature_64bit(sb))
5371 ei->i_file_acl |=
5372 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
5373 inode->i_size = ext4_isize(sb, raw_inode);
5374 size = i_size_read(inode);
5375 if (size < 0 || size > ext4_get_maxbytes(inode)) {
5376 ext4_error_inode(inode, function, line, 0,
5377 "iget: bad i_size value: %lld", size);
5378 ret = -EFSCORRUPTED;
5379 goto bad_inode;
5380 }
5381 /*
5382 * If dir_index is not enabled but there's a dir with the INDEX flag set,
5383 * we'd normally treat htree data as empty space. But with metadata
5384 * checksumming that corrupts the checksums, so forbid it.
5385 */
5386 if (!ext4_has_feature_dir_index(sb) &&
5387 ext4_has_feature_metadata_csum(sb) &&
5388 ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
5389 ext4_error_inode(inode, function, line, 0,
5390 "iget: Dir with htree data on filesystem without dir_index feature.");
5391 ret = -EFSCORRUPTED;
5392 goto bad_inode;
5393 }
5394 ei->i_disksize = inode->i_size;
5395 #ifdef CONFIG_QUOTA
5396 ei->i_reserved_quota = 0;
5397 #endif
5398 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
5399 ei->i_block_group = iloc.block_group;
5400 ei->i_last_alloc_group = ~0;
5401 /*
5402 * NOTE! The in-memory inode i_data array is in little-endian order
5403 * even on big-endian machines: we do NOT byteswap the block numbers!
5404 */
5405 for (block = 0; block < EXT4_N_BLOCKS; block++)
5406 ei->i_data[block] = raw_inode->i_block[block];
5407 INIT_LIST_HEAD(&ei->i_orphan);
5408 ext4_fc_init_inode(&ei->vfs_inode);
5409
5410 /*
5411 * Set transaction id's of transactions that have to be committed
5412 * to finish f[data]sync. We set them to currently running transaction
5413 * as we cannot be sure that the inode or some of its metadata isn't
5414 * part of the transaction - the inode could have been reclaimed and
5415 * now it is reread from disk.
5416 */
5417 if (journal) {
5418 transaction_t *transaction;
5419 tid_t tid;
5420
5421 read_lock(&journal->j_state_lock);
5422 if (journal->j_running_transaction)
5423 transaction = journal->j_running_transaction;
5424 else
5425 transaction = journal->j_committing_transaction;
5426 if (transaction)
5427 tid = transaction->t_tid;
5428 else
5429 tid = journal->j_commit_sequence;
5430 read_unlock(&journal->j_state_lock);
5431 ei->i_sync_tid = tid;
5432 ei->i_datasync_tid = tid;
5433 }
5434
5435 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
5436 if (ei->i_extra_isize == 0) {
5437 /* The extra space is currently unused. Use it. */
5438 BUILD_BUG_ON(sizeof(struct ext4_inode) & 3);
5439 ei->i_extra_isize = sizeof(struct ext4_inode) -
5440 EXT4_GOOD_OLD_INODE_SIZE;
5441 } else {
5442 ret = ext4_iget_extra_inode(inode, raw_inode, ei);
5443 if (ret)
5444 goto bad_inode;
5445 }
5446 }
5447
5448 EXT4_INODE_GET_CTIME(inode, raw_inode);
5449 EXT4_INODE_GET_ATIME(inode, raw_inode);
5450 EXT4_INODE_GET_MTIME(inode, raw_inode);
5451 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
5452
5453 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
5454 u64 ivers = le32_to_cpu(raw_inode->i_disk_version);
5455
5456 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
5457 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
5458 ivers |=
5459 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
5460 }
5461 ext4_inode_set_iversion_queried(inode, ivers);
5462 }
5463
5464 ret = 0;
5465 if (ei->i_file_acl &&
5466 !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) {
5467 ext4_error_inode(inode, function, line, 0,
5468 "iget: bad extended attribute block %llu",
5469 ei->i_file_acl);
5470 ret = -EFSCORRUPTED;
5471 goto bad_inode;
5472 } else if (!ext4_has_inline_data(inode)) {
5473 /* validate the block references in the inode */
5474 if (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) &&
5475 (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
5476 (S_ISLNK(inode->i_mode) &&
5477 !ext4_inode_is_fast_symlink(inode)))) {
5478 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5479 ret = ext4_ext_check_inode(inode);
5480 else
5481 ret = ext4_ind_check_inode(inode);
5482 }
5483 }
5484 if (ret)
5485 goto bad_inode;
5486
5487 if (S_ISREG(inode->i_mode)) {
5488 inode->i_op = &ext4_file_inode_operations;
5489 inode->i_fop = &ext4_file_operations;
5490 ext4_set_aops(inode);
5491 } else if (S_ISDIR(inode->i_mode)) {
5492 inode->i_op = &ext4_dir_inode_operations;
5493 inode->i_fop = &ext4_dir_operations;
5494 } else if (S_ISLNK(inode->i_mode)) {
5495 /* VFS does not allow setting these so must be corruption */
5496 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
5497 ext4_error_inode(inode, function, line, 0,
5498 "iget: immutable or append flags "
5499 "not allowed on symlinks");
5500 ret = -EFSCORRUPTED;
5501 goto bad_inode;
5502 }
5503 if (IS_ENCRYPTED(inode)) {
5504 inode->i_op = &ext4_encrypted_symlink_inode_operations;
5505 } else if (ext4_inode_is_fast_symlink(inode)) {
5506 inode->i_op = &ext4_fast_symlink_inode_operations;
5507
5508 /*
5509 * Orphan cleanup can see inodes with i_size == 0
5510 * and i_data uninitialized. Skip size checks in
5511 * that case. This is safe because the first thing
5512 * ext4_evict_inode() does for fast symlinks is
5513 * clearing of i_data and i_size.
5514 */
5515 if ((EXT4_SB(sb)->s_mount_state & EXT4_ORPHAN_FS)) {
5516 if (inode->i_nlink != 0) {
5517 ext4_error_inode(inode, function, line, 0,
5518 "invalid orphan symlink nlink %d",
5519 inode->i_nlink);
5520 ret = -EFSCORRUPTED;
5521 goto bad_inode;
5522 }
5523 } else {
5524 if (inode->i_size == 0 ||
5525 inode->i_size >= sizeof(ei->i_data) ||
5526 strnlen((char *)ei->i_data, inode->i_size + 1) !=
5527 inode->i_size) {
5528 ext4_error_inode(inode, function, line, 0,
5529 "invalid fast symlink length %llu",
5530 (unsigned long long)inode->i_size);
5531 ret = -EFSCORRUPTED;
5532 goto bad_inode;
5533 }
5534 inode_set_cached_link(inode, (char *)ei->i_data,
5535 inode->i_size);
5536 }
5537 } else {
5538 inode->i_op = &ext4_symlink_inode_operations;
5539 }
5540 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
5541 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
5542 inode->i_op = &ext4_special_inode_operations;
5543 if (raw_inode->i_block[0])
5544 init_special_inode(inode, inode->i_mode,
5545 old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
5546 else
5547 init_special_inode(inode, inode->i_mode,
5548 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
5549 } else if (ino == EXT4_BOOT_LOADER_INO) {
5550 make_bad_inode(inode);
5551 } else {
5552 ret = -EFSCORRUPTED;
5553 ext4_error_inode(inode, function, line, 0,
5554 "iget: bogus i_mode (%o)", inode->i_mode);
5555 goto bad_inode;
5556 }
5557 if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb)) {
5558 ext4_error_inode(inode, function, line, 0,
5559 "casefold flag without casefold feature");
5560 ret = -EFSCORRUPTED;
5561 goto bad_inode;
5562 }
5563
5564 ext4_set_inode_mapping_order(inode);
5565
5566 ret = check_igot_inode(inode, flags, function, line);
5567 /*
5568 * -ESTALE here means there is nothing inherently wrong with the inode,
5569 * it's just not an inode we can return for an fhandle lookup.
5570 */
5571 if (ret == -ESTALE) {
5572 brelse(iloc.bh);
5573 unlock_new_inode(inode);
5574 iput(inode);
5575 return ERR_PTR(-ESTALE);
5576 }
5577 if (ret)
5578 goto bad_inode;
5579 brelse(iloc.bh);
5580 /* Initialize the "no ACL's" state for the simple cases */
5581 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) && !ei->i_file_acl)
5582 cache_no_acl(inode);
5583 unlock_new_inode(inode);
5584 return inode;
5585
5586 bad_inode:
5587 brelse(iloc.bh);
5588 iget_failed(inode);
5589 return ERR_PTR(ret);
5590 }
5591
5592 static void __ext4_update_other_inode_time(struct super_block *sb,
5593 unsigned long orig_ino,
5594 unsigned long ino,
5595 struct ext4_inode *raw_inode)
5596 {
5597 struct inode *inode;
5598
5599 inode = find_inode_by_ino_rcu(sb, ino);
5600 if (!inode)
5601 return;
5602
5603 if (!inode_is_dirtytime_only(inode))
5604 return;
5605
5606 spin_lock(&inode->i_lock);
5607 if (inode_is_dirtytime_only(inode)) {
5608 struct ext4_inode_info *ei = EXT4_I(inode);
5609
5610 inode_state_clear(inode, I_DIRTY_TIME);
5611 spin_unlock(&inode->i_lock);
5612
5613 spin_lock(&ei->i_raw_lock);
5614 EXT4_INODE_SET_CTIME(inode, raw_inode);
5615 EXT4_INODE_SET_MTIME(inode, raw_inode);
5616 EXT4_INODE_SET_ATIME(inode, raw_inode);
5617 ext4_inode_csum_set(inode, raw_inode, ei);
5618 spin_unlock(&ei->i_raw_lock);
5619 trace_ext4_other_inode_update_time(inode, orig_ino);
5620 return;
5621 }
5622 spin_unlock(&inode->i_lock);
5623 }
5624
5625 /*
5626 * Opportunistically update the other time fields for other inodes in
5627 * the same inode table block.
5628 */
5629 static void ext4_update_other_inodes_time(struct super_block *sb,
5630 unsigned long orig_ino, char *buf)
5631 {
5632 unsigned long ino;
5633 int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
5634 int inode_size = EXT4_INODE_SIZE(sb);
5635
5636 /*
5637 * Calculate the first inode in the inode table block. Inode
5638 * numbers are one-based. That is, the first inode in a block
5639 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
5640 */
5641 ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
5642 rcu_read_lock();
5643 for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
5644 if (ino == orig_ino)
5645 continue;
5646 __ext4_update_other_inode_time(sb, orig_ino, ino,
5647 (struct ext4_inode *)buf);
5648 }
5649 rcu_read_unlock();
5650 }
5651
5652 /*
5653 * Post the struct inode info into an on-disk inode location in the
5654 * buffer-cache. This gobbles the caller's reference to the
5655 * buffer_head in the inode location struct.
5656 *
5657 * The caller must have write access to iloc->bh.
5658 */
5659 static int ext4_do_update_inode(handle_t *handle,
5660 struct inode *inode,
5661 struct ext4_iloc *iloc)
5662 {
5663 struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
5664 struct ext4_inode_info *ei = EXT4_I(inode);
5665 struct buffer_head *bh = iloc->bh;
5666 struct super_block *sb = inode->i_sb;
5667 int err;
5668 int need_datasync = 0, set_large_file = 0;
5669
5670 spin_lock(&ei->i_raw_lock);
5671
5672 /*
5673 * For fields not tracked in the in-memory inode, initialise them
5674 * to zero for new inodes.
5675 */
5676 if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
5677 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
5678
5679 if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode))
5680 need_datasync = 1;
5681 if (ei->i_disksize > 0x7fffffffULL) {
5682 if (!ext4_has_feature_large_file(sb) ||
5683 EXT4_SB(sb)->s_es->s_rev_level == cpu_to_le32(EXT4_GOOD_OLD_REV))
5684 set_large_file = 1;
5685 }
5686
5687 err = ext4_fill_raw_inode(inode, raw_inode);
5688 spin_unlock(&ei->i_raw_lock);
5689 if (err) {
5690 EXT4_ERROR_INODE(inode, "corrupted inode contents");
5691 goto out_brelse;
5692 }
5693
5694 if (inode->i_sb->s_flags & SB_LAZYTIME)
5695 ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
5696 bh->b_data);
5697
5698 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
5699 err = ext4_handle_dirty_metadata(handle, NULL, bh);
5700 if (err)
5701 goto out_error;
5702 ext4_clear_inode_state(inode, EXT4_STATE_NEW);
5703 if (set_large_file) {
5704 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
5705 err = ext4_journal_get_write_access(handle, sb,
5706 EXT4_SB(sb)->s_sbh,
5707 EXT4_JTR_NONE);
5708 if (err)
5709 goto out_error;
5710 lock_buffer(EXT4_SB(sb)->s_sbh);
5711 ext4_set_feature_large_file(sb);
5712 ext4_superblock_csum_set(sb);
5713 unlock_buffer(EXT4_SB(sb)->s_sbh);
5714 ext4_handle_sync(handle);
5715 err = ext4_handle_dirty_metadata(handle, NULL,
5716 EXT4_SB(sb)->s_sbh);
5717 }
5718 ext4_update_inode_fsync_trans(handle, inode, need_datasync);
5719 out_error:
5720 ext4_std_error(inode->i_sb, err);
5721 out_brelse:
5722 brelse(bh);
5723 return err;
5724 }
5725
5726 /*
5727 * ext4_write_inode()
5728 *
5729 * We are called from a few places:
5730 *
5731 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
5732 * Here, there will be no transaction running. We wait for any running
5733 * transaction to commit.
5734 *
5735 * - Within flush work (sys_sync(), kupdate and such).
5736 * We wait on commit, if told to.
5737 *
5738 * - Within iput_final() -> write_inode_now()
5739 * We wait on commit, if told to.
5740 *
5741 * In all cases it is actually safe for us to return without doing anything,
5742 * because the inode has been copied into a raw inode buffer in
5743 * ext4_mark_inode_dirty(). This is a correctness thing for WB_SYNC_ALL
5744 * writeback.
5745 *
5746 * Note that we are absolutely dependent upon all inode dirtiers doing the
5747 * right thing: they *must* call mark_inode_dirty() after dirtying info in
5748 * which we are interested.
5749 *
5750 * It would be a bug for them to not do this. The code:
5751 *
5752 * mark_inode_dirty(inode)
5753 * stuff();
5754 * inode->i_size = expr;
5755 *
5756 * is in error because write_inode() could occur while `stuff()' is running,
5757 * and the new i_size will be lost. Plus the inode will no longer be on the
5758 * superblock's dirty inode list.
5759 */
5760 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
5761 {
5762 int err;
5763
5764 if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
5765 return 0;
5766
5767 err = ext4_emergency_state(inode->i_sb);
5768 if (unlikely(err))
5769 return err;
5770
5771 if (EXT4_SB(inode->i_sb)->s_journal) {
5772 if (ext4_journal_current_handle()) {
5773 ext4_debug("called recursively, non-PF_MEMALLOC!\n");
5774 dump_stack();
5775 return -EIO;
5776 }
5777
5778 /*
5779 * No need to force transaction in WB_SYNC_NONE mode. Also
5780 * ext4_sync_fs() will force the commit after everything is
5781 * written.
5782 */
5783 if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
5784 return 0;
5785
5786 err = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
5787 EXT4_I(inode)->i_sync_tid);
5788 } else {
5789 struct ext4_iloc iloc;
5790
5791 err = __ext4_get_inode_loc_noinmem(inode, &iloc);
5792 if (err)
5793 return err;
5794 /*
5795 * sync(2) will flush the whole buffer cache. No need to do
5796 * it here separately for each inode.
5797 */
5798 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
5799 sync_dirty_buffer(iloc.bh);
5800 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
5801 ext4_error_inode_block(inode, iloc.bh->b_blocknr, EIO,
5802 "IO error syncing inode");
5803 err = -EIO;
5804 }
5805 brelse(iloc.bh);
5806 }
5807 return err;
5808 }
5809
5810 /*
5811 * In data=journal mode ext4_journalled_invalidate_folio() may fail to invalidate
5812 * buffers that are attached to a folio straddling i_size and are undergoing
5813 * commit. In that case we have to wait for commit to finish and try again.
5814 */
5815 static void ext4_wait_for_tail_page_commit(struct inode *inode)
5816 {
5817 unsigned offset;
5818 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
5819 tid_t commit_tid;
5820 int ret;
5821 bool has_transaction;
5822
5823 offset = inode->i_size & (PAGE_SIZE - 1);
5824 /*
5825 * If the folio is fully truncated, we don't need to wait for any commit
5826 * (and we even should not as __ext4_journalled_invalidate_folio() may
5827 * strip all buffers from the folio but keep the folio dirty which can then
5828 * confuse e.g. concurrent ext4_writepages() seeing dirty folio without
5829 * buffers). Also we don't need to wait for any commit if all buffers in
5830 * the folio remain valid. This is most beneficial for the common case of
5831 * blocksize == PAGESIZE.
5832 */
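/*
 * For example, assuming 4k pages and 1k blocks: an i_size ending at
 * offset 3500 in the folio satisfies offset > 4096 - 1024, all buffers
 * stay valid and we can return; an i_size ending at offset 2000 leaves
 * tail buffers to invalidate, so we may have to wait for the commit.
 */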
5833 if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
5834 return;
5835 while (1) {
5836 struct folio *folio = filemap_lock_folio(inode->i_mapping,
5837 inode->i_size >> PAGE_SHIFT);
5838 if (IS_ERR(folio))
5839 return;
5840 ret = __ext4_journalled_invalidate_folio(folio, offset,
5841 folio_size(folio) - offset);
5842 folio_unlock(folio);
5843 folio_put(folio);
5844 if (ret != -EBUSY)
5845 return;
5846 has_transaction = false;
5847 read_lock(&journal->j_state_lock);
5848 if (journal->j_committing_transaction) {
5849 commit_tid = journal->j_committing_transaction->t_tid;
5850 has_transaction = true;
5851 }
5852 read_unlock(&journal->j_state_lock);
5853 if (has_transaction)
5854 jbd2_log_wait_commit(journal, commit_tid);
5855 }
5856 }
5857
5858 /*
5859 * ext4_setattr()
5860 *
5861 * Called from notify_change.
5862 *
5863 * We want to trap VFS attempts to truncate the file as soon as
5864 * possible. In particular, we want to make sure that when the VFS
5865 * shrinks i_size, we put the inode on the orphan list and modify
5866 * i_disksize immediately, so that during the subsequent flushing of
5867 * dirty pages and freeing of disk blocks, we can guarantee that any
5868 * commit will leave the blocks being flushed in an unused state on
5869 * disk. (On recovery, the inode will get truncated and the blocks will
5870 * be freed, so we have a strong guarantee that no future commit will
5871 * leave these blocks visible to the user.)
5872 *
5873 * Another thing we have to ensure is that if we are in ordered mode
5874 * and the inode is still attached to the committing transaction, we
5875 * must start writeout of all the dirty pages which are being truncated.
5876 * This way we are sure that all the data written in the previous
5877 * transaction are already on disk (truncate waits for pages under
5878 * writeback).
5879 *
5880 * Called with inode->i_rwsem down.
5881 */
5882 int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
5883 struct iattr *attr)
5884 {
5885 struct inode *inode = d_inode(dentry);
5886 int error, rc = 0;
5887 int orphan = 0;
5888 const unsigned int ia_valid = attr->ia_valid;
5889 bool inc_ivers = true;
5890
5891 error = ext4_emergency_state(inode->i_sb);
5892 if (unlikely(error))
5893 return error;
5894
5895 if (unlikely(IS_IMMUTABLE(inode)))
5896 return -EPERM;
5897
5898 if (unlikely(IS_APPEND(inode) &&
5899 (ia_valid & (ATTR_MODE | ATTR_UID |
5900 ATTR_GID | ATTR_TIMES_SET))))
5901 return -EPERM;
5902
5903 error = setattr_prepare(idmap, dentry, attr);
5904 if (error)
5905 return error;
5906
5907 error = fscrypt_prepare_setattr(dentry, attr);
5908 if (error)
5909 return error;
5910
5911 if (is_quota_modification(idmap, inode, attr)) {
5912 error = dquot_initialize(inode);
5913 if (error)
5914 return error;
5915 }
5916
5917 if (i_uid_needs_update(idmap, attr, inode) ||
5918 i_gid_needs_update(idmap, attr, inode)) {
5919 handle_t *handle;
5920
5921 /* (user+group)*(old+new) structure, inode write (sb,
5922 * inode block, ? - but truncate inode update has it) */
5923 handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
5924 (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
5925 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
5926 if (IS_ERR(handle)) {
5927 error = PTR_ERR(handle);
5928 goto err_out;
5929 }
5930
5931 /* dquot_transfer() calls back ext4_get_inode_usage() which
5932 * counts xattr inode references.
5933 */
5934 down_read(&EXT4_I(inode)->xattr_sem);
5935 error = dquot_transfer(idmap, inode, attr);
5936 up_read(&EXT4_I(inode)->xattr_sem);
5937
5938 if (error) {
5939 ext4_journal_stop(handle);
5940 return error;
5941 }
5942 /* Update corresponding info in inode so that everything is in
5943 * one transaction */
5944 i_uid_update(idmap, attr, inode);
5945 i_gid_update(idmap, attr, inode);
5946 error = ext4_mark_inode_dirty(handle, inode);
5947 ext4_journal_stop(handle);
5948 if (unlikely(error)) {
5949 return error;
5950 }
5951 }
5952
5953 if (attr->ia_valid & ATTR_SIZE) {
5954 handle_t *handle;
5955 loff_t oldsize = inode->i_size;
5956 loff_t old_disksize;
5957 int shrink = (attr->ia_size < inode->i_size);
5958
5959 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
5960 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5961
5962 if (attr->ia_size > sbi->s_bitmap_maxbytes) {
5963 return -EFBIG;
5964 }
5965 }
5966 if (!S_ISREG(inode->i_mode)) {
5967 return -EINVAL;
5968 }
5969
5970 if (attr->ia_size == inode->i_size)
5971 inc_ivers = false;
5972
5973 /*
5974 * If file has inline data but new size exceeds inline capacity,
5975 * convert to extent-based storage first to prevent inconsistent
5976 * state (inline flag set but size exceeds inline capacity).
5977 */
5978 if (ext4_has_inline_data(inode) &&
5979 attr->ia_size > EXT4_I(inode)->i_inline_size) {
5980 error = ext4_convert_inline_data(inode);
5981 if (error)
5982 goto err_out;
5983 }
5984
5985 if (shrink) {
5986 if (ext4_should_order_data(inode)) {
5987 error = ext4_begin_ordered_truncate(inode,
5988 attr->ia_size);
5989 if (error)
5990 goto err_out;
5991 }
5992 /*
5993 * Blocks are going to be removed from the inode. Wait
5994 * for dio in flight.
5995 */
5996 inode_dio_wait(inode);
5997 }
5998
5999 filemap_invalidate_lock(inode->i_mapping);
6000
6001 rc = ext4_break_layouts(inode);
6002 if (rc) {
6003 filemap_invalidate_unlock(inode->i_mapping);
6004 goto err_out;
6005 }
6006
6007 if (attr->ia_size != inode->i_size) {
6008 /* attach jbd2 jinode for EOF folio tail zeroing */
6009 if (attr->ia_size & (inode->i_sb->s_blocksize - 1) ||
6010 oldsize & (inode->i_sb->s_blocksize - 1)) {
6011 error = ext4_inode_attach_jinode(inode);
6012 if (error)
6013 goto out_mmap_sem;
6014 }
6015
6016 /*
6017 * Update c/mtime and tail zero the EOF folio on
6018 * truncate up. ext4_truncate() handles the shrink case
6019 * below.
6020 */
6021 if (!shrink) {
6022 inode_set_mtime_to_ts(inode,
6023 inode_set_ctime_current(inode));
6024 if (oldsize & (inode->i_sb->s_blocksize - 1)) {
6025 error = ext4_block_zero_eof(inode,
6026 oldsize, LLONG_MAX);
6027 if (error)
6028 goto out_mmap_sem;
6029 }
6030 }
6031
6032 handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
6033 if (IS_ERR(handle)) {
6034 error = PTR_ERR(handle);
6035 goto out_mmap_sem;
6036 }
6037 if (ext4_handle_valid(handle) && shrink) {
6038 error = ext4_orphan_add(handle, inode);
6039 orphan = 1;
6040 }
6041
6042 if (shrink)
6043 ext4_fc_track_range(handle, inode,
6044 (attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
6045 inode->i_sb->s_blocksize_bits,
6046 EXT_MAX_BLOCKS - 1);
6047 else
6048 ext4_fc_track_range(
6049 handle, inode,
6050 (oldsize > 0 ? oldsize - 1 : oldsize) >>
6051 inode->i_sb->s_blocksize_bits,
6052 (attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
6053 inode->i_sb->s_blocksize_bits);
6054
6055 down_write(&EXT4_I(inode)->i_data_sem);
6056 old_disksize = EXT4_I(inode)->i_disksize;
6057 EXT4_I(inode)->i_disksize = attr->ia_size;
6058
6059 /*
6060 * We have to update i_size under i_data_sem together
6061 * with i_disksize to avoid races with writeback code
6062 * running ext4_wb_update_i_disksize().
6063 */
6064 if (!error)
6065 i_size_write(inode, attr->ia_size);
6066 else
6067 EXT4_I(inode)->i_disksize = old_disksize;
6068 up_write(&EXT4_I(inode)->i_data_sem);
6069 rc = ext4_mark_inode_dirty(handle, inode);
6070 if (!error)
6071 error = rc;
6072 ext4_journal_stop(handle);
6073 if (error)
6074 goto out_mmap_sem;
6075 if (!shrink) {
6076 pagecache_isize_extended(inode, oldsize,
6077 inode->i_size);
6078 } else if (ext4_should_journal_data(inode)) {
6079 ext4_wait_for_tail_page_commit(inode);
6080 }
6081 }
6082
6083 /*
6084 * Truncate pagecache after we've waited for commit
6085 * in data=journal mode to make pages freeable.
6086 */
6087 truncate_pagecache(inode, inode->i_size);
6088 /*
6089 * Call ext4_truncate() even if i_size didn't change to
6090 * truncate possible preallocated blocks.
6091 */
6092 if (attr->ia_size <= oldsize) {
6093 rc = ext4_truncate(inode);
6094 if (rc)
6095 error = rc;
6096 }
6097 out_mmap_sem:
6098 filemap_invalidate_unlock(inode->i_mapping);
6099 }
6100
6101 if (!error) {
6102 if (inc_ivers)
6103 inode_inc_iversion(inode);
6104 setattr_copy(idmap, inode, attr);
6105 mark_inode_dirty(inode);
6106 }
6107
6108 /*
6109 * If the call to ext4_truncate failed to get a transaction handle at
6110 * all, we need to clean up the in-core orphan list manually.
6111 */
6112 if (orphan && inode->i_nlink)
6113 ext4_orphan_del(NULL, inode);
6114
6115 if (!error && (ia_valid & ATTR_MODE))
6116 rc = posix_acl_chmod(idmap, dentry, inode->i_mode);
6117
6118 err_out:
6119 if (error)
6120 ext4_std_error(inode->i_sb, error);
6121 if (!error)
6122 error = rc;
6123 return error;
6124 }
6125
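/*
 * Returns the DIO alignment for this inode: 0 means DIO is not
 * supported at all, 1 means the iomap defaults apply, and any other
 * value is the required alignment in bytes (the block size for
 * encrypted files that support DIO).
 */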
6126 u32 ext4_dio_alignment(struct inode *inode)
6127 {
6128 if (fsverity_active(inode))
6129 return 0;
6130 if (ext4_should_journal_data(inode))
6131 return 0;
6132 if (ext4_has_inline_data(inode))
6133 return 0;
6134 if (IS_ENCRYPTED(inode)) {
6135 if (!fscrypt_dio_supported(inode))
6136 return 0;
6137 return i_blocksize(inode);
6138 }
6139 return 1; /* use the iomap defaults */
6140 }
6141
6142 int ext4_getattr(struct mnt_idmap *idmap, const struct path *path,
6143 struct kstat *stat, u32 request_mask, unsigned int query_flags)
6144 {
6145 struct inode *inode = d_inode(path->dentry);
6146 struct ext4_inode *raw_inode;
6147 struct ext4_inode_info *ei = EXT4_I(inode);
6148 unsigned int flags;
6149
6150 if ((request_mask & STATX_BTIME) &&
6151 EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
6152 stat->result_mask |= STATX_BTIME;
6153 stat->btime.tv_sec = ei->i_crtime.tv_sec;
6154 stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
6155 }
6156
6157 /*
6158 * Return the DIO alignment restrictions if requested. We only return
6159 * this information when requested, since on encrypted files it might
6160 * take a fair bit of work to get if the file wasn't opened recently.
6161 */
6162 if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
6163 u32 dio_align = ext4_dio_alignment(inode);
6164
6165 stat->result_mask |= STATX_DIOALIGN;
6166 if (dio_align == 1) {
6167 struct block_device *bdev = inode->i_sb->s_bdev;
6168
6169 /* iomap defaults */
6170 stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
6171 stat->dio_offset_align = bdev_logical_block_size(bdev);
6172 } else {
6173 stat->dio_mem_align = dio_align;
6174 stat->dio_offset_align = dio_align;
6175 }
6176 }
6177
6178 if ((request_mask & STATX_WRITE_ATOMIC) && S_ISREG(inode->i_mode)) {
6179 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
6180 unsigned int awu_min = 0, awu_max = 0;
6181
6182 if (ext4_inode_can_atomic_write(inode)) {
6183 awu_min = sbi->s_awu_min;
6184 awu_max = sbi->s_awu_max;
6185 }
6186
6187 generic_fill_statx_atomic_writes(stat, awu_min, awu_max, 0);
6188 }
6189
6190 flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
6191 if (flags & EXT4_APPEND_FL)
6192 stat->attributes |= STATX_ATTR_APPEND;
6193 if (flags & EXT4_COMPR_FL)
6194 stat->attributes |= STATX_ATTR_COMPRESSED;
6195 if (flags & EXT4_ENCRYPT_FL)
6196 stat->attributes |= STATX_ATTR_ENCRYPTED;
6197 if (flags & EXT4_IMMUTABLE_FL)
6198 stat->attributes |= STATX_ATTR_IMMUTABLE;
6199 if (flags & EXT4_NODUMP_FL)
6200 stat->attributes |= STATX_ATTR_NODUMP;
6201 if (flags & EXT4_VERITY_FL)
6202 stat->attributes |= STATX_ATTR_VERITY;
6203
6204 stat->attributes_mask |= (STATX_ATTR_APPEND |
6205 STATX_ATTR_COMPRESSED |
6206 STATX_ATTR_ENCRYPTED |
6207 STATX_ATTR_IMMUTABLE |
6208 STATX_ATTR_NODUMP |
6209 STATX_ATTR_VERITY);
6210
6211 generic_fillattr(idmap, request_mask, inode, stat);
6212 return 0;
6213 }
6214
6215 int ext4_file_getattr(struct mnt_idmap *idmap,
6216 const struct path *path, struct kstat *stat,
6217 u32 request_mask, unsigned int query_flags)
6218 {
6219 struct inode *inode = d_inode(path->dentry);
6220 u64 delalloc_blocks;
6221
6222 ext4_getattr(idmap, path, stat, request_mask, query_flags);
6223
6224 /*
6225 * If there is inline data in the inode, the inode will normally not
6226 * have data blocks allocated (it may have an external xattr block).
6227 * Report at least one sector for such files, so tools like tar, rsync,
6228 * others don't incorrectly think the file is completely sparse.
6229 */
6230 if (unlikely(ext4_has_inline_data(inode)))
6231 stat->blocks += (stat->size + 511) >> 9;
6232
6233 /*
6234 * We can't update i_blocks if the block allocation is delayed,
6235 * otherwise, if the system crashes before the real block
6236 * allocation is done, we would have i_blocks inconsistent with
6237 * the on-disk file blocks.
6238 * We always keep i_blocks updated together with the real
6239 * allocation. But so as not to confuse the user, stat
6240 * will return blocks that include the delayed allocation
6241 * blocks for this file.
6242 */
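/*
 * E.g. with 4k blocks (s_blocksize_bits == 12), each delayed allocation
 * block adds 1 << (12 - 9) == 8 512-byte sectors to stat->blocks.
 */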
6243 delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
6244 EXT4_I(inode)->i_reserved_data_blocks);
6245 stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
6246 return 0;
6247 }
6248
6249 static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
6250 int pextents)
6251 {
6252 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
6253 return ext4_ind_trans_blocks(inode, lblocks);
6254 return ext4_ext_index_trans_blocks(inode, pextents);
6255 }
6256
6257 /*
6258 * Account for index blocks, block group bitmaps and block group
6259 * descriptor blocks if we modify data blocks and index blocks. In the
6260 * worst case, the index blocks spread over different block groups.
6261 *
6262 * If data blocks are discontiguous, they may spread over different
6263 * block groups too. Even if they are contiguous, with flexbg they
6264 * could still cross a block group boundary.
6265 *
6266 * Also account for superblock, inode, quota and xattr blocks
6267 */
6268 int ext4_meta_trans_blocks(struct inode *inode, int lblocks, int pextents)
6269 {
6270 ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
6271 int gdpblocks;
6272 int idxblocks;
6273 int ret;
6274
6275 /*
6276 * How many index and leaf blocks do we need to touch to map @lblocks
6277 * logical blocks to @pextents physical extents?
6278 */
6279 idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
6280
6281 /*
6282 * Now let's see how many group bitmaps and group descriptors we need
6283 * to account for.
6284 */
6285 groups = idxblocks + pextents;
6286 gdpblocks = groups;
6287 if (groups > ngroups)
6288 groups = ngroups;
6289 if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
6290 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
6291
6292 /* bitmaps and block group descriptor blocks */
6293 ret = idxblocks + groups + gdpblocks;
6294
6295 /* Blocks for super block, inode, quota and xattr blocks */
6296 ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
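/*
 * Worked example (illustrative numbers): mapping 4 discontiguous blocks
 * on an extent inode with, say, idxblocks == 2 gives groups == 6; on a
 * filesystem with enough groups and GDT blocks that is 2 + 6 + 6 == 14
 * credits before the fixed metadata overhead added above.
 */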
6297
6298 return ret;
6299 }
6300
6301 /*
6302 * Calculate the journal credits for modifying the number of blocks
6303 * in a single extent within one transaction. 'nrblocks' is used only
6304 * for non-extent inodes. For extent type inodes, 'nrblocks' can be
6305 * zero if the exact number of blocks is unknown.
6306 */
6307 int ext4_chunk_trans_extent(struct inode *inode, int nrblocks)
6308 {
6309 int ret;
6310
6311 ret = ext4_meta_trans_blocks(inode, nrblocks, 1);
6312 /* Account for data blocks for journalled mode */
6313 if (ext4_should_journal_data(inode))
6314 ret += nrblocks;
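/*
 * E.g. in data=journal mode, modifying a 4 block chunk costs
 * ext4_meta_trans_blocks(inode, 4, 1) + 4 credits, since the data
 * blocks themselves must also pass through the journal.
 */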
6315 return ret;
6316 }
6317
6318 /*
6319 * Calculate the journal credits for a chunk of data modification.
6320 *
6321 * This is called from DIO, fallocate or whoever calling
6322 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
6323 *
6324 * Journal buffers for data blocks are not included here, as DIO
6325 * and fallocate do not need to journal data buffers.
6326 */
6327 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
6328 {
6329 return ext4_meta_trans_blocks(inode, nrblocks, 1);
6330 }
6331
6332 /*
6333 * The caller must have previously called ext4_reserve_inode_write().
6334 * Given this, we know that the caller already has write access to iloc->bh.
6335 */
6336 int ext4_mark_iloc_dirty(handle_t *handle,
6337 struct inode *inode, struct ext4_iloc *iloc)
6338 {
6339 int err = 0;
6340
6341 err = ext4_emergency_state(inode->i_sb);
6342 if (unlikely(err)) {
6343 put_bh(iloc->bh);
6344 return err;
6345 }
6346 ext4_fc_track_inode(handle, inode);
6347
6348 /* the do_update_inode consumes one bh->b_count */
6349 get_bh(iloc->bh);
6350
6351 /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
6352 err = ext4_do_update_inode(handle, inode, iloc);
6353 put_bh(iloc->bh);
6354 return err;
6355 }
6356
6357 /*
6358 * On success, we end up with an outstanding reference count against
6359 * iloc->bh. This _must_ be cleaned up later.
6360 */
6361
6362 int
6363 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
6364 struct ext4_iloc *iloc)
6365 {
6366 int err;
6367
6368 err = ext4_emergency_state(inode->i_sb);
6369 if (unlikely(err))
6370 return err;
6371
6372 err = ext4_get_inode_loc(inode, iloc);
6373 if (!err) {
6374 BUFFER_TRACE(iloc->bh, "get_write_access");
6375 err = ext4_journal_get_write_access(handle, inode->i_sb,
6376 iloc->bh, EXT4_JTR_NONE);
6377 if (err) {
6378 brelse(iloc->bh);
6379 iloc->bh = NULL;
6380 }
6381 ext4_fc_track_inode(handle, inode);
6382 }
6383 ext4_std_error(inode->i_sb, err);
6384 return err;
6385 }
6386
6387 static int __ext4_expand_extra_isize(struct inode *inode,
6388 unsigned int new_extra_isize,
6389 struct ext4_iloc *iloc,
6390 handle_t *handle, int *no_expand)
6391 {
6392 struct ext4_inode *raw_inode;
6393 struct ext4_xattr_ibody_header *header;
6394 unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
6395 struct ext4_inode_info *ei = EXT4_I(inode);
6396 int error;
6397
6398 /* this was checked at iget time, but double check for good measure */
6399 if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
6400 (ei->i_extra_isize & 3)) {
6401 EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
6402 ei->i_extra_isize,
6403 EXT4_INODE_SIZE(inode->i_sb));
6404 return -EFSCORRUPTED;
6405 }
6406 if ((new_extra_isize < ei->i_extra_isize) ||
6407 (new_extra_isize < 4) ||
6408 (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
6409 return -EINVAL; /* Should never happen */
6410
6411 raw_inode = ext4_raw_inode(iloc);
6412
6413 header = IHDR(inode, raw_inode);
6414
6415 /* No extended attributes present */
6416 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
6417 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
6418 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
6419 EXT4_I(inode)->i_extra_isize, 0,
6420 new_extra_isize - EXT4_I(inode)->i_extra_isize);
6421 EXT4_I(inode)->i_extra_isize = new_extra_isize;
6422 return 0;
6423 }
6424
6425 /*
6426 * We may need to allocate an external xattr block so we need quotas
6427 * initialized. Here we can be called with various locks held so we
6428 * cannot afford to initialize quotas ourselves. So just bail.
6429 */
6430 if (dquot_initialize_needed(inode))
6431 return -EAGAIN;
6432
6433 /* try to expand with EAs present */
6434 error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
6435 raw_inode, handle);
6436 if (error) {
6437 /*
6438 * Inode size expansion failed; don't try again
6439 */
6440 *no_expand = 1;
6441 }
6442
6443 return error;
6444 }
6445
6446 /*
6447 * Expand an inode by new_extra_isize bytes.
6448 * Returns 0 on success or negative error number on failure.
6449 */
6450 static int ext4_try_to_expand_extra_isize(struct inode *inode,
6451 unsigned int new_extra_isize,
6452 struct ext4_iloc iloc,
6453 handle_t *handle)
6454 {
6455 int no_expand;
6456 int error;
6457
6458 if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
6459 return -EOVERFLOW;
6460
6461 /*
6462 * In nojournal mode, we can immediately attempt to expand
6463 * the inode. When journaled, we first need to obtain extra
6464 * buffer credits since we may write into the EA block
6465 * with this same handle. If journal_extend fails, then it will
6466 * only result in a minor loss of functionality for that inode.
6467 * If this is felt to be critical, then e2fsck should be run to
6468 * force a large enough s_min_extra_isize.
6469 */
6470 if (ext4_journal_extend(handle,
6471 EXT4_DATA_TRANS_BLOCKS(inode->i_sb), 0) != 0)
6472 return -ENOSPC;
6473
6474 if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
6475 return -EBUSY;
6476
6477 error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
6478 handle, &no_expand);
6479 ext4_write_unlock_xattr(inode, &no_expand);
6480
6481 return error;
6482 }
6483
6484 int ext4_expand_extra_isize(struct inode *inode,
6485 unsigned int new_extra_isize,
6486 struct ext4_iloc *iloc)
6487 {
6488 handle_t *handle;
6489 int no_expand;
6490 int error, rc;
6491
6492 if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
6493 brelse(iloc->bh);
6494 return -EOVERFLOW;
6495 }
6496
6497 handle = ext4_journal_start(inode, EXT4_HT_INODE,
6498 EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
6499 if (IS_ERR(handle)) {
6500 error = PTR_ERR(handle);
6501 brelse(iloc->bh);
6502 return error;
6503 }
6504
6505 ext4_write_lock_xattr(inode, &no_expand);
6506
6507 BUFFER_TRACE(iloc->bh, "get_write_access");
6508 error = ext4_journal_get_write_access(handle, inode->i_sb, iloc->bh,
6509 EXT4_JTR_NONE);
6510 if (error) {
6511 brelse(iloc->bh);
6512 goto out_unlock;
6513 }
6514
6515 error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
6516 handle, &no_expand);
6517
6518 rc = ext4_mark_iloc_dirty(handle, inode, iloc);
6519 if (!error)
6520 error = rc;
6521
6522 out_unlock:
6523 ext4_write_unlock_xattr(inode, &no_expand);
6524 ext4_journal_stop(handle);
6525 return error;
6526 }
6527
6528 /*
6529 * What we do here is to mark the in-core inode as clean with respect to inode
6530 * dirtiness (it may still be data-dirty).
6531 * This means that the in-core inode may be reaped by prune_icache
6532 * without having to perform any I/O. This is a very good thing,
6533 * because *any* task may call prune_icache - even ones which
6534 * have a transaction open against a different journal.
6535 *
6536 * Is this cheating? Not really. Sure, we haven't written the
6537 * inode out, but prune_icache isn't a user-visible syncing function.
6538 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
6539 * we start and wait on commits.
6540 */
6541 int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
6542 const char *func, unsigned int line)
6543 {
6544 struct ext4_iloc iloc;
6545 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
6546 int err;
6547
6548 might_sleep();
6549 trace_ext4_mark_inode_dirty(inode, _RET_IP_);
6550 err = ext4_reserve_inode_write(handle, inode, &iloc);
6551 if (err)
6552 goto out;
6553
6554 if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
6555 ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
6556 iloc, handle);
6557
6558 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
6559 out:
6560 if (unlikely(err))
6561 ext4_error_inode_err(inode, func, line, 0, err,
6562 "mark_inode_dirty error");
6563 return err;
6564 }
6565
6566 /*
6567 * ext4_dirty_inode() is called from __mark_inode_dirty()
6568 *
6569 * We're really interested in the case where a file is being extended.
6570 * i_size has been changed by generic_commit_write() and we thus need
6571 * to include the updated inode in the current transaction.
6572 *
6573 * Also, dquot_alloc_block() will always dirty the inode when blocks
6574 * are allocated to the file.
6575 *
6576 * If the inode is marked synchronous, we don't honour that here - doing
6577 * so would cause a commit on atime updates, which we don't bother doing.
6578 * We handle synchronous inodes at the highest possible level.
6579 */
6580 void ext4_dirty_inode(struct inode *inode, int flags)
6581 {
6582 handle_t *handle;
6583
6584 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
6585 if (IS_ERR(handle))
6586 return;
6587 ext4_mark_inode_dirty(handle, inode);
6588 ext4_journal_stop(handle);
6589 }
6590
6591 int ext4_change_inode_journal_flag(struct inode *inode, int val)
6592 {
6593 journal_t *journal;
6594 handle_t *handle;
6595 int err;
6596 int alloc_ctx;
6597
6598 /*
6599 * We have to be very careful here: changing a data block's
6600 * journaling status dynamically is dangerous. If we write a
6601 * data block to the journal, change the status and then delete
6602 * that block, we risk forgetting to revoke the old log record
6603 * from the journal and so a subsequent replay can corrupt data.
6604 * So, first we make sure that the journal is empty and that
6605 * nobody is changing anything.
6606 */
6607
6608 journal = EXT4_JOURNAL(inode);
6609 if (!journal)
6610 return 0;
6611 if (is_journal_aborted(journal))
6612 return -EROFS;
6613
6614 /* Wait for all existing dio workers */
6615 inode_dio_wait(inode);
6616
6617 /*
6618 * Before flushing the journal and switching inode's aops, we have
6619 * to flush all dirty data the inode has. There can be outstanding
6620 * delayed allocations, there can be unwritten extents created by
6621 * fallocate or buffered writes in dioread_nolock mode covered by
6622 * dirty data which can be converted only after flushing the dirty
6623 * data (and journalled aops don't know how to handle these cases).
6624 */
6625 filemap_invalidate_lock(inode->i_mapping);
6626 err = filemap_write_and_wait(inode->i_mapping);
6627 if (err < 0) {
6628 filemap_invalidate_unlock(inode->i_mapping);
6629 return err;
6630 }
6631 /* Before switching the inode's journalling mode, evict all the page cache. */
6632 truncate_pagecache(inode, 0);
6633
6634 alloc_ctx = ext4_writepages_down_write(inode->i_sb);
6635 jbd2_journal_lock_updates(journal);
6636
6637 /*
6638 * OK, there are no updates running now, and all cached data is
6639 * synced to disk. We are now in a completely consistent state
6640 * which doesn't have anything in the journal, and we know that
6641 * no filesystem updates are running, so it is safe to modify
6642 * the inode's in-core data-journaling state flag now.
6643 */
6644
6645 if (val)
6646 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6647 else {
6648 err = jbd2_journal_flush(journal, 0);
6649 if (err < 0) {
6650 jbd2_journal_unlock_updates(journal);
6651 ext4_writepages_up_write(inode->i_sb, alloc_ctx);
6652 filemap_invalidate_unlock(inode->i_mapping);
6653 return err;
6654 }
6655 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6656 }
6657 ext4_set_aops(inode);
6658 ext4_set_inode_mapping_order(inode);
6659
6660 jbd2_journal_unlock_updates(journal);
6661 ext4_writepages_up_write(inode->i_sb, alloc_ctx);
6662 filemap_invalidate_unlock(inode->i_mapping);
6663
6664 /* Finally we can mark the inode as dirty. */
6665
6666 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
6667 if (IS_ERR(handle))
6668 return PTR_ERR(handle);
6669
	ext4_fc_mark_ineligible(inode->i_sb,
			EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, handle);
	err = ext4_mark_inode_dirty(handle, inode);
	ext4_handle_sync(handle);
	ext4_journal_stop(handle);
	ext4_std_error(inode->i_sb, err);

	return err;
}
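
/*
 * A sketch of the expected caller (an assumption, modelled on the
 * EXT4_IOC_SETFLAGS ioctl path, which toggles EXT4_JOURNAL_DATA_FL):
 *
 *	if ((new_flags ^ old_flags) & EXT4_JOURNAL_DATA_FL) {
 *		err = ext4_change_inode_journal_flag(inode,
 *				new_flags & EXT4_JOURNAL_DATA_FL);
 *		if (err)
 *			return err;
 *	}
 *
 * new_flags/old_flags are illustrative names here, not the variables
 * used by the real ioctl code.
 */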
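/*
 * Callback for ext4_walk_page_buffers(): report buffers that do not yet
 * have a disk mapping, so callers can tell whether any block under the
 * folio still needs to be allocated.
 */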
static int ext4_bh_unmapped(handle_t *handle, struct inode *inode,
			    struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}

static int ext4_block_page_mkwrite(struct inode *inode, struct folio *folio,
				   get_block_t get_block)
{
	handle_t *handle;
	loff_t size;
	unsigned long len;
	int credits;
	int ret;

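	/*
	 * Worst case, every block under the folio is unmapped and has to
	 * be allocated here, so reserve journal credits for a full folio's
	 * worth of blocks.
	 */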
	credits = ext4_chunk_trans_extent(inode,
			ext4_journal_blocks_per_folio(inode));
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	folio_lock(folio);
	size = i_size_read(inode);
	/* Page got truncated from under us? */
	if (folio->mapping != inode->i_mapping || folio_pos(folio) > size) {
		ret = -EFAULT;
		goto out_error;
	}

	len = folio_size(folio);
	if (folio_pos(folio) + len > size)
		len = size - folio_pos(folio);

	ret = ext4_block_write_begin(handle, folio, 0, len, get_block);
	if (ret)
		goto out_error;

	if (!ext4_should_journal_data(inode)) {
		block_commit_write(folio, 0, len);
		folio_mark_dirty(folio);
	} else {
		ret = ext4_journal_folio_buffers(handle, folio, len);
		if (ret)
			goto out_error;
	}
	ext4_journal_stop(handle);
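	/*
	 * If the backing device requires stable pages, wait for writeback
	 * to finish before letting the caller dirty the page again.
	 */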
	folio_wait_stable(folio);
	return ret;

out_error:
	folio_unlock(folio);
	ext4_journal_stop(handle);
	return ret;
}

vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio = page_folio(vmf->page);
	loff_t size;
	unsigned long len;
	int err;
	vm_fault_t ret;
	struct file *file = vma->vm_file;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	get_block_t *get_block = ext4_get_block;
	int retries = 0;

	if (unlikely(IS_IMMUTABLE(inode)))
		return VM_FAULT_SIGBUS;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	filemap_invalidate_lock_shared(mapping);

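	/*
	 * Data kept inline in the inode cannot be written through a shared
	 * mapping; move it out to a regular data block first.
	 */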
	err = ext4_convert_inline_data(inode);
	if (err)
		goto out_ret;

	/*
	 * On data journalling we skip straight to the transaction handle:
	 * there's no delalloc; page truncation will be checked later; the
	 * early return with all buffers mapped (which calculates size/len)
	 * can't be used; and there's no dioread_nolock, so only
	 * ext4_get_block.
	 */
	if (ext4_should_journal_data(inode))
		goto retry_alloc;

	/* Delalloc case is easy... */
	if (test_opt(inode->i_sb, DELALLOC) &&
	    !ext4_nonda_switch(inode->i_sb)) {
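		/*
		 * Retry on ENOSPC: forcing a transaction commit may free
		 * blocks pinned by the journal and let the allocation
		 * succeed on a later attempt.
		 */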
		do {
			err = block_page_mkwrite(vma, vmf,
						 ext4_da_get_block_prep);
		} while (err == -ENOSPC &&
			 ext4_should_retry_alloc(inode->i_sb, &retries));
		goto out_ret;
	}

	folio_lock(folio);
	size = i_size_read(inode);
	/* Page got truncated from under us? */
	if (folio->mapping != mapping || folio_pos(folio) > size) {
		folio_unlock(folio);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	len = folio_size(folio);
	if (folio_pos(folio) + len > size)
		len = size - folio_pos(folio);
	/*
	 * Return if we have all the buffers mapped.  This avoids the need
	 * to do journal_start/journal_stop, which can block and take a
	 * long time.
	 *
	 * This cannot be done for data journalling, as we have to add the
	 * inode to the transaction's list to writeprotect pages on commit.
	 */
	if (folio_buffers(folio)) {
		if (!ext4_walk_page_buffers(NULL, inode, folio_buffers(folio),
					    0, len, NULL,
					    ext4_bh_unmapped)) {
			/* Wait so that we don't change page under IO */
			folio_wait_stable(folio);
			ret = VM_FAULT_LOCKED;
			goto out;
		}
	}
	folio_unlock(folio);
	/* OK, we need to fill the hole... */
	if (ext4_should_dioread_nolock(inode))
		get_block = ext4_get_block_unwritten;
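	/*
	 * With dioread_nolock, new blocks are allocated as unwritten and
	 * only marked written once the data is on disk, so a crash cannot
	 * expose stale block contents through this file.
	 */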
retry_alloc:
	/* Start journal and allocate blocks */
	err = ext4_block_page_mkwrite(inode, folio, get_block);
	if (err == -EAGAIN ||
	    (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)))
		goto retry_alloc;
out_ret:
	ret = vmf_fs_error(err);
out:
	filemap_invalidate_unlock_shared(mapping);
	sb_end_pagefault(inode->i_sb);
	return ret;
}
