xref: /linux/fs/ext4/inode.c (revision af1f459233d4edeef634f559539e7f4b64cb1d25)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/ext4/inode.c
4  *
5  * Copyright (C) 1992, 1993, 1994, 1995
6  * Remy Card (card@masi.ibp.fr)
7  * Laboratoire MASI - Institut Blaise Pascal
8  * Universite Pierre et Marie Curie (Paris VI)
9  *
10  *  from
11  *
12  *  linux/fs/minix/inode.c
13  *
14  *  Copyright (C) 1991, 1992  Linus Torvalds
15  *
16  *  64-bit file support on 64-bit platforms by Jakub Jelinek
17  *	(jj@sunsite.ms.mff.cuni.cz)
18  *
19  *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
20  */
21 
22 #include <linux/fs.h>
23 #include <linux/mount.h>
24 #include <linux/time.h>
25 #include <linux/highuid.h>
26 #include <linux/pagemap.h>
27 #include <linux/dax.h>
28 #include <linux/quotaops.h>
29 #include <linux/string.h>
30 #include <linux/buffer_head.h>
31 #include <linux/writeback.h>
32 #include <linux/pagevec.h>
33 #include <linux/mpage.h>
34 #include <linux/namei.h>
35 #include <linux/uio.h>
36 #include <linux/bio.h>
37 #include <linux/workqueue.h>
38 #include <linux/kernel.h>
39 #include <linux/printk.h>
40 #include <linux/slab.h>
41 #include <linux/bitops.h>
42 #include <linux/iomap.h>
43 #include <linux/iversion.h>
44 
45 #include "ext4_jbd2.h"
46 #include "xattr.h"
47 #include "acl.h"
48 #include "truncate.h"
49 
50 #include <trace/events/ext4.h>
51 
52 static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
53 			      struct ext4_inode_info *ei)
54 {
55 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
56 	__u32 csum;
57 	__u16 dummy_csum = 0;
58 	int offset = offsetof(struct ext4_inode, i_checksum_lo);
59 	unsigned int csum_size = sizeof(dummy_csum);
60 
61 	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
62 	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
63 	offset += csum_size;
64 	csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
65 			   EXT4_GOOD_OLD_INODE_SIZE - offset);
66 
67 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
68 		offset = offsetof(struct ext4_inode, i_checksum_hi);
69 		csum = ext4_chksum(sbi, csum, (__u8 *)raw +
70 				   EXT4_GOOD_OLD_INODE_SIZE,
71 				   offset - EXT4_GOOD_OLD_INODE_SIZE);
72 		if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
73 			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
74 					   csum_size);
75 			offset += csum_size;
76 		}
77 		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
78 				   EXT4_INODE_SIZE(inode->i_sb) - offset);
79 	}
80 
81 	return csum;
82 }
83 
84 static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
85 				  struct ext4_inode_info *ei)
86 {
87 	__u32 provided, calculated;
88 
89 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
90 	    cpu_to_le32(EXT4_OS_LINUX) ||
91 	    !ext4_has_metadata_csum(inode->i_sb))
92 		return 1;
93 
94 	provided = le16_to_cpu(raw->i_checksum_lo);
95 	calculated = ext4_inode_csum(inode, raw, ei);
96 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
97 	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
98 		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
99 	else
100 		calculated &= 0xFFFF;
101 
102 	return provided == calculated;
103 }
104 
105 void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
106 			 struct ext4_inode_info *ei)
107 {
108 	__u32 csum;
109 
110 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
111 	    cpu_to_le32(EXT4_OS_LINUX) ||
112 	    !ext4_has_metadata_csum(inode->i_sb))
113 		return;
114 
115 	csum = ext4_inode_csum(inode, raw, ei);
116 	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
117 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
118 	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
119 		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
120 }
121 
122 static inline int ext4_begin_ordered_truncate(struct inode *inode,
123 					      loff_t new_size)
124 {
125 	trace_ext4_begin_ordered_truncate(inode, new_size);
126 	/*
127 	 * If jinode is zero, then we never opened the file for
128 	 * writing, so there's no need to call
129 	 * jbd2_journal_begin_ordered_truncate() since there's no
130 	 * outstanding writes we need to flush.
131 	 */
132 	if (!EXT4_I(inode)->jinode)
133 		return 0;
134 	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
135 						   EXT4_I(inode)->jinode,
136 						   new_size);
137 }
138 
139 static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
140 				  int pextents);
141 
142 /*
143  * Test whether an inode is a fast symlink.
144  * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
145  */
146 int ext4_inode_is_fast_symlink(struct inode *inode)
147 {
148 	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
149 		int ea_blocks = EXT4_I(inode)->i_file_acl ?
150 				EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
151 
152 		if (ext4_has_inline_data(inode))
153 			return 0;
154 
155 		return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
156 	}
157 	return S_ISLNK(inode->i_mode) && inode->i_size &&
158 	       (inode->i_size < EXT4_N_BLOCKS * 4);
159 }
160 
161 /*
162  * Called at the last iput() if i_nlink is zero.
163  */
164 void ext4_evict_inode(struct inode *inode)
165 {
166 	handle_t *handle;
167 	int err;
168 	/*
169 	 * Credits for final inode cleanup and freeing:
170 	 * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
171 	 * (xattr block freeing), bitmap, group descriptor (inode freeing)
172 	 */
173 	int extra_credits = 6;
174 	struct ext4_xattr_inode_array *ea_inode_array = NULL;
175 	bool freeze_protected = false;
176 
177 	trace_ext4_evict_inode(inode);
178 
179 	if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
180 		ext4_evict_ea_inode(inode);
181 	if (inode->i_nlink) {
182 		truncate_inode_pages_final(&inode->i_data);
183 
184 		goto no_delete;
185 	}
186 
187 	if (is_bad_inode(inode))
188 		goto no_delete;
189 	dquot_initialize(inode);
190 
191 	if (ext4_should_order_data(inode))
192 		ext4_begin_ordered_truncate(inode, 0);
193 	truncate_inode_pages_final(&inode->i_data);
194 
195 	/*
196 	 * For inodes with journalled data, transaction commit could have
197 	 * dirtied the inode. And for inodes with dioread_nolock, unwritten
198 	 * extents converting worker could merge extents and also have dirtied
199 	 * the inode. Flush worker is ignoring it because of I_FREEING flag but
200 	 * we still need to remove the inode from the writeback lists.
201 	 */
202 	if (!list_empty_careful(&inode->i_io_list))
203 		inode_io_list_del(inode);
204 
205 	/*
206 	 * Protect us against freezing - iput() caller didn't have to have any
207 	 * protection against it. When we are in a running transaction though,
208 	 * we are already protected against freezing and we cannot grab further
209 	 * protection due to lock ordering constraints.
210 	 */
211 	if (!ext4_journal_current_handle()) {
212 		sb_start_intwrite(inode->i_sb);
213 		freeze_protected = true;
214 	}
215 
216 	if (!IS_NOQUOTA(inode))
217 		extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);
218 
219 	/*
220 	 * Block bitmap, group descriptor, and inode are accounted in both
221 	 * ext4_blocks_for_truncate() and extra_credits. So subtract 3.
222 	 */
223 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
224 			 ext4_blocks_for_truncate(inode) + extra_credits - 3);
225 	if (IS_ERR(handle)) {
226 		ext4_std_error(inode->i_sb, PTR_ERR(handle));
227 		/*
228 		 * If we're going to skip the normal cleanup, we still need to
229 		 * make sure that the in-core orphan linked list is properly
230 		 * cleaned up.
231 		 */
232 		ext4_orphan_del(NULL, inode);
233 		if (freeze_protected)
234 			sb_end_intwrite(inode->i_sb);
235 		goto no_delete;
236 	}
237 
238 	if (IS_SYNC(inode))
239 		ext4_handle_sync(handle);
240 
241 	/*
242 	 * Set inode->i_size to 0 before calling ext4_truncate(). We need
243 	 * special handling of symlinks here because i_size is used to
244 	 * determine whether ext4_inode_info->i_data contains symlink data or
245 	 * block mappings. Setting i_size to 0 will remove its fast symlink
246 	 * status. Erase i_data so that it becomes a valid empty block map.
247 	 */
248 	if (ext4_inode_is_fast_symlink(inode))
249 		memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
250 	inode->i_size = 0;
251 	err = ext4_mark_inode_dirty(handle, inode);
252 	if (err) {
253 		ext4_warning(inode->i_sb,
254 			     "couldn't mark inode dirty (err %d)", err);
255 		goto stop_handle;
256 	}
257 	if (inode->i_blocks) {
258 		err = ext4_truncate(inode);
259 		if (err) {
260 			ext4_error_err(inode->i_sb, -err,
261 				       "couldn't truncate inode %lu (err %d)",
262 				       inode->i_ino, err);
263 			goto stop_handle;
264 		}
265 	}
266 
267 	/* Remove xattr references. */
268 	err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
269 				      extra_credits);
270 	if (err) {
271 		ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
272 stop_handle:
273 		ext4_journal_stop(handle);
274 		ext4_orphan_del(NULL, inode);
275 		if (freeze_protected)
276 			sb_end_intwrite(inode->i_sb);
277 		ext4_xattr_inode_array_free(ea_inode_array);
278 		goto no_delete;
279 	}
280 
281 	/*
282 	 * Kill off the orphan record which ext4_truncate created.
283 	 * AKPM: I think this can be inside the above `if'.
284 	 * Note that ext4_orphan_del() has to be able to cope with the
285 	 * deletion of a non-existent orphan - this is because we don't
286 	 * know if ext4_truncate() actually created an orphan record.
287 	 * (Well, we could do this if we need to, but heck - it works)
288 	 */
289 	ext4_orphan_del(handle, inode);
290 	EXT4_I(inode)->i_dtime	= (__u32)ktime_get_real_seconds();
291 
292 	/*
293 	 * One subtle ordering requirement: if anything has gone wrong
294 	 * (transaction abort, IO errors, whatever), then we can still
295 	 * do these next steps (the fs will already have been marked as
296 	 * having errors), but we can't free the inode if the mark_dirty
297 	 * fails.
298 	 */
299 	if (ext4_mark_inode_dirty(handle, inode))
300 		/* If that failed, just do the required in-core inode clear. */
301 		ext4_clear_inode(inode);
302 	else
303 		ext4_free_inode(handle, inode);
304 	ext4_journal_stop(handle);
305 	if (freeze_protected)
306 		sb_end_intwrite(inode->i_sb);
307 	ext4_xattr_inode_array_free(ea_inode_array);
308 	return;
309 no_delete:
310 	/*
311 	 * Check out some where else accidentally dirty the evicting inode,
312 	 * which may probably cause inode use-after-free issues later.
313 	 */
314 	WARN_ON_ONCE(!list_empty_careful(&inode->i_io_list));
315 
316 	if (!list_empty(&EXT4_I(inode)->i_fc_list))
317 		ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
318 	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
319 }
320 
321 #ifdef CONFIG_QUOTA
322 qsize_t *ext4_get_reserved_space(struct inode *inode)
323 {
324 	return &EXT4_I(inode)->i_reserved_quota;
325 }
326 #endif
327 
328 /*
329  * Called with i_data_sem down, which is important since we can call
330  * ext4_discard_preallocations() from here.
331  */
332 void ext4_da_update_reserve_space(struct inode *inode,
333 					int used, int quota_claim)
334 {
335 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
336 	struct ext4_inode_info *ei = EXT4_I(inode);
337 
338 	spin_lock(&ei->i_block_reservation_lock);
339 	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
340 	if (unlikely(used > ei->i_reserved_data_blocks)) {
341 		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
342 			 "with only %d reserved data blocks",
343 			 __func__, inode->i_ino, used,
344 			 ei->i_reserved_data_blocks);
345 		WARN_ON(1);
346 		used = ei->i_reserved_data_blocks;
347 	}
348 
349 	/* Update per-inode reservations */
350 	ei->i_reserved_data_blocks -= used;
351 	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);
352 
353 	spin_unlock(&ei->i_block_reservation_lock);
354 
355 	/* Update quota subsystem for data blocks */
356 	if (quota_claim)
357 		dquot_claim_block(inode, EXT4_C2B(sbi, used));
358 	else {
359 		/*
360 		 * We did fallocate with an offset that is already delayed
361 		 * allocated. So on delayed allocated writeback we should
362 		 * not re-claim the quota for fallocated blocks.
363 		 */
364 		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
365 	}
366 
367 	/*
368 	 * If we have done all the pending block allocations and if
369 	 * there aren't any writers on the inode, we can discard the
370 	 * inode's preallocations.
371 	 */
372 	if ((ei->i_reserved_data_blocks == 0) &&
373 	    !inode_is_open_for_write(inode))
374 		ext4_discard_preallocations(inode, 0);
375 }
376 
377 static int __check_block_validity(struct inode *inode, const char *func,
378 				unsigned int line,
379 				struct ext4_map_blocks *map)
380 {
381 	if (ext4_has_feature_journal(inode->i_sb) &&
382 	    (inode->i_ino ==
383 	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
384 		return 0;
385 	if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
386 		ext4_error_inode(inode, func, line, map->m_pblk,
387 				 "lblock %lu mapped to illegal pblock %llu "
388 				 "(length %d)", (unsigned long) map->m_lblk,
389 				 map->m_pblk, map->m_len);
390 		return -EFSCORRUPTED;
391 	}
392 	return 0;
393 }
394 
395 int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
396 		       ext4_lblk_t len)
397 {
398 	int ret;
399 
400 	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
401 		return fscrypt_zeroout_range(inode, lblk, pblk, len);
402 
403 	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
404 	if (ret > 0)
405 		ret = 0;
406 
407 	return ret;
408 }
409 
410 #define check_block_validity(inode, map)	\
411 	__check_block_validity((inode), __func__, __LINE__, (map))
412 
413 #ifdef ES_AGGRESSIVE_TEST
414 static void ext4_map_blocks_es_recheck(handle_t *handle,
415 				       struct inode *inode,
416 				       struct ext4_map_blocks *es_map,
417 				       struct ext4_map_blocks *map,
418 				       int flags)
419 {
420 	int retval;
421 
422 	map->m_flags = 0;
423 	/*
424 	 * There is a race window that the result is not the same.
425 	 * e.g. xfstests #223 when dioread_nolock enables.  The reason
426 	 * is that we lookup a block mapping in extent status tree with
427 	 * out taking i_data_sem.  So at the time the unwritten extent
428 	 * could be converted.
429 	 */
430 	down_read(&EXT4_I(inode)->i_data_sem);
431 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
432 		retval = ext4_ext_map_blocks(handle, inode, map, 0);
433 	} else {
434 		retval = ext4_ind_map_blocks(handle, inode, map, 0);
435 	}
436 	up_read((&EXT4_I(inode)->i_data_sem));
437 
438 	/*
439 	 * We don't check m_len because extent will be collpased in status
440 	 * tree.  So the m_len might not equal.
441 	 */
442 	if (es_map->m_lblk != map->m_lblk ||
443 	    es_map->m_flags != map->m_flags ||
444 	    es_map->m_pblk != map->m_pblk) {
445 		printk("ES cache assertion failed for inode: %lu "
446 		       "es_cached ex [%d/%d/%llu/%x] != "
447 		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
448 		       inode->i_ino, es_map->m_lblk, es_map->m_len,
449 		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
450 		       map->m_len, map->m_pblk, map->m_flags,
451 		       retval, flags);
452 	}
453 }
454 #endif /* ES_AGGRESSIVE_TEST */
455 
456 /*
457  * The ext4_map_blocks() function tries to look up the requested blocks,
458  * and returns if the blocks are already mapped.
459  *
460  * Otherwise it takes the write lock of the i_data_sem and allocate blocks
461  * and store the allocated blocks in the result buffer head and mark it
462  * mapped.
463  *
464  * If file type is extents based, it will call ext4_ext_map_blocks(),
465  * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
466  * based files
467  *
468  * On success, it returns the number of blocks being mapped or allocated.  if
469  * create==0 and the blocks are pre-allocated and unwritten, the resulting @map
470  * is marked as unwritten. If the create == 1, it will mark @map as mapped.
471  *
472  * It returns 0 if plain look up failed (blocks have not been allocated), in
473  * that case, @map is returned as unmapped but we still do fill map->m_len to
474  * indicate the length of a hole starting at map->m_lblk.
475  *
476  * It returns the error in case of allocation failure.
477  */
478 int ext4_map_blocks(handle_t *handle, struct inode *inode,
479 		    struct ext4_map_blocks *map, int flags)
480 {
481 	struct extent_status es;
482 	int retval;
483 	int ret = 0;
484 #ifdef ES_AGGRESSIVE_TEST
485 	struct ext4_map_blocks orig_map;
486 
487 	memcpy(&orig_map, map, sizeof(*map));
488 #endif
489 
490 	map->m_flags = 0;
491 	ext_debug(inode, "flag 0x%x, max_blocks %u, logical block %lu\n",
492 		  flags, map->m_len, (unsigned long) map->m_lblk);
493 
494 	/*
495 	 * ext4_map_blocks returns an int, and m_len is an unsigned int
496 	 */
497 	if (unlikely(map->m_len > INT_MAX))
498 		map->m_len = INT_MAX;
499 
500 	/* We can handle the block number less than EXT_MAX_BLOCKS */
501 	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
502 		return -EFSCORRUPTED;
503 
504 	/* Lookup extent status tree firstly */
505 	if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) &&
506 	    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
507 		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
508 			map->m_pblk = ext4_es_pblock(&es) +
509 					map->m_lblk - es.es_lblk;
510 			map->m_flags |= ext4_es_is_written(&es) ?
511 					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
512 			retval = es.es_len - (map->m_lblk - es.es_lblk);
513 			if (retval > map->m_len)
514 				retval = map->m_len;
515 			map->m_len = retval;
516 		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
517 			map->m_pblk = 0;
518 			retval = es.es_len - (map->m_lblk - es.es_lblk);
519 			if (retval > map->m_len)
520 				retval = map->m_len;
521 			map->m_len = retval;
522 			retval = 0;
523 		} else {
524 			BUG();
525 		}
526 
527 		if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
528 			return retval;
529 #ifdef ES_AGGRESSIVE_TEST
530 		ext4_map_blocks_es_recheck(handle, inode, map,
531 					   &orig_map, flags);
532 #endif
533 		goto found;
534 	}
535 	/*
536 	 * In the query cache no-wait mode, nothing we can do more if we
537 	 * cannot find extent in the cache.
538 	 */
539 	if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
540 		return 0;
541 
542 	/*
543 	 * Try to see if we can get the block without requesting a new
544 	 * file system block.
545 	 */
546 	down_read(&EXT4_I(inode)->i_data_sem);
547 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
548 		retval = ext4_ext_map_blocks(handle, inode, map, 0);
549 	} else {
550 		retval = ext4_ind_map_blocks(handle, inode, map, 0);
551 	}
552 	if (retval > 0) {
553 		unsigned int status;
554 
555 		if (unlikely(retval != map->m_len)) {
556 			ext4_warning(inode->i_sb,
557 				     "ES len assertion failed for inode "
558 				     "%lu: retval %d != map->m_len %d",
559 				     inode->i_ino, retval, map->m_len);
560 			WARN_ON(1);
561 		}
562 
563 		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
564 				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
565 		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
566 		    !(status & EXTENT_STATUS_WRITTEN) &&
567 		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
568 				       map->m_lblk + map->m_len - 1))
569 			status |= EXTENT_STATUS_DELAYED;
570 		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
571 				      map->m_pblk, status);
572 	}
573 	up_read((&EXT4_I(inode)->i_data_sem));
574 
575 found:
576 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
577 		ret = check_block_validity(inode, map);
578 		if (ret != 0)
579 			return ret;
580 	}
581 
582 	/* If it is only a block(s) look up */
583 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
584 		return retval;
585 
586 	/*
587 	 * Returns if the blocks have already allocated
588 	 *
589 	 * Note that if blocks have been preallocated
590 	 * ext4_ext_get_block() returns the create = 0
591 	 * with buffer head unmapped.
592 	 */
593 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
594 		/*
595 		 * If we need to convert extent to unwritten
596 		 * we continue and do the actual work in
597 		 * ext4_ext_map_blocks()
598 		 */
599 		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
600 			return retval;
601 
602 	/*
603 	 * Here we clear m_flags because after allocating an new extent,
604 	 * it will be set again.
605 	 */
606 	map->m_flags &= ~EXT4_MAP_FLAGS;
607 
608 	/*
609 	 * New blocks allocate and/or writing to unwritten extent
610 	 * will possibly result in updating i_data, so we take
611 	 * the write lock of i_data_sem, and call get_block()
612 	 * with create == 1 flag.
613 	 */
614 	down_write(&EXT4_I(inode)->i_data_sem);
615 
616 	/*
617 	 * We need to check for EXT4 here because migrate
618 	 * could have changed the inode type in between
619 	 */
620 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
621 		retval = ext4_ext_map_blocks(handle, inode, map, flags);
622 	} else {
623 		retval = ext4_ind_map_blocks(handle, inode, map, flags);
624 
625 		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
626 			/*
627 			 * We allocated new blocks which will result in
628 			 * i_data's format changing.  Force the migrate
629 			 * to fail by clearing migrate flags
630 			 */
631 			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
632 		}
633 	}
634 
635 	if (retval > 0) {
636 		unsigned int status;
637 
638 		if (unlikely(retval != map->m_len)) {
639 			ext4_warning(inode->i_sb,
640 				     "ES len assertion failed for inode "
641 				     "%lu: retval %d != map->m_len %d",
642 				     inode->i_ino, retval, map->m_len);
643 			WARN_ON(1);
644 		}
645 
646 		/*
647 		 * We have to zeroout blocks before inserting them into extent
648 		 * status tree. Otherwise someone could look them up there and
649 		 * use them before they are really zeroed. We also have to
650 		 * unmap metadata before zeroing as otherwise writeback can
651 		 * overwrite zeros with stale data from block device.
652 		 */
653 		if (flags & EXT4_GET_BLOCKS_ZERO &&
654 		    map->m_flags & EXT4_MAP_MAPPED &&
655 		    map->m_flags & EXT4_MAP_NEW) {
656 			ret = ext4_issue_zeroout(inode, map->m_lblk,
657 						 map->m_pblk, map->m_len);
658 			if (ret) {
659 				retval = ret;
660 				goto out_sem;
661 			}
662 		}
663 
664 		/*
665 		 * If the extent has been zeroed out, we don't need to update
666 		 * extent status tree.
667 		 */
668 		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
669 		    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
670 			if (ext4_es_is_written(&es))
671 				goto out_sem;
672 		}
673 		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
674 				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
675 		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
676 		    !(status & EXTENT_STATUS_WRITTEN) &&
677 		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
678 				       map->m_lblk + map->m_len - 1))
679 			status |= EXTENT_STATUS_DELAYED;
680 		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
681 				      map->m_pblk, status);
682 	}
683 
684 out_sem:
685 	up_write((&EXT4_I(inode)->i_data_sem));
686 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
687 		ret = check_block_validity(inode, map);
688 		if (ret != 0)
689 			return ret;
690 
691 		/*
692 		 * Inodes with freshly allocated blocks where contents will be
693 		 * visible after transaction commit must be on transaction's
694 		 * ordered data list.
695 		 */
696 		if (map->m_flags & EXT4_MAP_NEW &&
697 		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
698 		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
699 		    !ext4_is_quota_file(inode) &&
700 		    ext4_should_order_data(inode)) {
701 			loff_t start_byte =
702 				(loff_t)map->m_lblk << inode->i_blkbits;
703 			loff_t length = (loff_t)map->m_len << inode->i_blkbits;
704 
705 			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
706 				ret = ext4_jbd2_inode_add_wait(handle, inode,
707 						start_byte, length);
708 			else
709 				ret = ext4_jbd2_inode_add_write(handle, inode,
710 						start_byte, length);
711 			if (ret)
712 				return ret;
713 		}
714 	}
715 	if (retval > 0 && (map->m_flags & EXT4_MAP_UNWRITTEN ||
716 				map->m_flags & EXT4_MAP_MAPPED))
717 		ext4_fc_track_range(handle, inode, map->m_lblk,
718 					map->m_lblk + map->m_len - 1);
719 	if (retval < 0)
720 		ext_debug(inode, "failed with err %d\n", retval);
721 	return retval;
722 }
723 
724 /*
725  * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
726  * we have to be careful as someone else may be manipulating b_state as well.
727  */
728 static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
729 {
730 	unsigned long old_state;
731 	unsigned long new_state;
732 
733 	flags &= EXT4_MAP_FLAGS;
734 
735 	/* Dummy buffer_head? Set non-atomically. */
736 	if (!bh->b_page) {
737 		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
738 		return;
739 	}
740 	/*
741 	 * Someone else may be modifying b_state. Be careful! This is ugly but
742 	 * once we get rid of using bh as a container for mapping information
743 	 * to pass to / from get_block functions, this can go away.
744 	 */
745 	old_state = READ_ONCE(bh->b_state);
746 	do {
747 		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
748 	} while (unlikely(!try_cmpxchg(&bh->b_state, &old_state, new_state)));
749 }
750 
751 static int _ext4_get_block(struct inode *inode, sector_t iblock,
752 			   struct buffer_head *bh, int flags)
753 {
754 	struct ext4_map_blocks map;
755 	int ret = 0;
756 
757 	if (ext4_has_inline_data(inode))
758 		return -ERANGE;
759 
760 	map.m_lblk = iblock;
761 	map.m_len = bh->b_size >> inode->i_blkbits;
762 
763 	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
764 			      flags);
765 	if (ret > 0) {
766 		map_bh(bh, inode->i_sb, map.m_pblk);
767 		ext4_update_bh_state(bh, map.m_flags);
768 		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
769 		ret = 0;
770 	} else if (ret == 0) {
771 		/* hole case, need to fill in bh->b_size */
772 		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
773 	}
774 	return ret;
775 }
776 
777 int ext4_get_block(struct inode *inode, sector_t iblock,
778 		   struct buffer_head *bh, int create)
779 {
780 	return _ext4_get_block(inode, iblock, bh,
781 			       create ? EXT4_GET_BLOCKS_CREATE : 0);
782 }
783 
784 /*
785  * Get block function used when preparing for buffered write if we require
786  * creating an unwritten extent if blocks haven't been allocated.  The extent
787  * will be converted to written after the IO is complete.
788  */
789 int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
790 			     struct buffer_head *bh_result, int create)
791 {
792 	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
793 		   inode->i_ino, create);
794 	return _ext4_get_block(inode, iblock, bh_result,
795 			       EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
796 }
797 
798 /* Maximum number of blocks we map for direct IO at once. */
799 #define DIO_MAX_BLOCKS 4096
800 
801 /*
802  * `handle' can be NULL if create is zero
803  */
804 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
805 				ext4_lblk_t block, int map_flags)
806 {
807 	struct ext4_map_blocks map;
808 	struct buffer_head *bh;
809 	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
810 	bool nowait = map_flags & EXT4_GET_BLOCKS_CACHED_NOWAIT;
811 	int err;
812 
813 	ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
814 		    || handle != NULL || create == 0);
815 	ASSERT(create == 0 || !nowait);
816 
817 	map.m_lblk = block;
818 	map.m_len = 1;
819 	err = ext4_map_blocks(handle, inode, &map, map_flags);
820 
821 	if (err == 0)
822 		return create ? ERR_PTR(-ENOSPC) : NULL;
823 	if (err < 0)
824 		return ERR_PTR(err);
825 
826 	if (nowait)
827 		return sb_find_get_block(inode->i_sb, map.m_pblk);
828 
829 	bh = sb_getblk(inode->i_sb, map.m_pblk);
830 	if (unlikely(!bh))
831 		return ERR_PTR(-ENOMEM);
832 	if (map.m_flags & EXT4_MAP_NEW) {
833 		ASSERT(create != 0);
834 		ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
835 			    || (handle != NULL));
836 
837 		/*
838 		 * Now that we do not always journal data, we should
839 		 * keep in mind whether this should always journal the
840 		 * new buffer as metadata.  For now, regular file
841 		 * writes use ext4_get_block instead, so it's not a
842 		 * problem.
843 		 */
844 		lock_buffer(bh);
845 		BUFFER_TRACE(bh, "call get_create_access");
846 		err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
847 						     EXT4_JTR_NONE);
848 		if (unlikely(err)) {
849 			unlock_buffer(bh);
850 			goto errout;
851 		}
852 		if (!buffer_uptodate(bh)) {
853 			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
854 			set_buffer_uptodate(bh);
855 		}
856 		unlock_buffer(bh);
857 		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
858 		err = ext4_handle_dirty_metadata(handle, inode, bh);
859 		if (unlikely(err))
860 			goto errout;
861 	} else
862 		BUFFER_TRACE(bh, "not a new buffer");
863 	return bh;
864 errout:
865 	brelse(bh);
866 	return ERR_PTR(err);
867 }
868 
869 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
870 			       ext4_lblk_t block, int map_flags)
871 {
872 	struct buffer_head *bh;
873 	int ret;
874 
875 	bh = ext4_getblk(handle, inode, block, map_flags);
876 	if (IS_ERR(bh))
877 		return bh;
878 	if (!bh || ext4_buffer_uptodate(bh))
879 		return bh;
880 
881 	ret = ext4_read_bh_lock(bh, REQ_META | REQ_PRIO, true);
882 	if (ret) {
883 		put_bh(bh);
884 		return ERR_PTR(ret);
885 	}
886 	return bh;
887 }
888 
889 /* Read a contiguous batch of blocks. */
890 int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
891 		     bool wait, struct buffer_head **bhs)
892 {
893 	int i, err;
894 
895 	for (i = 0; i < bh_count; i++) {
896 		bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
897 		if (IS_ERR(bhs[i])) {
898 			err = PTR_ERR(bhs[i]);
899 			bh_count = i;
900 			goto out_brelse;
901 		}
902 	}
903 
904 	for (i = 0; i < bh_count; i++)
905 		/* Note that NULL bhs[i] is valid because of holes. */
906 		if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
907 			ext4_read_bh_lock(bhs[i], REQ_META | REQ_PRIO, false);
908 
909 	if (!wait)
910 		return 0;
911 
912 	for (i = 0; i < bh_count; i++)
913 		if (bhs[i])
914 			wait_on_buffer(bhs[i]);
915 
916 	for (i = 0; i < bh_count; i++) {
917 		if (bhs[i] && !buffer_uptodate(bhs[i])) {
918 			err = -EIO;
919 			goto out_brelse;
920 		}
921 	}
922 	return 0;
923 
924 out_brelse:
925 	for (i = 0; i < bh_count; i++) {
926 		brelse(bhs[i]);
927 		bhs[i] = NULL;
928 	}
929 	return err;
930 }
931 
932 int ext4_walk_page_buffers(handle_t *handle, struct inode *inode,
933 			   struct buffer_head *head,
934 			   unsigned from,
935 			   unsigned to,
936 			   int *partial,
937 			   int (*fn)(handle_t *handle, struct inode *inode,
938 				     struct buffer_head *bh))
939 {
940 	struct buffer_head *bh;
941 	unsigned block_start, block_end;
942 	unsigned blocksize = head->b_size;
943 	int err, ret = 0;
944 	struct buffer_head *next;
945 
946 	for (bh = head, block_start = 0;
947 	     ret == 0 && (bh != head || !block_start);
948 	     block_start = block_end, bh = next) {
949 		next = bh->b_this_page;
950 		block_end = block_start + blocksize;
951 		if (block_end <= from || block_start >= to) {
952 			if (partial && !buffer_uptodate(bh))
953 				*partial = 1;
954 			continue;
955 		}
956 		err = (*fn)(handle, inode, bh);
957 		if (!ret)
958 			ret = err;
959 	}
960 	return ret;
961 }
962 
963 /*
964  * Helper for handling dirtying of journalled data. We also mark the folio as
965  * dirty so that writeback code knows about this page (and inode) contains
966  * dirty data. ext4_writepages() then commits appropriate transaction to
967  * make data stable.
968  */
969 static int ext4_dirty_journalled_data(handle_t *handle, struct buffer_head *bh)
970 {
971 	folio_mark_dirty(bh->b_folio);
972 	return ext4_handle_dirty_metadata(handle, NULL, bh);
973 }
974 
975 int do_journal_get_write_access(handle_t *handle, struct inode *inode,
976 				struct buffer_head *bh)
977 {
978 	int dirty = buffer_dirty(bh);
979 	int ret;
980 
981 	if (!buffer_mapped(bh) || buffer_freed(bh))
982 		return 0;
983 	/*
984 	 * __block_write_begin() could have dirtied some buffers. Clean
985 	 * the dirty bit as jbd2_journal_get_write_access() could complain
986 	 * otherwise about fs integrity issues. Setting of the dirty bit
987 	 * by __block_write_begin() isn't a real problem here as we clear
988 	 * the bit before releasing a page lock and thus writeback cannot
989 	 * ever write the buffer.
990 	 */
991 	if (dirty)
992 		clear_buffer_dirty(bh);
993 	BUFFER_TRACE(bh, "get write access");
994 	ret = ext4_journal_get_write_access(handle, inode->i_sb, bh,
995 					    EXT4_JTR_NONE);
996 	if (!ret && dirty)
997 		ret = ext4_dirty_journalled_data(handle, bh);
998 	return ret;
999 }
1000 
1001 #ifdef CONFIG_FS_ENCRYPTION
1002 static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
1003 				  get_block_t *get_block)
1004 {
1005 	unsigned from = pos & (PAGE_SIZE - 1);
1006 	unsigned to = from + len;
1007 	struct inode *inode = folio->mapping->host;
1008 	unsigned block_start, block_end;
1009 	sector_t block;
1010 	int err = 0;
1011 	unsigned blocksize = inode->i_sb->s_blocksize;
1012 	unsigned bbits;
1013 	struct buffer_head *bh, *head, *wait[2];
1014 	int nr_wait = 0;
1015 	int i;
1016 
1017 	BUG_ON(!folio_test_locked(folio));
1018 	BUG_ON(from > PAGE_SIZE);
1019 	BUG_ON(to > PAGE_SIZE);
1020 	BUG_ON(from > to);
1021 
1022 	head = folio_buffers(folio);
1023 	if (!head) {
1024 		create_empty_buffers(&folio->page, blocksize, 0);
1025 		head = folio_buffers(folio);
1026 	}
1027 	bbits = ilog2(blocksize);
1028 	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
1029 
1030 	for (bh = head, block_start = 0; bh != head || !block_start;
1031 	    block++, block_start = block_end, bh = bh->b_this_page) {
1032 		block_end = block_start + blocksize;
1033 		if (block_end <= from || block_start >= to) {
1034 			if (folio_test_uptodate(folio)) {
1035 				set_buffer_uptodate(bh);
1036 			}
1037 			continue;
1038 		}
1039 		if (buffer_new(bh))
1040 			clear_buffer_new(bh);
1041 		if (!buffer_mapped(bh)) {
1042 			WARN_ON(bh->b_size != blocksize);
1043 			err = get_block(inode, block, bh, 1);
1044 			if (err)
1045 				break;
1046 			if (buffer_new(bh)) {
1047 				if (folio_test_uptodate(folio)) {
1048 					clear_buffer_new(bh);
1049 					set_buffer_uptodate(bh);
1050 					mark_buffer_dirty(bh);
1051 					continue;
1052 				}
1053 				if (block_end > to || block_start < from)
1054 					folio_zero_segments(folio, to,
1055 							    block_end,
1056 							    block_start, from);
1057 				continue;
1058 			}
1059 		}
1060 		if (folio_test_uptodate(folio)) {
1061 			set_buffer_uptodate(bh);
1062 			continue;
1063 		}
1064 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1065 		    !buffer_unwritten(bh) &&
1066 		    (block_start < from || block_end > to)) {
1067 			ext4_read_bh_lock(bh, 0, false);
1068 			wait[nr_wait++] = bh;
1069 		}
1070 	}
1071 	/*
1072 	 * If we issued read requests, let them complete.
1073 	 */
1074 	for (i = 0; i < nr_wait; i++) {
1075 		wait_on_buffer(wait[i]);
1076 		if (!buffer_uptodate(wait[i]))
1077 			err = -EIO;
1078 	}
1079 	if (unlikely(err)) {
1080 		folio_zero_new_buffers(folio, from, to);
1081 	} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
1082 		for (i = 0; i < nr_wait; i++) {
1083 			int err2;
1084 
1085 			err2 = fscrypt_decrypt_pagecache_blocks(folio,
1086 						blocksize, bh_offset(wait[i]));
1087 			if (err2) {
1088 				clear_buffer_uptodate(wait[i]);
1089 				err = err2;
1090 			}
1091 		}
1092 	}
1093 
1094 	return err;
1095 }
1096 #endif
1097 
1098 /*
1099  * To preserve ordering, it is essential that the hole instantiation and
1100  * the data write be encapsulated in a single transaction.  We cannot
1101  * close off a transaction and start a new one between the ext4_get_block()
1102  * and the ext4_write_end().  So doing the jbd2_journal_start at the start of
1103  * ext4_write_begin() is the right place.
1104  */
1105 static int ext4_write_begin(struct file *file, struct address_space *mapping,
1106 			    loff_t pos, unsigned len,
1107 			    struct page **pagep, void **fsdata)
1108 {
1109 	struct inode *inode = mapping->host;
1110 	int ret, needed_blocks;
1111 	handle_t *handle;
1112 	int retries = 0;
1113 	struct folio *folio;
1114 	pgoff_t index;
1115 	unsigned from, to;
1116 
1117 	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
1118 		return -EIO;
1119 
1120 	trace_ext4_write_begin(inode, pos, len);
1121 	/*
1122 	 * Reserve one block more for addition to orphan list in case
1123 	 * we allocate blocks but write fails for some reason
1124 	 */
1125 	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
1126 	index = pos >> PAGE_SHIFT;
1127 	from = pos & (PAGE_SIZE - 1);
1128 	to = from + len;
1129 
1130 	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
1131 		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
1132 						    pagep);
1133 		if (ret < 0)
1134 			return ret;
1135 		if (ret == 1)
1136 			return 0;
1137 	}
1138 
1139 	/*
1140 	 * __filemap_get_folio() can take a long time if the
1141 	 * system is thrashing due to memory pressure, or if the folio
1142 	 * is being written back.  So grab it first before we start
1143 	 * the transaction handle.  This also allows us to allocate
1144 	 * the folio (if needed) without using GFP_NOFS.
1145 	 */
1146 retry_grab:
1147 	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
1148 					mapping_gfp_mask(mapping));
1149 	if (IS_ERR(folio))
1150 		return PTR_ERR(folio);
1151 	/*
1152 	 * The same as page allocation, we prealloc buffer heads before
1153 	 * starting the handle.
1154 	 */
1155 	if (!folio_buffers(folio))
1156 		create_empty_buffers(&folio->page, inode->i_sb->s_blocksize, 0);
1157 
1158 	folio_unlock(folio);
1159 
1160 retry_journal:
1161 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
1162 	if (IS_ERR(handle)) {
1163 		folio_put(folio);
1164 		return PTR_ERR(handle);
1165 	}
1166 
1167 	folio_lock(folio);
1168 	if (folio->mapping != mapping) {
1169 		/* The folio got truncated from under us */
1170 		folio_unlock(folio);
1171 		folio_put(folio);
1172 		ext4_journal_stop(handle);
1173 		goto retry_grab;
1174 	}
1175 	/* In case writeback began while the folio was unlocked */
1176 	folio_wait_stable(folio);
1177 
1178 #ifdef CONFIG_FS_ENCRYPTION
1179 	if (ext4_should_dioread_nolock(inode))
1180 		ret = ext4_block_write_begin(folio, pos, len,
1181 					     ext4_get_block_unwritten);
1182 	else
1183 		ret = ext4_block_write_begin(folio, pos, len, ext4_get_block);
1184 #else
1185 	if (ext4_should_dioread_nolock(inode))
1186 		ret = __block_write_begin(&folio->page, pos, len,
1187 					  ext4_get_block_unwritten);
1188 	else
1189 		ret = __block_write_begin(&folio->page, pos, len, ext4_get_block);
1190 #endif
1191 	if (!ret && ext4_should_journal_data(inode)) {
1192 		ret = ext4_walk_page_buffers(handle, inode,
1193 					     folio_buffers(folio), from, to,
1194 					     NULL, do_journal_get_write_access);
1195 	}
1196 
1197 	if (ret) {
1198 		bool extended = (pos + len > inode->i_size) &&
1199 				!ext4_verity_in_progress(inode);
1200 
1201 		folio_unlock(folio);
1202 		/*
1203 		 * __block_write_begin may have instantiated a few blocks
1204 		 * outside i_size.  Trim these off again. Don't need
1205 		 * i_size_read because we hold i_rwsem.
1206 		 *
1207 		 * Add inode to orphan list in case we crash before
1208 		 * truncate finishes
1209 		 */
1210 		if (extended && ext4_can_truncate(inode))
1211 			ext4_orphan_add(handle, inode);
1212 
1213 		ext4_journal_stop(handle);
1214 		if (extended) {
1215 			ext4_truncate_failed_write(inode);
1216 			/*
1217 			 * If truncate failed early the inode might
1218 			 * still be on the orphan list; we need to
1219 			 * make sure the inode is removed from the
1220 			 * orphan list in that case.
1221 			 */
1222 			if (inode->i_nlink)
1223 				ext4_orphan_del(NULL, inode);
1224 		}
1225 
1226 		if (ret == -ENOSPC &&
1227 		    ext4_should_retry_alloc(inode->i_sb, &retries))
1228 			goto retry_journal;
1229 		folio_put(folio);
1230 		return ret;
1231 	}
1232 	*pagep = &folio->page;
1233 	return ret;
1234 }
1235 
1236 /* For write_end() in data=journal mode */
1237 static int write_end_fn(handle_t *handle, struct inode *inode,
1238 			struct buffer_head *bh)
1239 {
1240 	int ret;
1241 	if (!buffer_mapped(bh) || buffer_freed(bh))
1242 		return 0;
1243 	set_buffer_uptodate(bh);
1244 	ret = ext4_dirty_journalled_data(handle, bh);
1245 	clear_buffer_meta(bh);
1246 	clear_buffer_prio(bh);
1247 	return ret;
1248 }
1249 
1250 /*
1251  * We need to pick up the new inode size which generic_commit_write gave us
1252  * `file' can be NULL - eg, when called from page_symlink().
1253  *
1254  * ext4 never places buffers on inode->i_mapping->private_list.  metadata
1255  * buffers are managed internally.
1256  */
1257 static int ext4_write_end(struct file *file,
1258 			  struct address_space *mapping,
1259 			  loff_t pos, unsigned len, unsigned copied,
1260 			  struct page *page, void *fsdata)
1261 {
1262 	struct folio *folio = page_folio(page);
1263 	handle_t *handle = ext4_journal_current_handle();
1264 	struct inode *inode = mapping->host;
1265 	loff_t old_size = inode->i_size;
1266 	int ret = 0, ret2;
1267 	int i_size_changed = 0;
1268 	bool verity = ext4_verity_in_progress(inode);
1269 
1270 	trace_ext4_write_end(inode, pos, len, copied);
1271 
1272 	if (ext4_has_inline_data(inode) &&
1273 	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
1274 		return ext4_write_inline_data_end(inode, pos, len, copied,
1275 						  folio);
1276 
1277 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1278 	/*
1279 	 * it's important to update i_size while still holding folio lock:
1280 	 * page writeout could otherwise come in and zero beyond i_size.
1281 	 *
1282 	 * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
1283 	 * blocks are being written past EOF, so skip the i_size update.
1284 	 */
1285 	if (!verity)
1286 		i_size_changed = ext4_update_inode_size(inode, pos + copied);
1287 	folio_unlock(folio);
1288 	folio_put(folio);
1289 
1290 	if (old_size < pos && !verity)
1291 		pagecache_isize_extended(inode, old_size, pos);
1292 	/*
1293 	 * Don't mark the inode dirty under folio lock. First, it unnecessarily
1294 	 * makes the holding time of folio lock longer. Second, it forces lock
1295 	 * ordering of folio lock and transaction start for journaling
1296 	 * filesystems.
1297 	 */
1298 	if (i_size_changed)
1299 		ret = ext4_mark_inode_dirty(handle, inode);
1300 
1301 	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
1302 		/* if we have allocated more blocks and copied
1303 		 * less. We will have blocks allocated outside
1304 		 * inode->i_size. So truncate them
1305 		 */
1306 		ext4_orphan_add(handle, inode);
1307 
1308 	ret2 = ext4_journal_stop(handle);
1309 	if (!ret)
1310 		ret = ret2;
1311 
1312 	if (pos + len > inode->i_size && !verity) {
1313 		ext4_truncate_failed_write(inode);
1314 		/*
1315 		 * If truncate failed early the inode might still be
1316 		 * on the orphan list; we need to make sure the inode
1317 		 * is removed from the orphan list in that case.
1318 		 */
1319 		if (inode->i_nlink)
1320 			ext4_orphan_del(NULL, inode);
1321 	}
1322 
1323 	return ret ? ret : copied;
1324 }
1325 
1326 /*
1327  * This is a private version of folio_zero_new_buffers() which doesn't
1328  * set the buffer to be dirty, since in data=journalled mode we need
1329  * to call ext4_dirty_journalled_data() instead.
1330  */
1331 static void ext4_journalled_zero_new_buffers(handle_t *handle,
1332 					    struct inode *inode,
1333 					    struct folio *folio,
1334 					    unsigned from, unsigned to)
1335 {
1336 	unsigned int block_start = 0, block_end;
1337 	struct buffer_head *head, *bh;
1338 
1339 	bh = head = folio_buffers(folio);
1340 	do {
1341 		block_end = block_start + bh->b_size;
1342 		if (buffer_new(bh)) {
1343 			if (block_end > from && block_start < to) {
1344 				if (!folio_test_uptodate(folio)) {
1345 					unsigned start, size;
1346 
1347 					start = max(from, block_start);
1348 					size = min(to, block_end) - start;
1349 
1350 					folio_zero_range(folio, start, size);
1351 					write_end_fn(handle, inode, bh);
1352 				}
1353 				clear_buffer_new(bh);
1354 			}
1355 		}
1356 		block_start = block_end;
1357 		bh = bh->b_this_page;
1358 	} while (bh != head);
1359 }
1360 
1361 static int ext4_journalled_write_end(struct file *file,
1362 				     struct address_space *mapping,
1363 				     loff_t pos, unsigned len, unsigned copied,
1364 				     struct page *page, void *fsdata)
1365 {
1366 	struct folio *folio = page_folio(page);
1367 	handle_t *handle = ext4_journal_current_handle();
1368 	struct inode *inode = mapping->host;
1369 	loff_t old_size = inode->i_size;
1370 	int ret = 0, ret2;
1371 	int partial = 0;
1372 	unsigned from, to;
1373 	int size_changed = 0;
1374 	bool verity = ext4_verity_in_progress(inode);
1375 
1376 	trace_ext4_journalled_write_end(inode, pos, len, copied);
1377 	from = pos & (PAGE_SIZE - 1);
1378 	to = from + len;
1379 
1380 	BUG_ON(!ext4_handle_valid(handle));
1381 
1382 	if (ext4_has_inline_data(inode))
1383 		return ext4_write_inline_data_end(inode, pos, len, copied,
1384 						  folio);
1385 
1386 	if (unlikely(copied < len) && !folio_test_uptodate(folio)) {
1387 		copied = 0;
1388 		ext4_journalled_zero_new_buffers(handle, inode, folio,
1389 						 from, to);
1390 	} else {
1391 		if (unlikely(copied < len))
1392 			ext4_journalled_zero_new_buffers(handle, inode, folio,
1393 							 from + copied, to);
1394 		ret = ext4_walk_page_buffers(handle, inode,
1395 					     folio_buffers(folio),
1396 					     from, from + copied, &partial,
1397 					     write_end_fn);
1398 		if (!partial)
1399 			folio_mark_uptodate(folio);
1400 	}
1401 	if (!verity)
1402 		size_changed = ext4_update_inode_size(inode, pos + copied);
1403 	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1404 	folio_unlock(folio);
1405 	folio_put(folio);
1406 
1407 	if (old_size < pos && !verity)
1408 		pagecache_isize_extended(inode, old_size, pos);
1409 
1410 	if (size_changed) {
1411 		ret2 = ext4_mark_inode_dirty(handle, inode);
1412 		if (!ret)
1413 			ret = ret2;
1414 	}
1415 
1416 	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
1417 		/* if we have allocated more blocks and copied
1418 		 * less. We will have blocks allocated outside
1419 		 * inode->i_size. So truncate them
1420 		 */
1421 		ext4_orphan_add(handle, inode);
1422 
1423 	ret2 = ext4_journal_stop(handle);
1424 	if (!ret)
1425 		ret = ret2;
1426 	if (pos + len > inode->i_size && !verity) {
1427 		ext4_truncate_failed_write(inode);
1428 		/*
1429 		 * If truncate failed early the inode might still be
1430 		 * on the orphan list; we need to make sure the inode
1431 		 * is removed from the orphan list in that case.
1432 		 */
1433 		if (inode->i_nlink)
1434 			ext4_orphan_del(NULL, inode);
1435 	}
1436 
1437 	return ret ? ret : copied;
1438 }
1439 
1440 /*
1441  * Reserve space for a single cluster
1442  */
1443 static int ext4_da_reserve_space(struct inode *inode)
1444 {
1445 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1446 	struct ext4_inode_info *ei = EXT4_I(inode);
1447 	int ret;
1448 
1449 	/*
1450 	 * We will charge metadata quota at writeout time; this saves
1451 	 * us from metadata over-estimation, though we may go over by
1452 	 * a small amount in the end.  Here we just reserve for data.
1453 	 */
1454 	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
1455 	if (ret)
1456 		return ret;
1457 
1458 	spin_lock(&ei->i_block_reservation_lock);
1459 	if (ext4_claim_free_clusters(sbi, 1, 0)) {
1460 		spin_unlock(&ei->i_block_reservation_lock);
1461 		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
1462 		return -ENOSPC;
1463 	}
1464 	ei->i_reserved_data_blocks++;
1465 	trace_ext4_da_reserve_space(inode);
1466 	spin_unlock(&ei->i_block_reservation_lock);
1467 
1468 	return 0;       /* success */
1469 }
1470 
1471 void ext4_da_release_space(struct inode *inode, int to_free)
1472 {
1473 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1474 	struct ext4_inode_info *ei = EXT4_I(inode);
1475 
1476 	if (!to_free)
1477 		return;		/* Nothing to release, exit */
1478 
1479 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1480 
1481 	trace_ext4_da_release_space(inode, to_free);
1482 	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1483 		/*
1484 		 * if there aren't enough reserved blocks, then the
1485 		 * counter is messed up somewhere.  Since this
1486 		 * function is called from invalidate page, it's
1487 		 * harmless to return without any action.
1488 		 */
1489 		ext4_warning(inode->i_sb, "ext4_da_release_space: "
1490 			 "ino %lu, to_free %d with only %d reserved "
1491 			 "data blocks", inode->i_ino, to_free,
1492 			 ei->i_reserved_data_blocks);
1493 		WARN_ON(1);
1494 		to_free = ei->i_reserved_data_blocks;
1495 	}
1496 	ei->i_reserved_data_blocks -= to_free;
1497 
1498 	/* update fs dirty data blocks counter */
1499 	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
1500 
1501 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1502 
1503 	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
1504 }
1505 
1506 /*
1507  * Delayed allocation stuff
1508  */
1509 
1510 struct mpage_da_data {
1511 	/* These are input fields for ext4_do_writepages() */
1512 	struct inode *inode;
1513 	struct writeback_control *wbc;
1514 	unsigned int can_map:1;	/* Can writepages call map blocks? */
1515 
1516 	/* These are internal state of ext4_do_writepages() */
1517 	pgoff_t first_page;	/* The first page to write */
1518 	pgoff_t next_page;	/* Current page to examine */
1519 	pgoff_t last_page;	/* Last page to examine */
1520 	/*
1521 	 * Extent to map - this can be after first_page because that can be
1522 	 * fully mapped. We somewhat abuse m_flags to store whether the extent
1523 	 * is delalloc or unwritten.
1524 	 */
1525 	struct ext4_map_blocks map;
1526 	struct ext4_io_submit io_submit;	/* IO submission data */
1527 	unsigned int do_map:1;
1528 	unsigned int scanned_until_end:1;
1529 	unsigned int journalled_more_data:1;
1530 };
1531 
1532 static void mpage_release_unused_pages(struct mpage_da_data *mpd,
1533 				       bool invalidate)
1534 {
1535 	unsigned nr, i;
1536 	pgoff_t index, end;
1537 	struct folio_batch fbatch;
1538 	struct inode *inode = mpd->inode;
1539 	struct address_space *mapping = inode->i_mapping;
1540 
1541 	/* This is necessary when next_page == 0. */
1542 	if (mpd->first_page >= mpd->next_page)
1543 		return;
1544 
1545 	mpd->scanned_until_end = 0;
1546 	index = mpd->first_page;
1547 	end   = mpd->next_page - 1;
1548 	if (invalidate) {
1549 		ext4_lblk_t start, last;
1550 		start = index << (PAGE_SHIFT - inode->i_blkbits);
1551 		last = end << (PAGE_SHIFT - inode->i_blkbits);
1552 
1553 		/*
1554 		 * avoid racing with extent status tree scans made by
1555 		 * ext4_insert_delayed_block()
1556 		 */
1557 		down_write(&EXT4_I(inode)->i_data_sem);
1558 		ext4_es_remove_extent(inode, start, last - start + 1);
1559 		up_write(&EXT4_I(inode)->i_data_sem);
1560 	}
1561 
1562 	folio_batch_init(&fbatch);
1563 	while (index <= end) {
1564 		nr = filemap_get_folios(mapping, &index, end, &fbatch);
1565 		if (nr == 0)
1566 			break;
1567 		for (i = 0; i < nr; i++) {
1568 			struct folio *folio = fbatch.folios[i];
1569 
1570 			if (folio->index < mpd->first_page)
1571 				continue;
1572 			if (folio_next_index(folio) - 1 > end)
1573 				continue;
1574 			BUG_ON(!folio_test_locked(folio));
1575 			BUG_ON(folio_test_writeback(folio));
1576 			if (invalidate) {
1577 				if (folio_mapped(folio))
1578 					folio_clear_dirty_for_io(folio);
1579 				block_invalidate_folio(folio, 0,
1580 						folio_size(folio));
1581 				folio_clear_uptodate(folio);
1582 			}
1583 			folio_unlock(folio);
1584 		}
1585 		folio_batch_release(&fbatch);
1586 	}
1587 }
1588 
1589 static void ext4_print_free_blocks(struct inode *inode)
1590 {
1591 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1592 	struct super_block *sb = inode->i_sb;
1593 	struct ext4_inode_info *ei = EXT4_I(inode);
1594 
1595 	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
1596 	       EXT4_C2B(EXT4_SB(inode->i_sb),
1597 			ext4_count_free_clusters(sb)));
1598 	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
1599 	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
1600 	       (long long) EXT4_C2B(EXT4_SB(sb),
1601 		percpu_counter_sum(&sbi->s_freeclusters_counter)));
1602 	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
1603 	       (long long) EXT4_C2B(EXT4_SB(sb),
1604 		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
1605 	ext4_msg(sb, KERN_CRIT, "Block reservation details");
1606 	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
1607 		 ei->i_reserved_data_blocks);
1608 	return;
1609 }
1610 
1611 /*
1612  * ext4_insert_delayed_block - adds a delayed block to the extents status
1613  *                             tree, incrementing the reserved cluster/block
1614  *                             count or making a pending reservation
1615  *                             where needed
1616  *
1617  * @inode - file containing the newly added block
1618  * @lblk - logical block to be added
1619  *
1620  * Returns 0 on success, negative error code on failure.
1621  */
1622 static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
1623 {
1624 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1625 	int ret;
1626 	bool allocated = false;
1627 
1628 	/*
1629 	 * If the cluster containing lblk is shared with a delayed,
1630 	 * written, or unwritten extent in a bigalloc file system, it's
1631 	 * already been accounted for and does not need to be reserved.
1632 	 * A pending reservation must be made for the cluster if it's
1633 	 * shared with a written or unwritten extent and doesn't already
1634 	 * have one.  Written and unwritten extents can be purged from the
1635 	 * extents status tree if the system is under memory pressure, so
1636 	 * it's necessary to examine the extent tree if a search of the
1637 	 * extents status tree doesn't get a match.
1638 	 */
1639 	if (sbi->s_cluster_ratio == 1) {
1640 		ret = ext4_da_reserve_space(inode);
1641 		if (ret != 0)   /* ENOSPC */
1642 			return ret;
1643 	} else {   /* bigalloc */
1644 		if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
1645 			if (!ext4_es_scan_clu(inode,
1646 					      &ext4_es_is_mapped, lblk)) {
1647 				ret = ext4_clu_mapped(inode,
1648 						      EXT4_B2C(sbi, lblk));
1649 				if (ret < 0)
1650 					return ret;
1651 				if (ret == 0) {
1652 					ret = ext4_da_reserve_space(inode);
1653 					if (ret != 0)   /* ENOSPC */
1654 						return ret;
1655 				} else {
1656 					allocated = true;
1657 				}
1658 			} else {
1659 				allocated = true;
1660 			}
1661 		}
1662 	}
1663 
1664 	ext4_es_insert_delayed_block(inode, lblk, allocated);
1665 	return 0;
1666 }
1667 
1668 /*
1669  * This function is grabs code from the very beginning of
1670  * ext4_map_blocks, but assumes that the caller is from delayed write
1671  * time. This function looks up the requested blocks and sets the
1672  * buffer delay bit under the protection of i_data_sem.
1673  */
1674 static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1675 			      struct ext4_map_blocks *map,
1676 			      struct buffer_head *bh)
1677 {
1678 	struct extent_status es;
1679 	int retval;
1680 	sector_t invalid_block = ~((sector_t) 0xffff);
1681 #ifdef ES_AGGRESSIVE_TEST
1682 	struct ext4_map_blocks orig_map;
1683 
1684 	memcpy(&orig_map, map, sizeof(*map));
1685 #endif
1686 
1687 	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1688 		invalid_block = ~0;
1689 
1690 	map->m_flags = 0;
1691 	ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
1692 		  (unsigned long) map->m_lblk);
1693 
1694 	/* Lookup extent status tree firstly */
1695 	if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
1696 		if (ext4_es_is_hole(&es)) {
1697 			retval = 0;
1698 			down_read(&EXT4_I(inode)->i_data_sem);
1699 			goto add_delayed;
1700 		}
1701 
1702 		/*
1703 		 * Delayed extent could be allocated by fallocate.
1704 		 * So we need to check it.
1705 		 */
1706 		if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
1707 			map_bh(bh, inode->i_sb, invalid_block);
1708 			set_buffer_new(bh);
1709 			set_buffer_delay(bh);
1710 			return 0;
1711 		}
1712 
1713 		map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
1714 		retval = es.es_len - (iblock - es.es_lblk);
1715 		if (retval > map->m_len)
1716 			retval = map->m_len;
1717 		map->m_len = retval;
1718 		if (ext4_es_is_written(&es))
1719 			map->m_flags |= EXT4_MAP_MAPPED;
1720 		else if (ext4_es_is_unwritten(&es))
1721 			map->m_flags |= EXT4_MAP_UNWRITTEN;
1722 		else
1723 			BUG();
1724 
1725 #ifdef ES_AGGRESSIVE_TEST
1726 		ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1727 #endif
1728 		return retval;
1729 	}
1730 
1731 	/*
1732 	 * Try to see if we can get the block without requesting a new
1733 	 * file system block.
1734 	 */
1735 	down_read(&EXT4_I(inode)->i_data_sem);
1736 	if (ext4_has_inline_data(inode))
1737 		retval = 0;
1738 	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1739 		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
1740 	else
1741 		retval = ext4_ind_map_blocks(NULL, inode, map, 0);
1742 
1743 add_delayed:
1744 	if (retval == 0) {
1745 		int ret;
1746 
1747 		/*
1748 		 * XXX: __block_prepare_write() unmaps passed block,
1749 		 * is it OK?
1750 		 */
1751 
1752 		ret = ext4_insert_delayed_block(inode, map->m_lblk);
1753 		if (ret != 0) {
1754 			retval = ret;
1755 			goto out_unlock;
1756 		}
1757 
1758 		map_bh(bh, inode->i_sb, invalid_block);
1759 		set_buffer_new(bh);
1760 		set_buffer_delay(bh);
1761 	} else if (retval > 0) {
1762 		unsigned int status;
1763 
1764 		if (unlikely(retval != map->m_len)) {
1765 			ext4_warning(inode->i_sb,
1766 				     "ES len assertion failed for inode "
1767 				     "%lu: retval %d != map->m_len %d",
1768 				     inode->i_ino, retval, map->m_len);
1769 			WARN_ON(1);
1770 		}
1771 
1772 		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
1773 				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
1774 		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1775 				      map->m_pblk, status);
1776 	}
1777 
1778 out_unlock:
1779 	up_read((&EXT4_I(inode)->i_data_sem));
1780 
1781 	return retval;
1782 }
1783 
1784 /*
1785  * This is a special get_block_t callback which is used by
1786  * ext4_da_write_begin().  It will either return mapped block or
1787  * reserve space for a single block.
1788  *
1789  * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
1790  * We also have b_blocknr = -1 and b_bdev initialized properly
1791  *
1792  * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
1793  * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
1794  * initialized properly.
1795  */
1796 int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1797 			   struct buffer_head *bh, int create)
1798 {
1799 	struct ext4_map_blocks map;
1800 	int ret = 0;
1801 
1802 	BUG_ON(create == 0);
1803 	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1804 
1805 	map.m_lblk = iblock;
1806 	map.m_len = 1;
1807 
1808 	/*
1809 	 * first, we need to know whether the block is allocated already
1810 	 * preallocated blocks are unmapped but should treated
1811 	 * the same as allocated blocks.
1812 	 */
1813 	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1814 	if (ret <= 0)
1815 		return ret;
1816 
1817 	map_bh(bh, inode->i_sb, map.m_pblk);
1818 	ext4_update_bh_state(bh, map.m_flags);
1819 
1820 	if (buffer_unwritten(bh)) {
1821 		/* A delayed write to unwritten bh should be marked
1822 		 * new and mapped.  Mapped ensures that we don't do
1823 		 * get_block multiple times when we write to the same
1824 		 * offset and new ensures that we do proper zero out
1825 		 * for partial write.
1826 		 */
1827 		set_buffer_new(bh);
1828 		set_buffer_mapped(bh);
1829 	}
1830 	return 0;
1831 }
1832 
1833 static void mpage_folio_done(struct mpage_da_data *mpd, struct folio *folio)
1834 {
1835 	mpd->first_page += folio_nr_pages(folio);
1836 	folio_unlock(folio);
1837 }
1838 
1839 static int mpage_submit_folio(struct mpage_da_data *mpd, struct folio *folio)
1840 {
1841 	size_t len;
1842 	loff_t size;
1843 	int err;
1844 
1845 	BUG_ON(folio->index != mpd->first_page);
1846 	folio_clear_dirty_for_io(folio);
1847 	/*
1848 	 * We have to be very careful here!  Nothing protects writeback path
1849 	 * against i_size changes and the page can be writeably mapped into
1850 	 * page tables. So an application can be growing i_size and writing
1851 	 * data through mmap while writeback runs. folio_clear_dirty_for_io()
1852 	 * write-protects our page in page tables and the page cannot get
1853 	 * written to again until we release folio lock. So only after
1854 	 * folio_clear_dirty_for_io() we are safe to sample i_size for
1855 	 * ext4_bio_write_folio() to zero-out tail of the written page. We rely
1856 	 * on the barrier provided by folio_test_clear_dirty() in
1857 	 * folio_clear_dirty_for_io() to make sure i_size is really sampled only
1858 	 * after page tables are updated.
1859 	 */
1860 	size = i_size_read(mpd->inode);
1861 	len = folio_size(folio);
1862 	if (folio_pos(folio) + len > size &&
1863 	    !ext4_verity_in_progress(mpd->inode))
1864 		len = size & ~PAGE_MASK;
1865 	err = ext4_bio_write_folio(&mpd->io_submit, folio, len);
1866 	if (!err)
1867 		mpd->wbc->nr_to_write--;
1868 
1869 	return err;
1870 }
1871 
1872 #define BH_FLAGS (BIT(BH_Unwritten) | BIT(BH_Delay))
1873 
1874 /*
1875  * mballoc gives us at most this number of blocks...
1876  * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
1877  * The rest of mballoc seems to handle chunks up to full group size.
1878  */
1879 #define MAX_WRITEPAGES_EXTENT_LEN 2048
1880 
1881 /*
1882  * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
1883  *
1884  * @mpd - extent of blocks
1885  * @lblk - logical number of the block in the file
1886  * @bh - buffer head we want to add to the extent
1887  *
1888  * The function is used to collect contig. blocks in the same state. If the
1889  * buffer doesn't require mapping for writeback and we haven't started the
1890  * extent of buffers to map yet, the function returns 'true' immediately - the
1891  * caller can write the buffer right away. Otherwise the function returns true
1892  * if the block has been added to the extent, false if the block couldn't be
1893  * added.
1894  */
1895 static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
1896 				   struct buffer_head *bh)
1897 {
1898 	struct ext4_map_blocks *map = &mpd->map;
1899 
1900 	/* Buffer that doesn't need mapping for writeback? */
1901 	if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
1902 	    (!buffer_delay(bh) && !buffer_unwritten(bh))) {
1903 		/* So far no extent to map => we write the buffer right away */
1904 		if (map->m_len == 0)
1905 			return true;
1906 		return false;
1907 	}
1908 
1909 	/* First block in the extent? */
1910 	if (map->m_len == 0) {
1911 		/* We cannot map unless handle is started... */
1912 		if (!mpd->do_map)
1913 			return false;
1914 		map->m_lblk = lblk;
1915 		map->m_len = 1;
1916 		map->m_flags = bh->b_state & BH_FLAGS;
1917 		return true;
1918 	}
1919 
1920 	/* Don't go larger than mballoc is willing to allocate */
1921 	if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
1922 		return false;
1923 
1924 	/* Can we merge the block to our big extent? */
1925 	if (lblk == map->m_lblk + map->m_len &&
1926 	    (bh->b_state & BH_FLAGS) == map->m_flags) {
1927 		map->m_len++;
1928 		return true;
1929 	}
1930 	return false;
1931 }
1932 
1933 /*
1934  * mpage_process_page_bufs - submit page buffers for IO or add them to extent
1935  *
1936  * @mpd - extent of blocks for mapping
1937  * @head - the first buffer in the page
1938  * @bh - buffer we should start processing from
1939  * @lblk - logical number of the block in the file corresponding to @bh
1940  *
1941  * Walk through page buffers from @bh upto @head (exclusive) and either submit
1942  * the page for IO if all buffers in this page were mapped and there's no
1943  * accumulated extent of buffers to map or add buffers in the page to the
1944  * extent of buffers to map. The function returns 1 if the caller can continue
1945  * by processing the next page, 0 if it should stop adding buffers to the
1946  * extent to map because we cannot extend it anymore. It can also return value
1947  * < 0 in case of error during IO submission.
1948  */
1949 static int mpage_process_page_bufs(struct mpage_da_data *mpd,
1950 				   struct buffer_head *head,
1951 				   struct buffer_head *bh,
1952 				   ext4_lblk_t lblk)
1953 {
1954 	struct inode *inode = mpd->inode;
1955 	int err;
1956 	ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
1957 							>> inode->i_blkbits;
1958 
1959 	if (ext4_verity_in_progress(inode))
1960 		blocks = EXT_MAX_BLOCKS;
1961 
1962 	do {
1963 		BUG_ON(buffer_locked(bh));
1964 
1965 		if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
1966 			/* Found extent to map? */
1967 			if (mpd->map.m_len)
1968 				return 0;
1969 			/* Buffer needs mapping and handle is not started? */
1970 			if (!mpd->do_map)
1971 				return 0;
1972 			/* Everything mapped so far and we hit EOF */
1973 			break;
1974 		}
1975 	} while (lblk++, (bh = bh->b_this_page) != head);
1976 	/* So far everything mapped? Submit the page for IO. */
1977 	if (mpd->map.m_len == 0) {
1978 		err = mpage_submit_folio(mpd, head->b_folio);
1979 		if (err < 0)
1980 			return err;
1981 		mpage_folio_done(mpd, head->b_folio);
1982 	}
1983 	if (lblk >= blocks) {
1984 		mpd->scanned_until_end = 1;
1985 		return 0;
1986 	}
1987 	return 1;
1988 }
1989 
1990 /*
1991  * mpage_process_folio - update folio buffers corresponding to changed extent
1992  *			 and may submit fully mapped page for IO
1993  * @mpd: description of extent to map, on return next extent to map
1994  * @folio: Contains these buffers.
1995  * @m_lblk: logical block mapping.
1996  * @m_pblk: corresponding physical mapping.
1997  * @map_bh: determines on return whether this page requires any further
1998  *		  mapping or not.
1999  *
2000  * Scan given folio buffers corresponding to changed extent and update buffer
2001  * state according to new extent state.
2002  * We map delalloc buffers to their physical location, clear unwritten bits.
2003  * If the given folio is not fully mapped, we update @mpd to the next extent in
2004  * the given folio that needs mapping & return @map_bh as true.
2005  */
2006 static int mpage_process_folio(struct mpage_da_data *mpd, struct folio *folio,
2007 			      ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk,
2008 			      bool *map_bh)
2009 {
2010 	struct buffer_head *head, *bh;
2011 	ext4_io_end_t *io_end = mpd->io_submit.io_end;
2012 	ext4_lblk_t lblk = *m_lblk;
2013 	ext4_fsblk_t pblock = *m_pblk;
2014 	int err = 0;
2015 	int blkbits = mpd->inode->i_blkbits;
2016 	ssize_t io_end_size = 0;
2017 	struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end);
2018 
2019 	bh = head = folio_buffers(folio);
2020 	do {
2021 		if (lblk < mpd->map.m_lblk)
2022 			continue;
2023 		if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2024 			/*
2025 			 * Buffer after end of mapped extent.
2026 			 * Find next buffer in the folio to map.
2027 			 */
2028 			mpd->map.m_len = 0;
2029 			mpd->map.m_flags = 0;
2030 			io_end_vec->size += io_end_size;
2031 
2032 			err = mpage_process_page_bufs(mpd, head, bh, lblk);
2033 			if (err > 0)
2034 				err = 0;
2035 			if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
2036 				io_end_vec = ext4_alloc_io_end_vec(io_end);
2037 				if (IS_ERR(io_end_vec)) {
2038 					err = PTR_ERR(io_end_vec);
2039 					goto out;
2040 				}
2041 				io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits;
2042 			}
2043 			*map_bh = true;
2044 			goto out;
2045 		}
2046 		if (buffer_delay(bh)) {
2047 			clear_buffer_delay(bh);
2048 			bh->b_blocknr = pblock++;
2049 		}
2050 		clear_buffer_unwritten(bh);
2051 		io_end_size += (1 << blkbits);
2052 	} while (lblk++, (bh = bh->b_this_page) != head);
2053 
2054 	io_end_vec->size += io_end_size;
2055 	*map_bh = false;
2056 out:
2057 	*m_lblk = lblk;
2058 	*m_pblk = pblock;
2059 	return err;
2060 }
2061 
2062 /*
2063  * mpage_map_buffers - update buffers corresponding to changed extent and
2064  *		       submit fully mapped pages for IO
2065  *
2066  * @mpd - description of extent to map, on return next extent to map
2067  *
2068  * Scan buffers corresponding to changed extent (we expect corresponding pages
2069  * to be already locked) and update buffer state according to new extent state.
2070  * We map delalloc buffers to their physical location, clear unwritten bits,
2071  * and mark buffers as uninit when we perform writes to unwritten extents
2072  * and do extent conversion after IO is finished. If the last page is not fully
2073  * mapped, we update @map to the next extent in the last page that needs
2074  * mapping. Otherwise we submit the page for IO.
2075  */
2076 static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2077 {
2078 	struct folio_batch fbatch;
2079 	unsigned nr, i;
2080 	struct inode *inode = mpd->inode;
2081 	int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
2082 	pgoff_t start, end;
2083 	ext4_lblk_t lblk;
2084 	ext4_fsblk_t pblock;
2085 	int err;
2086 	bool map_bh = false;
2087 
2088 	start = mpd->map.m_lblk >> bpp_bits;
2089 	end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2090 	lblk = start << bpp_bits;
2091 	pblock = mpd->map.m_pblk;
2092 
2093 	folio_batch_init(&fbatch);
2094 	while (start <= end) {
2095 		nr = filemap_get_folios(inode->i_mapping, &start, end, &fbatch);
2096 		if (nr == 0)
2097 			break;
2098 		for (i = 0; i < nr; i++) {
2099 			struct folio *folio = fbatch.folios[i];
2100 
2101 			err = mpage_process_folio(mpd, folio, &lblk, &pblock,
2102 						 &map_bh);
2103 			/*
2104 			 * If map_bh is true, means page may require further bh
2105 			 * mapping, or maybe the page was submitted for IO.
2106 			 * So we return to call further extent mapping.
2107 			 */
2108 			if (err < 0 || map_bh)
2109 				goto out;
2110 			/* Page fully mapped - let IO run! */
2111 			err = mpage_submit_folio(mpd, folio);
2112 			if (err < 0)
2113 				goto out;
2114 			mpage_folio_done(mpd, folio);
2115 		}
2116 		folio_batch_release(&fbatch);
2117 	}
2118 	/* Extent fully mapped and matches with page boundary. We are done. */
2119 	mpd->map.m_len = 0;
2120 	mpd->map.m_flags = 0;
2121 	return 0;
2122 out:
2123 	folio_batch_release(&fbatch);
2124 	return err;
2125 }
2126 
2127 static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2128 {
2129 	struct inode *inode = mpd->inode;
2130 	struct ext4_map_blocks *map = &mpd->map;
2131 	int get_blocks_flags;
2132 	int err, dioread_nolock;
2133 
2134 	trace_ext4_da_write_pages_extent(inode, map);
2135 	/*
2136 	 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
2137 	 * to convert an unwritten extent to be initialized (in the case
2138 	 * where we have written into one or more preallocated blocks).  It is
2139 	 * possible that we're going to need more metadata blocks than
2140 	 * previously reserved. However we must not fail because we're in
2141 	 * writeback and there is nothing we can do about it so it might result
2142 	 * in data loss.  So use reserved blocks to allocate metadata if
2143 	 * possible.
2144 	 *
2145 	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
2146 	 * the blocks in question are delalloc blocks.  This indicates
2147 	 * that the blocks and quotas has already been checked when
2148 	 * the data was copied into the page cache.
2149 	 */
2150 	get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
2151 			   EXT4_GET_BLOCKS_METADATA_NOFAIL |
2152 			   EXT4_GET_BLOCKS_IO_SUBMIT;
2153 	dioread_nolock = ext4_should_dioread_nolock(inode);
2154 	if (dioread_nolock)
2155 		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2156 	if (map->m_flags & BIT(BH_Delay))
2157 		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2158 
2159 	err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2160 	if (err < 0)
2161 		return err;
2162 	if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
2163 		if (!mpd->io_submit.io_end->handle &&
2164 		    ext4_handle_valid(handle)) {
2165 			mpd->io_submit.io_end->handle = handle->h_rsv_handle;
2166 			handle->h_rsv_handle = NULL;
2167 		}
2168 		ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
2169 	}
2170 
2171 	BUG_ON(map->m_len == 0);
2172 	return 0;
2173 }
2174 
2175 /*
2176  * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2177  *				 mpd->len and submit pages underlying it for IO
2178  *
2179  * @handle - handle for journal operations
2180  * @mpd - extent to map
2181  * @give_up_on_write - we set this to true iff there is a fatal error and there
2182  *                     is no hope of writing the data. The caller should discard
2183  *                     dirty pages to avoid infinite loops.
2184  *
2185  * The function maps extent starting at mpd->lblk of length mpd->len. If it is
2186  * delayed, blocks are allocated, if it is unwritten, we may need to convert
2187  * them to initialized or split the described range from larger unwritten
2188  * extent. Note that we need not map all the described range since allocation
2189  * can return less blocks or the range is covered by more unwritten extents. We
2190  * cannot map more because we are limited by reserved transaction credits. On
2191  * the other hand we always make sure that the last touched page is fully
2192  * mapped so that it can be written out (and thus forward progress is
2193  * guaranteed). After mapping we submit all mapped pages for IO.
2194  */
2195 static int mpage_map_and_submit_extent(handle_t *handle,
2196 				       struct mpage_da_data *mpd,
2197 				       bool *give_up_on_write)
2198 {
2199 	struct inode *inode = mpd->inode;
2200 	struct ext4_map_blocks *map = &mpd->map;
2201 	int err;
2202 	loff_t disksize;
2203 	int progress = 0;
2204 	ext4_io_end_t *io_end = mpd->io_submit.io_end;
2205 	struct ext4_io_end_vec *io_end_vec;
2206 
2207 	io_end_vec = ext4_alloc_io_end_vec(io_end);
2208 	if (IS_ERR(io_end_vec))
2209 		return PTR_ERR(io_end_vec);
2210 	io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
2211 	do {
2212 		err = mpage_map_one_extent(handle, mpd);
2213 		if (err < 0) {
2214 			struct super_block *sb = inode->i_sb;
2215 
2216 			if (ext4_forced_shutdown(sb))
2217 				goto invalidate_dirty_pages;
2218 			/*
2219 			 * Let the uper layers retry transient errors.
2220 			 * In the case of ENOSPC, if ext4_count_free_blocks()
2221 			 * is non-zero, a commit should free up blocks.
2222 			 */
2223 			if ((err == -ENOMEM) ||
2224 			    (err == -ENOSPC && ext4_count_free_clusters(sb))) {
2225 				if (progress)
2226 					goto update_disksize;
2227 				return err;
2228 			}
2229 			ext4_msg(sb, KERN_CRIT,
2230 				 "Delayed block allocation failed for "
2231 				 "inode %lu at logical offset %llu with"
2232 				 " max blocks %u with error %d",
2233 				 inode->i_ino,
2234 				 (unsigned long long)map->m_lblk,
2235 				 (unsigned)map->m_len, -err);
2236 			ext4_msg(sb, KERN_CRIT,
2237 				 "This should not happen!! Data will "
2238 				 "be lost\n");
2239 			if (err == -ENOSPC)
2240 				ext4_print_free_blocks(inode);
2241 		invalidate_dirty_pages:
2242 			*give_up_on_write = true;
2243 			return err;
2244 		}
2245 		progress = 1;
2246 		/*
2247 		 * Update buffer state, submit mapped pages, and get us new
2248 		 * extent to map
2249 		 */
2250 		err = mpage_map_and_submit_buffers(mpd);
2251 		if (err < 0)
2252 			goto update_disksize;
2253 	} while (map->m_len);
2254 
2255 update_disksize:
2256 	/*
2257 	 * Update on-disk size after IO is submitted.  Races with
2258 	 * truncate are avoided by checking i_size under i_data_sem.
2259 	 */
2260 	disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
2261 	if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
2262 		int err2;
2263 		loff_t i_size;
2264 
2265 		down_write(&EXT4_I(inode)->i_data_sem);
2266 		i_size = i_size_read(inode);
2267 		if (disksize > i_size)
2268 			disksize = i_size;
2269 		if (disksize > EXT4_I(inode)->i_disksize)
2270 			EXT4_I(inode)->i_disksize = disksize;
2271 		up_write(&EXT4_I(inode)->i_data_sem);
2272 		err2 = ext4_mark_inode_dirty(handle, inode);
2273 		if (err2) {
2274 			ext4_error_err(inode->i_sb, -err2,
2275 				       "Failed to mark inode %lu dirty",
2276 				       inode->i_ino);
2277 		}
2278 		if (!err)
2279 			err = err2;
2280 	}
2281 	return err;
2282 }
2283 
2284 /*
2285  * Calculate the total number of credits to reserve for one writepages
2286  * iteration. This is called from ext4_writepages(). We map an extent of
2287  * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
2288  * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2289  * bpp - 1 blocks in bpp different extents.
2290  */
2291 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2292 {
2293 	int bpp = ext4_journal_blocks_per_page(inode);
2294 
2295 	return ext4_meta_trans_blocks(inode,
2296 				MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
2297 }
2298 
2299 static int ext4_journal_folio_buffers(handle_t *handle, struct folio *folio,
2300 				     size_t len)
2301 {
2302 	struct buffer_head *page_bufs = folio_buffers(folio);
2303 	struct inode *inode = folio->mapping->host;
2304 	int ret, err;
2305 
2306 	ret = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
2307 				     NULL, do_journal_get_write_access);
2308 	err = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
2309 				     NULL, write_end_fn);
2310 	if (ret == 0)
2311 		ret = err;
2312 	err = ext4_jbd2_inode_add_write(handle, inode, folio_pos(folio), len);
2313 	if (ret == 0)
2314 		ret = err;
2315 	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
2316 
2317 	return ret;
2318 }
2319 
2320 static int mpage_journal_page_buffers(handle_t *handle,
2321 				      struct mpage_da_data *mpd,
2322 				      struct folio *folio)
2323 {
2324 	struct inode *inode = mpd->inode;
2325 	loff_t size = i_size_read(inode);
2326 	size_t len = folio_size(folio);
2327 
2328 	folio_clear_checked(folio);
2329 	mpd->wbc->nr_to_write--;
2330 
2331 	if (folio_pos(folio) + len > size &&
2332 	    !ext4_verity_in_progress(inode))
2333 		len = size - folio_pos(folio);
2334 
2335 	return ext4_journal_folio_buffers(handle, folio, len);
2336 }
2337 
2338 /*
2339  * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
2340  * 				 needing mapping, submit mapped pages
2341  *
2342  * @mpd - where to look for pages
2343  *
2344  * Walk dirty pages in the mapping. If they are fully mapped, submit them for
2345  * IO immediately. If we cannot map blocks, we submit just already mapped
2346  * buffers in the page for IO and keep page dirty. When we can map blocks and
2347  * we find a page which isn't mapped we start accumulating extent of buffers
2348  * underlying these pages that needs mapping (formed by either delayed or
2349  * unwritten buffers). We also lock the pages containing these buffers. The
2350  * extent found is returned in @mpd structure (starting at mpd->lblk with
2351  * length mpd->len blocks).
2352  *
2353  * Note that this function can attach bios to one io_end structure which are
2354  * neither logically nor physically contiguous. Although it may seem as an
2355  * unnecessary complication, it is actually inevitable in blocksize < pagesize
2356  * case as we need to track IO to all buffers underlying a page in one io_end.
2357  */
2358 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2359 {
2360 	struct address_space *mapping = mpd->inode->i_mapping;
2361 	struct folio_batch fbatch;
2362 	unsigned int nr_folios;
2363 	pgoff_t index = mpd->first_page;
2364 	pgoff_t end = mpd->last_page;
2365 	xa_mark_t tag;
2366 	int i, err = 0;
2367 	int blkbits = mpd->inode->i_blkbits;
2368 	ext4_lblk_t lblk;
2369 	struct buffer_head *head;
2370 	handle_t *handle = NULL;
2371 	int bpp = ext4_journal_blocks_per_page(mpd->inode);
2372 
2373 	if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
2374 		tag = PAGECACHE_TAG_TOWRITE;
2375 	else
2376 		tag = PAGECACHE_TAG_DIRTY;
2377 
2378 	mpd->map.m_len = 0;
2379 	mpd->next_page = index;
2380 	if (ext4_should_journal_data(mpd->inode)) {
2381 		handle = ext4_journal_start(mpd->inode, EXT4_HT_WRITE_PAGE,
2382 					    bpp);
2383 		if (IS_ERR(handle))
2384 			return PTR_ERR(handle);
2385 	}
2386 	folio_batch_init(&fbatch);
2387 	while (index <= end) {
2388 		nr_folios = filemap_get_folios_tag(mapping, &index, end,
2389 				tag, &fbatch);
2390 		if (nr_folios == 0)
2391 			break;
2392 
2393 		for (i = 0; i < nr_folios; i++) {
2394 			struct folio *folio = fbatch.folios[i];
2395 
2396 			/*
2397 			 * Accumulated enough dirty pages? This doesn't apply
2398 			 * to WB_SYNC_ALL mode. For integrity sync we have to
2399 			 * keep going because someone may be concurrently
2400 			 * dirtying pages, and we might have synced a lot of
2401 			 * newly appeared dirty pages, but have not synced all
2402 			 * of the old dirty pages.
2403 			 */
2404 			if (mpd->wbc->sync_mode == WB_SYNC_NONE &&
2405 			    mpd->wbc->nr_to_write <=
2406 			    mpd->map.m_len >> (PAGE_SHIFT - blkbits))
2407 				goto out;
2408 
2409 			/* If we can't merge this page, we are done. */
2410 			if (mpd->map.m_len > 0 && mpd->next_page != folio->index)
2411 				goto out;
2412 
2413 			if (handle) {
2414 				err = ext4_journal_ensure_credits(handle, bpp,
2415 								  0);
2416 				if (err < 0)
2417 					goto out;
2418 			}
2419 
2420 			folio_lock(folio);
2421 			/*
2422 			 * If the page is no longer dirty, or its mapping no
2423 			 * longer corresponds to inode we are writing (which
2424 			 * means it has been truncated or invalidated), or the
2425 			 * page is already under writeback and we are not doing
2426 			 * a data integrity writeback, skip the page
2427 			 */
2428 			if (!folio_test_dirty(folio) ||
2429 			    (folio_test_writeback(folio) &&
2430 			     (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
2431 			    unlikely(folio->mapping != mapping)) {
2432 				folio_unlock(folio);
2433 				continue;
2434 			}
2435 
2436 			folio_wait_writeback(folio);
2437 			BUG_ON(folio_test_writeback(folio));
2438 
2439 			/*
2440 			 * Should never happen but for buggy code in
2441 			 * other subsystems that call
2442 			 * set_page_dirty() without properly warning
2443 			 * the file system first.  See [1] for more
2444 			 * information.
2445 			 *
2446 			 * [1] https://lore.kernel.org/linux-mm/20180103100430.GE4911@quack2.suse.cz
2447 			 */
2448 			if (!folio_buffers(folio)) {
2449 				ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", folio->index);
2450 				folio_clear_dirty(folio);
2451 				folio_unlock(folio);
2452 				continue;
2453 			}
2454 
2455 			if (mpd->map.m_len == 0)
2456 				mpd->first_page = folio->index;
2457 			mpd->next_page = folio_next_index(folio);
2458 			/*
2459 			 * Writeout when we cannot modify metadata is simple.
2460 			 * Just submit the page. For data=journal mode we
2461 			 * first handle writeout of the page for checkpoint and
2462 			 * only after that handle delayed page dirtying. This
2463 			 * makes sure current data is checkpointed to the final
2464 			 * location before possibly journalling it again which
2465 			 * is desirable when the page is frequently dirtied
2466 			 * through a pin.
2467 			 */
2468 			if (!mpd->can_map) {
2469 				err = mpage_submit_folio(mpd, folio);
2470 				if (err < 0)
2471 					goto out;
2472 				/* Pending dirtying of journalled data? */
2473 				if (folio_test_checked(folio)) {
2474 					err = mpage_journal_page_buffers(handle,
2475 						mpd, folio);
2476 					if (err < 0)
2477 						goto out;
2478 					mpd->journalled_more_data = 1;
2479 				}
2480 				mpage_folio_done(mpd, folio);
2481 			} else {
2482 				/* Add all dirty buffers to mpd */
2483 				lblk = ((ext4_lblk_t)folio->index) <<
2484 					(PAGE_SHIFT - blkbits);
2485 				head = folio_buffers(folio);
2486 				err = mpage_process_page_bufs(mpd, head, head,
2487 						lblk);
2488 				if (err <= 0)
2489 					goto out;
2490 				err = 0;
2491 			}
2492 		}
2493 		folio_batch_release(&fbatch);
2494 		cond_resched();
2495 	}
2496 	mpd->scanned_until_end = 1;
2497 	if (handle)
2498 		ext4_journal_stop(handle);
2499 	return 0;
2500 out:
2501 	folio_batch_release(&fbatch);
2502 	if (handle)
2503 		ext4_journal_stop(handle);
2504 	return err;
2505 }
2506 
2507 static int ext4_do_writepages(struct mpage_da_data *mpd)
2508 {
2509 	struct writeback_control *wbc = mpd->wbc;
2510 	pgoff_t	writeback_index = 0;
2511 	long nr_to_write = wbc->nr_to_write;
2512 	int range_whole = 0;
2513 	int cycled = 1;
2514 	handle_t *handle = NULL;
2515 	struct inode *inode = mpd->inode;
2516 	struct address_space *mapping = inode->i_mapping;
2517 	int needed_blocks, rsv_blocks = 0, ret = 0;
2518 	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2519 	struct blk_plug plug;
2520 	bool give_up_on_write = false;
2521 
2522 	trace_ext4_writepages(inode, wbc);
2523 
2524 	/*
2525 	 * No pages to write? This is mainly a kludge to avoid starting
2526 	 * a transaction for special inodes like journal inode on last iput()
2527 	 * because that could violate lock ordering on umount
2528 	 */
2529 	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2530 		goto out_writepages;
2531 
2532 	/*
2533 	 * If the filesystem has aborted, it is read-only, so return
2534 	 * right away instead of dumping stack traces later on that
2535 	 * will obscure the real source of the problem.  We test
2536 	 * fs shutdown state instead of sb->s_flag's SB_RDONLY because
2537 	 * the latter could be true if the filesystem is mounted
2538 	 * read-only, and in that case, ext4_writepages should
2539 	 * *never* be called, so if that ever happens, we would want
2540 	 * the stack trace.
2541 	 */
2542 	if (unlikely(ext4_forced_shutdown(mapping->host->i_sb))) {
2543 		ret = -EROFS;
2544 		goto out_writepages;
2545 	}
2546 
2547 	/*
2548 	 * If we have inline data and arrive here, it means that
2549 	 * we will soon create the block for the 1st page, so
2550 	 * we'd better clear the inline data here.
2551 	 */
2552 	if (ext4_has_inline_data(inode)) {
2553 		/* Just inode will be modified... */
2554 		handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2555 		if (IS_ERR(handle)) {
2556 			ret = PTR_ERR(handle);
2557 			goto out_writepages;
2558 		}
2559 		BUG_ON(ext4_test_inode_state(inode,
2560 				EXT4_STATE_MAY_INLINE_DATA));
2561 		ext4_destroy_inline_data(handle, inode);
2562 		ext4_journal_stop(handle);
2563 	}
2564 
2565 	/*
2566 	 * data=journal mode does not do delalloc so we just need to writeout /
2567 	 * journal already mapped buffers. On the other hand we need to commit
2568 	 * transaction to make data stable. We expect all the data to be
2569 	 * already in the journal (the only exception are DMA pinned pages
2570 	 * dirtied behind our back) so we commit transaction here and run the
2571 	 * writeback loop to checkpoint them. The checkpointing is not actually
2572 	 * necessary to make data persistent *but* quite a few places (extent
2573 	 * shifting operations, fsverity, ...) depend on being able to drop
2574 	 * pagecache pages after calling filemap_write_and_wait() and for that
2575 	 * checkpointing needs to happen.
2576 	 */
2577 	if (ext4_should_journal_data(inode)) {
2578 		mpd->can_map = 0;
2579 		if (wbc->sync_mode == WB_SYNC_ALL)
2580 			ext4_fc_commit(sbi->s_journal,
2581 				       EXT4_I(inode)->i_datasync_tid);
2582 	}
2583 	mpd->journalled_more_data = 0;
2584 
2585 	if (ext4_should_dioread_nolock(inode)) {
2586 		/*
2587 		 * We may need to convert up to one extent per block in
2588 		 * the page and we may dirty the inode.
2589 		 */
2590 		rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
2591 						PAGE_SIZE >> inode->i_blkbits);
2592 	}
2593 
2594 	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2595 		range_whole = 1;
2596 
2597 	if (wbc->range_cyclic) {
2598 		writeback_index = mapping->writeback_index;
2599 		if (writeback_index)
2600 			cycled = 0;
2601 		mpd->first_page = writeback_index;
2602 		mpd->last_page = -1;
2603 	} else {
2604 		mpd->first_page = wbc->range_start >> PAGE_SHIFT;
2605 		mpd->last_page = wbc->range_end >> PAGE_SHIFT;
2606 	}
2607 
2608 	ext4_io_submit_init(&mpd->io_submit, wbc);
2609 retry:
2610 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2611 		tag_pages_for_writeback(mapping, mpd->first_page,
2612 					mpd->last_page);
2613 	blk_start_plug(&plug);
2614 
2615 	/*
2616 	 * First writeback pages that don't need mapping - we can avoid
2617 	 * starting a transaction unnecessarily and also avoid being blocked
2618 	 * in the block layer on device congestion while having transaction
2619 	 * started.
2620 	 */
2621 	mpd->do_map = 0;
2622 	mpd->scanned_until_end = 0;
2623 	mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2624 	if (!mpd->io_submit.io_end) {
2625 		ret = -ENOMEM;
2626 		goto unplug;
2627 	}
2628 	ret = mpage_prepare_extent_to_map(mpd);
2629 	/* Unlock pages we didn't use */
2630 	mpage_release_unused_pages(mpd, false);
2631 	/* Submit prepared bio */
2632 	ext4_io_submit(&mpd->io_submit);
2633 	ext4_put_io_end_defer(mpd->io_submit.io_end);
2634 	mpd->io_submit.io_end = NULL;
2635 	if (ret < 0)
2636 		goto unplug;
2637 
2638 	while (!mpd->scanned_until_end && wbc->nr_to_write > 0) {
2639 		/* For each extent of pages we use new io_end */
2640 		mpd->io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2641 		if (!mpd->io_submit.io_end) {
2642 			ret = -ENOMEM;
2643 			break;
2644 		}
2645 
2646 		WARN_ON_ONCE(!mpd->can_map);
2647 		/*
2648 		 * We have two constraints: We find one extent to map and we
2649 		 * must always write out whole page (makes a difference when
2650 		 * blocksize < pagesize) so that we don't block on IO when we
2651 		 * try to write out the rest of the page. Journalled mode is
2652 		 * not supported by delalloc.
2653 		 */
2654 		BUG_ON(ext4_should_journal_data(inode));
2655 		needed_blocks = ext4_da_writepages_trans_blocks(inode);
2656 
2657 		/* start a new transaction */
2658 		handle = ext4_journal_start_with_reserve(inode,
2659 				EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
2660 		if (IS_ERR(handle)) {
2661 			ret = PTR_ERR(handle);
2662 			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2663 			       "%ld pages, ino %lu; err %d", __func__,
2664 				wbc->nr_to_write, inode->i_ino, ret);
2665 			/* Release allocated io_end */
2666 			ext4_put_io_end(mpd->io_submit.io_end);
2667 			mpd->io_submit.io_end = NULL;
2668 			break;
2669 		}
2670 		mpd->do_map = 1;
2671 
2672 		trace_ext4_da_write_pages(inode, mpd->first_page, wbc);
2673 		ret = mpage_prepare_extent_to_map(mpd);
2674 		if (!ret && mpd->map.m_len)
2675 			ret = mpage_map_and_submit_extent(handle, mpd,
2676 					&give_up_on_write);
2677 		/*
2678 		 * Caution: If the handle is synchronous,
2679 		 * ext4_journal_stop() can wait for transaction commit
2680 		 * to finish which may depend on writeback of pages to
2681 		 * complete or on page lock to be released.  In that
2682 		 * case, we have to wait until after we have
2683 		 * submitted all the IO, released page locks we hold,
2684 		 * and dropped io_end reference (for extent conversion
2685 		 * to be able to complete) before stopping the handle.
2686 		 */
2687 		if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
2688 			ext4_journal_stop(handle);
2689 			handle = NULL;
2690 			mpd->do_map = 0;
2691 		}
2692 		/* Unlock pages we didn't use */
2693 		mpage_release_unused_pages(mpd, give_up_on_write);
2694 		/* Submit prepared bio */
2695 		ext4_io_submit(&mpd->io_submit);
2696 
2697 		/*
2698 		 * Drop our io_end reference we got from init. We have
2699 		 * to be careful and use deferred io_end finishing if
2700 		 * we are still holding the transaction as we can
2701 		 * release the last reference to io_end which may end
2702 		 * up doing unwritten extent conversion.
2703 		 */
2704 		if (handle) {
2705 			ext4_put_io_end_defer(mpd->io_submit.io_end);
2706 			ext4_journal_stop(handle);
2707 		} else
2708 			ext4_put_io_end(mpd->io_submit.io_end);
2709 		mpd->io_submit.io_end = NULL;
2710 
2711 		if (ret == -ENOSPC && sbi->s_journal) {
2712 			/*
2713 			 * Commit the transaction which would
2714 			 * free blocks released in the transaction
2715 			 * and try again
2716 			 */
2717 			jbd2_journal_force_commit_nested(sbi->s_journal);
2718 			ret = 0;
2719 			continue;
2720 		}
2721 		/* Fatal error - ENOMEM, EIO... */
2722 		if (ret)
2723 			break;
2724 	}
2725 unplug:
2726 	blk_finish_plug(&plug);
2727 	if (!ret && !cycled && wbc->nr_to_write > 0) {
2728 		cycled = 1;
2729 		mpd->last_page = writeback_index - 1;
2730 		mpd->first_page = 0;
2731 		goto retry;
2732 	}
2733 
2734 	/* Update index */
2735 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2736 		/*
2737 		 * Set the writeback_index so that range_cyclic
2738 		 * mode will write it back later
2739 		 */
2740 		mapping->writeback_index = mpd->first_page;
2741 
2742 out_writepages:
2743 	trace_ext4_writepages_result(inode, wbc, ret,
2744 				     nr_to_write - wbc->nr_to_write);
2745 	return ret;
2746 }
2747 
2748 static int ext4_writepages(struct address_space *mapping,
2749 			   struct writeback_control *wbc)
2750 {
2751 	struct super_block *sb = mapping->host->i_sb;
2752 	struct mpage_da_data mpd = {
2753 		.inode = mapping->host,
2754 		.wbc = wbc,
2755 		.can_map = 1,
2756 	};
2757 	int ret;
2758 	int alloc_ctx;
2759 
2760 	if (unlikely(ext4_forced_shutdown(sb)))
2761 		return -EIO;
2762 
2763 	alloc_ctx = ext4_writepages_down_read(sb);
2764 	ret = ext4_do_writepages(&mpd);
2765 	/*
2766 	 * For data=journal writeback we could have come across pages marked
2767 	 * for delayed dirtying (PageChecked) which were just added to the
2768 	 * running transaction. Try once more to get them to stable storage.
2769 	 */
2770 	if (!ret && mpd.journalled_more_data)
2771 		ret = ext4_do_writepages(&mpd);
2772 	ext4_writepages_up_read(sb, alloc_ctx);
2773 
2774 	return ret;
2775 }
2776 
2777 int ext4_normal_submit_inode_data_buffers(struct jbd2_inode *jinode)
2778 {
2779 	struct writeback_control wbc = {
2780 		.sync_mode = WB_SYNC_ALL,
2781 		.nr_to_write = LONG_MAX,
2782 		.range_start = jinode->i_dirty_start,
2783 		.range_end = jinode->i_dirty_end,
2784 	};
2785 	struct mpage_da_data mpd = {
2786 		.inode = jinode->i_vfs_inode,
2787 		.wbc = &wbc,
2788 		.can_map = 0,
2789 	};
2790 	return ext4_do_writepages(&mpd);
2791 }
2792 
2793 static int ext4_dax_writepages(struct address_space *mapping,
2794 			       struct writeback_control *wbc)
2795 {
2796 	int ret;
2797 	long nr_to_write = wbc->nr_to_write;
2798 	struct inode *inode = mapping->host;
2799 	int alloc_ctx;
2800 
2801 	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
2802 		return -EIO;
2803 
2804 	alloc_ctx = ext4_writepages_down_read(inode->i_sb);
2805 	trace_ext4_writepages(inode, wbc);
2806 
2807 	ret = dax_writeback_mapping_range(mapping,
2808 					  EXT4_SB(inode->i_sb)->s_daxdev, wbc);
2809 	trace_ext4_writepages_result(inode, wbc, ret,
2810 				     nr_to_write - wbc->nr_to_write);
2811 	ext4_writepages_up_read(inode->i_sb, alloc_ctx);
2812 	return ret;
2813 }
2814 
2815 static int ext4_nonda_switch(struct super_block *sb)
2816 {
2817 	s64 free_clusters, dirty_clusters;
2818 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2819 
2820 	/*
2821 	 * switch to non delalloc mode if we are running low
2822 	 * on free block. The free block accounting via percpu
2823 	 * counters can get slightly wrong with percpu_counter_batch getting
2824 	 * accumulated on each CPU without updating global counters
2825 	 * Delalloc need an accurate free block accounting. So switch
2826 	 * to non delalloc when we are near to error range.
2827 	 */
2828 	free_clusters =
2829 		percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2830 	dirty_clusters =
2831 		percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2832 	/*
2833 	 * Start pushing delalloc when 1/2 of free blocks are dirty.
2834 	 */
2835 	if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
2836 		try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
2837 
2838 	if (2 * free_clusters < 3 * dirty_clusters ||
2839 	    free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
2840 		/*
2841 		 * free block count is less than 150% of dirty blocks
2842 		 * or free blocks is less than watermark
2843 		 */
2844 		return 1;
2845 	}
2846 	return 0;
2847 }
2848 
2849 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2850 			       loff_t pos, unsigned len,
2851 			       struct page **pagep, void **fsdata)
2852 {
2853 	int ret, retries = 0;
2854 	struct folio *folio;
2855 	pgoff_t index;
2856 	struct inode *inode = mapping->host;
2857 
2858 	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
2859 		return -EIO;
2860 
2861 	index = pos >> PAGE_SHIFT;
2862 
2863 	if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) {
2864 		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2865 		return ext4_write_begin(file, mapping, pos,
2866 					len, pagep, fsdata);
2867 	}
2868 	*fsdata = (void *)0;
2869 	trace_ext4_da_write_begin(inode, pos, len);
2870 
2871 	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
2872 		ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len,
2873 						      pagep, fsdata);
2874 		if (ret < 0)
2875 			return ret;
2876 		if (ret == 1)
2877 			return 0;
2878 	}
2879 
2880 retry:
2881 	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
2882 			mapping_gfp_mask(mapping));
2883 	if (IS_ERR(folio))
2884 		return PTR_ERR(folio);
2885 
2886 	/* In case writeback began while the folio was unlocked */
2887 	folio_wait_stable(folio);
2888 
2889 #ifdef CONFIG_FS_ENCRYPTION
2890 	ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep);
2891 #else
2892 	ret = __block_write_begin(&folio->page, pos, len, ext4_da_get_block_prep);
2893 #endif
2894 	if (ret < 0) {
2895 		folio_unlock(folio);
2896 		folio_put(folio);
2897 		/*
2898 		 * block_write_begin may have instantiated a few blocks
2899 		 * outside i_size.  Trim these off again. Don't need
2900 		 * i_size_read because we hold inode lock.
2901 		 */
2902 		if (pos + len > inode->i_size)
2903 			ext4_truncate_failed_write(inode);
2904 
2905 		if (ret == -ENOSPC &&
2906 		    ext4_should_retry_alloc(inode->i_sb, &retries))
2907 			goto retry;
2908 		return ret;
2909 	}
2910 
2911 	*pagep = &folio->page;
2912 	return ret;
2913 }
2914 
2915 /*
2916  * Check if we should update i_disksize
2917  * when write to the end of file but not require block allocation
2918  */
2919 static int ext4_da_should_update_i_disksize(struct folio *folio,
2920 					    unsigned long offset)
2921 {
2922 	struct buffer_head *bh;
2923 	struct inode *inode = folio->mapping->host;
2924 	unsigned int idx;
2925 	int i;
2926 
2927 	bh = folio_buffers(folio);
2928 	idx = offset >> inode->i_blkbits;
2929 
2930 	for (i = 0; i < idx; i++)
2931 		bh = bh->b_this_page;
2932 
2933 	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
2934 		return 0;
2935 	return 1;
2936 }
2937 
2938 static int ext4_da_do_write_end(struct address_space *mapping,
2939 			loff_t pos, unsigned len, unsigned copied,
2940 			struct page *page)
2941 {
2942 	struct inode *inode = mapping->host;
2943 	loff_t old_size = inode->i_size;
2944 	bool disksize_changed = false;
2945 	loff_t new_i_size;
2946 
2947 	/*
2948 	 * block_write_end() will mark the inode as dirty with I_DIRTY_PAGES
2949 	 * flag, which all that's needed to trigger page writeback.
2950 	 */
2951 	copied = block_write_end(NULL, mapping, pos, len, copied, page, NULL);
2952 	new_i_size = pos + copied;
2953 
2954 	/*
2955 	 * It's important to update i_size while still holding page lock,
2956 	 * because page writeout could otherwise come in and zero beyond
2957 	 * i_size.
2958 	 *
2959 	 * Since we are holding inode lock, we are sure i_disksize <=
2960 	 * i_size. We also know that if i_disksize < i_size, there are
2961 	 * delalloc writes pending in the range up to i_size. If the end of
2962 	 * the current write is <= i_size, there's no need to touch
2963 	 * i_disksize since writeback will push i_disksize up to i_size
2964 	 * eventually. If the end of the current write is > i_size and
2965 	 * inside an allocated block which ext4_da_should_update_i_disksize()
2966 	 * checked, we need to update i_disksize here as certain
2967 	 * ext4_writepages() paths not allocating blocks and update i_disksize.
2968 	 */
2969 	if (new_i_size > inode->i_size) {
2970 		unsigned long end;
2971 
2972 		i_size_write(inode, new_i_size);
2973 		end = (new_i_size - 1) & (PAGE_SIZE - 1);
2974 		if (copied && ext4_da_should_update_i_disksize(page_folio(page), end)) {
2975 			ext4_update_i_disksize(inode, new_i_size);
2976 			disksize_changed = true;
2977 		}
2978 	}
2979 
2980 	unlock_page(page);
2981 	put_page(page);
2982 
2983 	if (old_size < pos)
2984 		pagecache_isize_extended(inode, old_size, pos);
2985 
2986 	if (disksize_changed) {
2987 		handle_t *handle;
2988 
2989 		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
2990 		if (IS_ERR(handle))
2991 			return PTR_ERR(handle);
2992 		ext4_mark_inode_dirty(handle, inode);
2993 		ext4_journal_stop(handle);
2994 	}
2995 
2996 	return copied;
2997 }
2998 
2999 static int ext4_da_write_end(struct file *file,
3000 			     struct address_space *mapping,
3001 			     loff_t pos, unsigned len, unsigned copied,
3002 			     struct page *page, void *fsdata)
3003 {
3004 	struct inode *inode = mapping->host;
3005 	int write_mode = (int)(unsigned long)fsdata;
3006 	struct folio *folio = page_folio(page);
3007 
3008 	if (write_mode == FALL_BACK_TO_NONDELALLOC)
3009 		return ext4_write_end(file, mapping, pos,
3010 				      len, copied, &folio->page, fsdata);
3011 
3012 	trace_ext4_da_write_end(inode, pos, len, copied);
3013 
3014 	if (write_mode != CONVERT_INLINE_DATA &&
3015 	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
3016 	    ext4_has_inline_data(inode))
3017 		return ext4_write_inline_data_end(inode, pos, len, copied,
3018 						  folio);
3019 
3020 	if (unlikely(copied < len) && !PageUptodate(page))
3021 		copied = 0;
3022 
3023 	return ext4_da_do_write_end(mapping, pos, len, copied, &folio->page);
3024 }
3025 
3026 /*
3027  * Force all delayed allocation blocks to be allocated for a given inode.
3028  */
3029 int ext4_alloc_da_blocks(struct inode *inode)
3030 {
3031 	trace_ext4_alloc_da_blocks(inode);
3032 
3033 	if (!EXT4_I(inode)->i_reserved_data_blocks)
3034 		return 0;
3035 
3036 	/*
3037 	 * We do something simple for now.  The filemap_flush() will
3038 	 * also start triggering a write of the data blocks, which is
3039 	 * not strictly speaking necessary (and for users of
3040 	 * laptop_mode, not even desirable).  However, to do otherwise
3041 	 * would require replicating code paths in:
3042 	 *
3043 	 * ext4_writepages() ->
3044 	 *    write_cache_pages() ---> (via passed in callback function)
3045 	 *        __mpage_da_writepage() -->
3046 	 *           mpage_add_bh_to_extent()
3047 	 *           mpage_da_map_blocks()
3048 	 *
3049 	 * The problem is that write_cache_pages(), located in
3050 	 * mm/page-writeback.c, marks pages clean in preparation for
3051 	 * doing I/O, which is not desirable if we're not planning on
3052 	 * doing I/O at all.
3053 	 *
3054 	 * We could call write_cache_pages(), and then redirty all of
3055 	 * the pages by calling redirty_page_for_writepage() but that
3056 	 * would be ugly in the extreme.  So instead we would need to
3057 	 * replicate parts of the code in the above functions,
3058 	 * simplifying them because we wouldn't actually intend to
3059 	 * write out the pages, but rather only collect contiguous
3060 	 * logical block extents, call the multi-block allocator, and
3061 	 * then update the buffer heads with the block allocations.
3062 	 *
3063 	 * For now, though, we'll cheat by calling filemap_flush(),
3064 	 * which will map the blocks, and start the I/O, but not
3065 	 * actually wait for the I/O to complete.
3066 	 */
3067 	return filemap_flush(inode->i_mapping);
3068 }
3069 
3070 /*
3071  * bmap() is special.  It gets used by applications such as lilo and by
3072  * the swapper to find the on-disk block of a specific piece of data.
3073  *
3074  * Naturally, this is dangerous if the block concerned is still in the
3075  * journal.  If somebody makes a swapfile on an ext4 data-journaling
3076  * filesystem and enables swap, then they may get a nasty shock when the
3077  * data getting swapped to that swapfile suddenly gets overwritten by
3078  * the original zero's written out previously to the journal and
3079  * awaiting writeback in the kernel's buffer cache.
3080  *
3081  * So, if we see any bmap calls here on a modified, data-journaled file,
3082  * take extra steps to flush any blocks which might be in the cache.
3083  */
3084 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3085 {
3086 	struct inode *inode = mapping->host;
3087 	sector_t ret = 0;
3088 
3089 	inode_lock_shared(inode);
3090 	/*
3091 	 * We can get here for an inline file via the FIBMAP ioctl
3092 	 */
3093 	if (ext4_has_inline_data(inode))
3094 		goto out;
3095 
3096 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
3097 	    (test_opt(inode->i_sb, DELALLOC) ||
3098 	     ext4_should_journal_data(inode))) {
3099 		/*
3100 		 * With delalloc or journalled data we want to sync the file so
3101 		 * that we can make sure we allocate blocks for file and data
3102 		 * is in place for the user to see it
3103 		 */
3104 		filemap_write_and_wait(mapping);
3105 	}
3106 
3107 	ret = iomap_bmap(mapping, block, &ext4_iomap_ops);
3108 
3109 out:
3110 	inode_unlock_shared(inode);
3111 	return ret;
3112 }
3113 
3114 static int ext4_read_folio(struct file *file, struct folio *folio)
3115 {
3116 	int ret = -EAGAIN;
3117 	struct inode *inode = folio->mapping->host;
3118 
3119 	trace_ext4_read_folio(inode, folio);
3120 
3121 	if (ext4_has_inline_data(inode))
3122 		ret = ext4_readpage_inline(inode, folio);
3123 
3124 	if (ret == -EAGAIN)
3125 		return ext4_mpage_readpages(inode, NULL, folio);
3126 
3127 	return ret;
3128 }
3129 
3130 static void ext4_readahead(struct readahead_control *rac)
3131 {
3132 	struct inode *inode = rac->mapping->host;
3133 
3134 	/* If the file has inline data, no need to do readahead. */
3135 	if (ext4_has_inline_data(inode))
3136 		return;
3137 
3138 	ext4_mpage_readpages(inode, rac, NULL);
3139 }
3140 
3141 static void ext4_invalidate_folio(struct folio *folio, size_t offset,
3142 				size_t length)
3143 {
3144 	trace_ext4_invalidate_folio(folio, offset, length);
3145 
3146 	/* No journalling happens on data buffers when this function is used */
3147 	WARN_ON(folio_buffers(folio) && buffer_jbd(folio_buffers(folio)));
3148 
3149 	block_invalidate_folio(folio, offset, length);
3150 }
3151 
3152 static int __ext4_journalled_invalidate_folio(struct folio *folio,
3153 					    size_t offset, size_t length)
3154 {
3155 	journal_t *journal = EXT4_JOURNAL(folio->mapping->host);
3156 
3157 	trace_ext4_journalled_invalidate_folio(folio, offset, length);
3158 
3159 	/*
3160 	 * If it's a full truncate we just forget about the pending dirtying
3161 	 */
3162 	if (offset == 0 && length == folio_size(folio))
3163 		folio_clear_checked(folio);
3164 
3165 	return jbd2_journal_invalidate_folio(journal, folio, offset, length);
3166 }
3167 
3168 /* Wrapper for aops... */
3169 static void ext4_journalled_invalidate_folio(struct folio *folio,
3170 					   size_t offset,
3171 					   size_t length)
3172 {
3173 	WARN_ON(__ext4_journalled_invalidate_folio(folio, offset, length) < 0);
3174 }
3175 
3176 static bool ext4_release_folio(struct folio *folio, gfp_t wait)
3177 {
3178 	struct inode *inode = folio->mapping->host;
3179 	journal_t *journal = EXT4_JOURNAL(inode);
3180 
3181 	trace_ext4_release_folio(inode, folio);
3182 
3183 	/* Page has dirty journalled data -> cannot release */
3184 	if (folio_test_checked(folio))
3185 		return false;
3186 	if (journal)
3187 		return jbd2_journal_try_to_free_buffers(journal, folio);
3188 	else
3189 		return try_to_free_buffers(folio);
3190 }
3191 
3192 static bool ext4_inode_datasync_dirty(struct inode *inode)
3193 {
3194 	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
3195 
3196 	if (journal) {
3197 		if (jbd2_transaction_committed(journal,
3198 			EXT4_I(inode)->i_datasync_tid))
3199 			return false;
3200 		if (test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT))
3201 			return !list_empty(&EXT4_I(inode)->i_fc_list);
3202 		return true;
3203 	}
3204 
3205 	/* Any metadata buffers to write? */
3206 	if (!list_empty(&inode->i_mapping->private_list))
3207 		return true;
3208 	return inode->i_state & I_DIRTY_DATASYNC;
3209 }
3210 
3211 static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
3212 			   struct ext4_map_blocks *map, loff_t offset,
3213 			   loff_t length, unsigned int flags)
3214 {
3215 	u8 blkbits = inode->i_blkbits;
3216 
3217 	/*
3218 	 * Writes that span EOF might trigger an I/O size update on completion,
3219 	 * so consider them to be dirty for the purpose of O_DSYNC, even if
3220 	 * there is no other metadata changes being made or are pending.
3221 	 */
3222 	iomap->flags = 0;
3223 	if (ext4_inode_datasync_dirty(inode) ||
3224 	    offset + length > i_size_read(inode))
3225 		iomap->flags |= IOMAP_F_DIRTY;
3226 
3227 	if (map->m_flags & EXT4_MAP_NEW)
3228 		iomap->flags |= IOMAP_F_NEW;
3229 
3230 	if (flags & IOMAP_DAX)
3231 		iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
3232 	else
3233 		iomap->bdev = inode->i_sb->s_bdev;
3234 	iomap->offset = (u64) map->m_lblk << blkbits;
3235 	iomap->length = (u64) map->m_len << blkbits;
3236 
3237 	if ((map->m_flags & EXT4_MAP_MAPPED) &&
3238 	    !ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3239 		iomap->flags |= IOMAP_F_MERGED;
3240 
3241 	/*
3242 	 * Flags passed to ext4_map_blocks() for direct I/O writes can result
3243 	 * in m_flags having both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN bits
3244 	 * set. In order for any allocated unwritten extents to be converted
3245 	 * into written extents correctly within the ->end_io() handler, we
3246 	 * need to ensure that the iomap->type is set appropriately. Hence, the
3247 	 * reason why we need to check whether the EXT4_MAP_UNWRITTEN bit has
3248 	 * been set first.
3249 	 */
3250 	if (map->m_flags & EXT4_MAP_UNWRITTEN) {
3251 		iomap->type = IOMAP_UNWRITTEN;
3252 		iomap->addr = (u64) map->m_pblk << blkbits;
3253 		if (flags & IOMAP_DAX)
3254 			iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
3255 	} else if (map->m_flags & EXT4_MAP_MAPPED) {
3256 		iomap->type = IOMAP_MAPPED;
3257 		iomap->addr = (u64) map->m_pblk << blkbits;
3258 		if (flags & IOMAP_DAX)
3259 			iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
3260 	} else {
3261 		iomap->type = IOMAP_HOLE;
3262 		iomap->addr = IOMAP_NULL_ADDR;
3263 	}
3264 }
3265 
3266 static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
3267 			    unsigned int flags)
3268 {
3269 	handle_t *handle;
3270 	u8 blkbits = inode->i_blkbits;
3271 	int ret, dio_credits, m_flags = 0, retries = 0;
3272 
3273 	/*
3274 	 * Trim the mapping request to the maximum value that we can map at
3275 	 * once for direct I/O.
3276 	 */
3277 	if (map->m_len > DIO_MAX_BLOCKS)
3278 		map->m_len = DIO_MAX_BLOCKS;
3279 	dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
3280 
3281 retry:
3282 	/*
3283 	 * Either we allocate blocks and then don't get an unwritten extent, so
3284 	 * in that case we have reserved enough credits. Or, the blocks are
3285 	 * already allocated and unwritten. In that case, the extent conversion
3286 	 * fits into the credits as well.
3287 	 */
3288 	handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
3289 	if (IS_ERR(handle))
3290 		return PTR_ERR(handle);
3291 
3292 	/*
3293 	 * DAX and direct I/O are the only two operations that are currently
3294 	 * supported with IOMAP_WRITE.
3295 	 */
3296 	WARN_ON(!(flags & (IOMAP_DAX | IOMAP_DIRECT)));
3297 	if (flags & IOMAP_DAX)
3298 		m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
3299 	/*
3300 	 * We use i_size instead of i_disksize here because delalloc writeback
3301 	 * can complete at any point during the I/O and subsequently push the
3302 	 * i_disksize out to i_size. This could be beyond where direct I/O is
3303 	 * happening and thus expose allocated blocks to direct I/O reads.
3304 	 */
3305 	else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode))
3306 		m_flags = EXT4_GET_BLOCKS_CREATE;
3307 	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3308 		m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;
3309 
3310 	ret = ext4_map_blocks(handle, inode, map, m_flags);
3311 
3312 	/*
3313 	 * We cannot fill holes in indirect tree based inodes as that could
3314 	 * expose stale data in the case of a crash. Use the magic error code
3315 	 * to fallback to buffered I/O.
3316 	 */
3317 	if (!m_flags && !ret)
3318 		ret = -ENOTBLK;
3319 
3320 	ext4_journal_stop(handle);
3321 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3322 		goto retry;
3323 
3324 	return ret;
3325 }
3326 
3327 
3328 static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
3329 		unsigned flags, struct iomap *iomap, struct iomap *srcmap)
3330 {
3331 	int ret;
3332 	struct ext4_map_blocks map;
3333 	u8 blkbits = inode->i_blkbits;
3334 
3335 	if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3336 		return -EINVAL;
3337 
3338 	if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
3339 		return -ERANGE;
3340 
3341 	/*
3342 	 * Calculate the first and last logical blocks respectively.
3343 	 */
3344 	map.m_lblk = offset >> blkbits;
3345 	map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3346 			  EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
3347 
3348 	if (flags & IOMAP_WRITE) {
3349 		/*
3350 		 * We check here if the blocks are already allocated, then we
3351 		 * don't need to start a journal txn and we can directly return
3352 		 * the mapping information. This could boost performance
3353 		 * especially in multi-threaded overwrite requests.
3354 		 */
3355 		if (offset + length <= i_size_read(inode)) {
3356 			ret = ext4_map_blocks(NULL, inode, &map, 0);
3357 			if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
3358 				goto out;
3359 		}
3360 		ret = ext4_iomap_alloc(inode, &map, flags);
3361 	} else {
3362 		ret = ext4_map_blocks(NULL, inode, &map, 0);
3363 	}
3364 
3365 	if (ret < 0)
3366 		return ret;
3367 out:
3368 	/*
3369 	 * When inline encryption is enabled, sometimes I/O to an encrypted file
3370 	 * has to be broken up to guarantee DUN contiguity.  Handle this by
3371 	 * limiting the length of the mapping returned.
3372 	 */
3373 	map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
3374 
3375 	ext4_set_iomap(inode, iomap, &map, offset, length, flags);
3376 
3377 	return 0;
3378 }
3379 
3380 static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset,
3381 		loff_t length, unsigned flags, struct iomap *iomap,
3382 		struct iomap *srcmap)
3383 {
3384 	int ret;
3385 
3386 	/*
3387 	 * Even for writes we don't need to allocate blocks, so just pretend
3388 	 * we are reading to save overhead of starting a transaction.
3389 	 */
3390 	flags &= ~IOMAP_WRITE;
3391 	ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap);
3392 	WARN_ON_ONCE(!ret && iomap->type != IOMAP_MAPPED);
3393 	return ret;
3394 }
3395 
3396 static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
3397 			  ssize_t written, unsigned flags, struct iomap *iomap)
3398 {
3399 	/*
3400 	 * Check to see whether an error occurred while writing out the data to
3401 	 * the allocated blocks. If so, return the magic error code so that we
3402 	 * fallback to buffered I/O and attempt to complete the remainder of
3403 	 * the I/O. Any blocks that may have been allocated in preparation for
3404 	 * the direct I/O will be reused during buffered I/O.
3405 	 */
3406 	if (flags & (IOMAP_WRITE | IOMAP_DIRECT) && written == 0)
3407 		return -ENOTBLK;
3408 
3409 	return 0;
3410 }
3411 
3412 const struct iomap_ops ext4_iomap_ops = {
3413 	.iomap_begin		= ext4_iomap_begin,
3414 	.iomap_end		= ext4_iomap_end,
3415 };
3416 
3417 const struct iomap_ops ext4_iomap_overwrite_ops = {
3418 	.iomap_begin		= ext4_iomap_overwrite_begin,
3419 	.iomap_end		= ext4_iomap_end,
3420 };
3421 
3422 static bool ext4_iomap_is_delalloc(struct inode *inode,
3423 				   struct ext4_map_blocks *map)
3424 {
3425 	struct extent_status es;
3426 	ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1;
3427 
3428 	ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
3429 				  map->m_lblk, end, &es);
3430 
3431 	if (!es.es_len || es.es_lblk > end)
3432 		return false;
3433 
3434 	if (es.es_lblk > map->m_lblk) {
3435 		map->m_len = es.es_lblk - map->m_lblk;
3436 		return false;
3437 	}
3438 
3439 	offset = map->m_lblk - es.es_lblk;
3440 	map->m_len = es.es_len - offset;
3441 
3442 	return true;
3443 }
3444 
3445 static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
3446 				   loff_t length, unsigned int flags,
3447 				   struct iomap *iomap, struct iomap *srcmap)
3448 {
3449 	int ret;
3450 	bool delalloc = false;
3451 	struct ext4_map_blocks map;
3452 	u8 blkbits = inode->i_blkbits;
3453 
3454 	if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3455 		return -EINVAL;
3456 
3457 	if (ext4_has_inline_data(inode)) {
3458 		ret = ext4_inline_data_iomap(inode, iomap);
3459 		if (ret != -EAGAIN) {
3460 			if (ret == 0 && offset >= iomap->length)
3461 				ret = -ENOENT;
3462 			return ret;
3463 		}
3464 	}
3465 
3466 	/*
3467 	 * Calculate the first and last logical block respectively.
3468 	 */
3469 	map.m_lblk = offset >> blkbits;
3470 	map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3471 			  EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
3472 
3473 	/*
3474 	 * Fiemap callers may call for offset beyond s_bitmap_maxbytes.
3475 	 * So handle it here itself instead of querying ext4_map_blocks().
3476 	 * Since ext4_map_blocks() will warn about it and will return
3477 	 * -EIO error.
3478 	 */
3479 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
3480 		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3481 
3482 		if (offset >= sbi->s_bitmap_maxbytes) {
3483 			map.m_flags = 0;
3484 			goto set_iomap;
3485 		}
3486 	}
3487 
3488 	ret = ext4_map_blocks(NULL, inode, &map, 0);
3489 	if (ret < 0)
3490 		return ret;
3491 	if (ret == 0)
3492 		delalloc = ext4_iomap_is_delalloc(inode, &map);
3493 
3494 set_iomap:
3495 	ext4_set_iomap(inode, iomap, &map, offset, length, flags);
3496 	if (delalloc && iomap->type == IOMAP_HOLE)
3497 		iomap->type = IOMAP_DELALLOC;
3498 
3499 	return 0;
3500 }
3501 
3502 const struct iomap_ops ext4_iomap_report_ops = {
3503 	.iomap_begin = ext4_iomap_begin_report,
3504 };
3505 
3506 /*
3507  * For data=journal mode, folio should be marked dirty only when it was
3508  * writeably mapped. When that happens, it was already attached to the
3509  * transaction and marked as jbddirty (we take care of this in
3510  * ext4_page_mkwrite()). On transaction commit, we writeprotect page mappings
3511  * so we should have nothing to do here, except for the case when someone
3512  * had the page pinned and dirtied the page through this pin (e.g. by doing
3513  * direct IO to it). In that case we'd need to attach buffers here to the
3514  * transaction but we cannot due to lock ordering.  We cannot just dirty the
3515  * folio and leave attached buffers clean, because the buffers' dirty state is
3516  * "definitive".  We cannot just set the buffers dirty or jbddirty because all
3517  * the journalling code will explode.  So what we do is to mark the folio
3518  * "pending dirty" and next time ext4_writepages() is called, attach buffers
3519  * to the transaction appropriately.
3520  */
3521 static bool ext4_journalled_dirty_folio(struct address_space *mapping,
3522 		struct folio *folio)
3523 {
3524 	WARN_ON_ONCE(!folio_buffers(folio));
3525 	if (folio_maybe_dma_pinned(folio))
3526 		folio_set_checked(folio);
3527 	return filemap_dirty_folio(mapping, folio);
3528 }
3529 
3530 static bool ext4_dirty_folio(struct address_space *mapping, struct folio *folio)
3531 {
3532 	WARN_ON_ONCE(!folio_test_locked(folio) && !folio_test_dirty(folio));
3533 	WARN_ON_ONCE(!folio_buffers(folio));
3534 	return block_dirty_folio(mapping, folio);
3535 }
3536 
3537 static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
3538 				    struct file *file, sector_t *span)
3539 {
3540 	return iomap_swapfile_activate(sis, file, span,
3541 				       &ext4_iomap_report_ops);
3542 }
3543 
3544 static const struct address_space_operations ext4_aops = {
3545 	.read_folio		= ext4_read_folio,
3546 	.readahead		= ext4_readahead,
3547 	.writepages		= ext4_writepages,
3548 	.write_begin		= ext4_write_begin,
3549 	.write_end		= ext4_write_end,
3550 	.dirty_folio		= ext4_dirty_folio,
3551 	.bmap			= ext4_bmap,
3552 	.invalidate_folio	= ext4_invalidate_folio,
3553 	.release_folio		= ext4_release_folio,
3554 	.direct_IO		= noop_direct_IO,
3555 	.migrate_folio		= buffer_migrate_folio,
3556 	.is_partially_uptodate  = block_is_partially_uptodate,
3557 	.error_remove_page	= generic_error_remove_page,
3558 	.swap_activate		= ext4_iomap_swap_activate,
3559 };
3560 
3561 static const struct address_space_operations ext4_journalled_aops = {
3562 	.read_folio		= ext4_read_folio,
3563 	.readahead		= ext4_readahead,
3564 	.writepages		= ext4_writepages,
3565 	.write_begin		= ext4_write_begin,
3566 	.write_end		= ext4_journalled_write_end,
3567 	.dirty_folio		= ext4_journalled_dirty_folio,
3568 	.bmap			= ext4_bmap,
3569 	.invalidate_folio	= ext4_journalled_invalidate_folio,
3570 	.release_folio		= ext4_release_folio,
3571 	.direct_IO		= noop_direct_IO,
3572 	.migrate_folio		= buffer_migrate_folio_norefs,
3573 	.is_partially_uptodate  = block_is_partially_uptodate,
3574 	.error_remove_page	= generic_error_remove_page,
3575 	.swap_activate		= ext4_iomap_swap_activate,
3576 };
3577 
3578 static const struct address_space_operations ext4_da_aops = {
3579 	.read_folio		= ext4_read_folio,
3580 	.readahead		= ext4_readahead,
3581 	.writepages		= ext4_writepages,
3582 	.write_begin		= ext4_da_write_begin,
3583 	.write_end		= ext4_da_write_end,
3584 	.dirty_folio		= ext4_dirty_folio,
3585 	.bmap			= ext4_bmap,
3586 	.invalidate_folio	= ext4_invalidate_folio,
3587 	.release_folio		= ext4_release_folio,
3588 	.direct_IO		= noop_direct_IO,
3589 	.migrate_folio		= buffer_migrate_folio,
3590 	.is_partially_uptodate  = block_is_partially_uptodate,
3591 	.error_remove_page	= generic_error_remove_page,
3592 	.swap_activate		= ext4_iomap_swap_activate,
3593 };
3594 
3595 static const struct address_space_operations ext4_dax_aops = {
3596 	.writepages		= ext4_dax_writepages,
3597 	.direct_IO		= noop_direct_IO,
3598 	.dirty_folio		= noop_dirty_folio,
3599 	.bmap			= ext4_bmap,
3600 	.swap_activate		= ext4_iomap_swap_activate,
3601 };
3602 
3603 void ext4_set_aops(struct inode *inode)
3604 {
3605 	switch (ext4_inode_journal_mode(inode)) {
3606 	case EXT4_INODE_ORDERED_DATA_MODE:
3607 	case EXT4_INODE_WRITEBACK_DATA_MODE:
3608 		break;
3609 	case EXT4_INODE_JOURNAL_DATA_MODE:
3610 		inode->i_mapping->a_ops = &ext4_journalled_aops;
3611 		return;
3612 	default:
3613 		BUG();
3614 	}
3615 	if (IS_DAX(inode))
3616 		inode->i_mapping->a_ops = &ext4_dax_aops;
3617 	else if (test_opt(inode->i_sb, DELALLOC))
3618 		inode->i_mapping->a_ops = &ext4_da_aops;
3619 	else
3620 		inode->i_mapping->a_ops = &ext4_aops;
3621 }
3622 
3623 static int __ext4_block_zero_page_range(handle_t *handle,
3624 		struct address_space *mapping, loff_t from, loff_t length)
3625 {
3626 	ext4_fsblk_t index = from >> PAGE_SHIFT;
3627 	unsigned offset = from & (PAGE_SIZE-1);
3628 	unsigned blocksize, pos;
3629 	ext4_lblk_t iblock;
3630 	struct inode *inode = mapping->host;
3631 	struct buffer_head *bh;
3632 	struct folio *folio;
3633 	int err = 0;
3634 
3635 	folio = __filemap_get_folio(mapping, from >> PAGE_SHIFT,
3636 				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
3637 				    mapping_gfp_constraint(mapping, ~__GFP_FS));
3638 	if (IS_ERR(folio))
3639 		return PTR_ERR(folio);
3640 
3641 	blocksize = inode->i_sb->s_blocksize;
3642 
3643 	iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
3644 
3645 	bh = folio_buffers(folio);
3646 	if (!bh) {
3647 		create_empty_buffers(&folio->page, blocksize, 0);
3648 		bh = folio_buffers(folio);
3649 	}
3650 
3651 	/* Find the buffer that contains "offset" */
3652 	pos = blocksize;
3653 	while (offset >= pos) {
3654 		bh = bh->b_this_page;
3655 		iblock++;
3656 		pos += blocksize;
3657 	}
3658 	if (buffer_freed(bh)) {
3659 		BUFFER_TRACE(bh, "freed: skip");
3660 		goto unlock;
3661 	}
3662 	if (!buffer_mapped(bh)) {
3663 		BUFFER_TRACE(bh, "unmapped");
3664 		ext4_get_block(inode, iblock, bh, 0);
3665 		/* unmapped? It's a hole - nothing to do */
3666 		if (!buffer_mapped(bh)) {
3667 			BUFFER_TRACE(bh, "still unmapped");
3668 			goto unlock;
3669 		}
3670 	}
3671 
3672 	/* Ok, it's mapped. Make sure it's up-to-date */
3673 	if (folio_test_uptodate(folio))
3674 		set_buffer_uptodate(bh);
3675 
3676 	if (!buffer_uptodate(bh)) {
3677 		err = ext4_read_bh_lock(bh, 0, true);
3678 		if (err)
3679 			goto unlock;
3680 		if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
3681 			/* We expect the key to be set. */
3682 			BUG_ON(!fscrypt_has_encryption_key(inode));
3683 			err = fscrypt_decrypt_pagecache_blocks(folio,
3684 							       blocksize,
3685 							       bh_offset(bh));
3686 			if (err) {
3687 				clear_buffer_uptodate(bh);
3688 				goto unlock;
3689 			}
3690 		}
3691 	}
3692 	if (ext4_should_journal_data(inode)) {
3693 		BUFFER_TRACE(bh, "get write access");
3694 		err = ext4_journal_get_write_access(handle, inode->i_sb, bh,
3695 						    EXT4_JTR_NONE);
3696 		if (err)
3697 			goto unlock;
3698 	}
3699 	folio_zero_range(folio, offset, length);
3700 	BUFFER_TRACE(bh, "zeroed end of block");
3701 
3702 	if (ext4_should_journal_data(inode)) {
3703 		err = ext4_dirty_journalled_data(handle, bh);
3704 	} else {
3705 		err = 0;
3706 		mark_buffer_dirty(bh);
3707 		if (ext4_should_order_data(inode))
3708 			err = ext4_jbd2_inode_add_write(handle, inode, from,
3709 					length);
3710 	}
3711 
3712 unlock:
3713 	folio_unlock(folio);
3714 	folio_put(folio);
3715 	return err;
3716 }
3717 
3718 /*
3719  * ext4_block_zero_page_range() zeros out a mapping of length 'length'
3720  * starting from file offset 'from'.  The range to be zero'd must
3721  * be contained with in one block.  If the specified range exceeds
3722  * the end of the block it will be shortened to end of the block
3723  * that corresponds to 'from'
3724  */
3725 static int ext4_block_zero_page_range(handle_t *handle,
3726 		struct address_space *mapping, loff_t from, loff_t length)
3727 {
3728 	struct inode *inode = mapping->host;
3729 	unsigned offset = from & (PAGE_SIZE-1);
3730 	unsigned blocksize = inode->i_sb->s_blocksize;
3731 	unsigned max = blocksize - (offset & (blocksize - 1));
3732 
3733 	/*
3734 	 * correct length if it does not fall between
3735 	 * 'from' and the end of the block
3736 	 */
3737 	if (length > max || length < 0)
3738 		length = max;
3739 
3740 	if (IS_DAX(inode)) {
3741 		return dax_zero_range(inode, from, length, NULL,
3742 				      &ext4_iomap_ops);
3743 	}
3744 	return __ext4_block_zero_page_range(handle, mapping, from, length);
3745 }
3746 
3747 /*
3748  * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3749  * up to the end of the block which corresponds to `from'.
3750  * This required during truncate. We need to physically zero the tail end
3751  * of that block so it doesn't yield old data if the file is later grown.
3752  */
3753 static int ext4_block_truncate_page(handle_t *handle,
3754 		struct address_space *mapping, loff_t from)
3755 {
3756 	unsigned offset = from & (PAGE_SIZE-1);
3757 	unsigned length;
3758 	unsigned blocksize;
3759 	struct inode *inode = mapping->host;
3760 
3761 	/* If we are processing an encrypted inode during orphan list handling */
3762 	if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
3763 		return 0;
3764 
3765 	blocksize = inode->i_sb->s_blocksize;
3766 	length = blocksize - (offset & (blocksize - 1));
3767 
3768 	return ext4_block_zero_page_range(handle, mapping, from, length);
3769 }
3770 
3771 int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
3772 			     loff_t lstart, loff_t length)
3773 {
3774 	struct super_block *sb = inode->i_sb;
3775 	struct address_space *mapping = inode->i_mapping;
3776 	unsigned partial_start, partial_end;
3777 	ext4_fsblk_t start, end;
3778 	loff_t byte_end = (lstart + length - 1);
3779 	int err = 0;
3780 
3781 	partial_start = lstart & (sb->s_blocksize - 1);
3782 	partial_end = byte_end & (sb->s_blocksize - 1);
3783 
3784 	start = lstart >> sb->s_blocksize_bits;
3785 	end = byte_end >> sb->s_blocksize_bits;
3786 
3787 	/* Handle partial zero within the single block */
3788 	if (start == end &&
3789 	    (partial_start || (partial_end != sb->s_blocksize - 1))) {
3790 		err = ext4_block_zero_page_range(handle, mapping,
3791 						 lstart, length);
3792 		return err;
3793 	}
3794 	/* Handle partial zero out on the start of the range */
3795 	if (partial_start) {
3796 		err = ext4_block_zero_page_range(handle, mapping,
3797 						 lstart, sb->s_blocksize);
3798 		if (err)
3799 			return err;
3800 	}
3801 	/* Handle partial zero out on the end of the range */
3802 	if (partial_end != sb->s_blocksize - 1)
3803 		err = ext4_block_zero_page_range(handle, mapping,
3804 						 byte_end - partial_end,
3805 						 partial_end + 1);
3806 	return err;
3807 }
3808 
3809 int ext4_can_truncate(struct inode *inode)
3810 {
3811 	if (S_ISREG(inode->i_mode))
3812 		return 1;
3813 	if (S_ISDIR(inode->i_mode))
3814 		return 1;
3815 	if (S_ISLNK(inode->i_mode))
3816 		return !ext4_inode_is_fast_symlink(inode);
3817 	return 0;
3818 }
3819 
3820 /*
3821  * We have to make sure i_disksize gets properly updated before we truncate
3822  * page cache due to hole punching or zero range. Otherwise i_disksize update
3823  * can get lost as it may have been postponed to submission of writeback but
3824  * that will never happen after we truncate page cache.
3825  */
3826 int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
3827 				      loff_t len)
3828 {
3829 	handle_t *handle;
3830 	int ret;
3831 
3832 	loff_t size = i_size_read(inode);
3833 
3834 	WARN_ON(!inode_is_locked(inode));
3835 	if (offset > size || offset + len < size)
3836 		return 0;
3837 
3838 	if (EXT4_I(inode)->i_disksize >= size)
3839 		return 0;
3840 
3841 	handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
3842 	if (IS_ERR(handle))
3843 		return PTR_ERR(handle);
3844 	ext4_update_i_disksize(inode, size);
3845 	ret = ext4_mark_inode_dirty(handle, inode);
3846 	ext4_journal_stop(handle);
3847 
3848 	return ret;
3849 }
3850 
3851 static void ext4_wait_dax_page(struct inode *inode)
3852 {
3853 	filemap_invalidate_unlock(inode->i_mapping);
3854 	schedule();
3855 	filemap_invalidate_lock(inode->i_mapping);
3856 }
3857 
3858 int ext4_break_layouts(struct inode *inode)
3859 {
3860 	struct page *page;
3861 	int error;
3862 
3863 	if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock)))
3864 		return -EINVAL;
3865 
3866 	do {
3867 		page = dax_layout_busy_page(inode->i_mapping);
3868 		if (!page)
3869 			return 0;
3870 
3871 		error = ___wait_var_event(&page->_refcount,
3872 				atomic_read(&page->_refcount) == 1,
3873 				TASK_INTERRUPTIBLE, 0, 0,
3874 				ext4_wait_dax_page(inode));
3875 	} while (error == 0);
3876 
3877 	return error;
3878 }
3879 
3880 /*
3881  * ext4_punch_hole: punches a hole in a file by releasing the blocks
3882  * associated with the given offset and length
3883  *
3884  * @inode:  File inode
3885  * @offset: The offset where the hole will begin
3886  * @len:    The length of the hole
3887  *
3888  * Returns: 0 on success or negative on failure
3889  */
3890 
3891 int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
3892 {
3893 	struct inode *inode = file_inode(file);
3894 	struct super_block *sb = inode->i_sb;
3895 	ext4_lblk_t first_block, stop_block;
3896 	struct address_space *mapping = inode->i_mapping;
3897 	loff_t first_block_offset, last_block_offset, max_length;
3898 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3899 	handle_t *handle;
3900 	unsigned int credits;
3901 	int ret = 0, ret2 = 0;
3902 
3903 	trace_ext4_punch_hole(inode, offset, length, 0);
3904 
3905 	/*
3906 	 * Write out all dirty pages to avoid race conditions
3907 	 * Then release them.
3908 	 */
3909 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
3910 		ret = filemap_write_and_wait_range(mapping, offset,
3911 						   offset + length - 1);
3912 		if (ret)
3913 			return ret;
3914 	}
3915 
3916 	inode_lock(inode);
3917 
3918 	/* No need to punch hole beyond i_size */
3919 	if (offset >= inode->i_size)
3920 		goto out_mutex;
3921 
3922 	/*
3923 	 * If the hole extends beyond i_size, set the hole
3924 	 * to end after the page that contains i_size
3925 	 */
3926 	if (offset + length > inode->i_size) {
3927 		length = inode->i_size +
3928 		   PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
3929 		   offset;
3930 	}
3931 
3932 	/*
3933 	 * For punch hole the length + offset needs to be within one block
3934 	 * before last range. Adjust the length if it goes beyond that limit.
3935 	 */
3936 	max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
3937 	if (offset + length > max_length)
3938 		length = max_length - offset;
3939 
3940 	if (offset & (sb->s_blocksize - 1) ||
3941 	    (offset + length) & (sb->s_blocksize - 1)) {
3942 		/*
3943 		 * Attach jinode to inode for jbd2 if we do any zeroing of
3944 		 * partial block
3945 		 */
3946 		ret = ext4_inode_attach_jinode(inode);
3947 		if (ret < 0)
3948 			goto out_mutex;
3949 
3950 	}
3951 
3952 	/* Wait all existing dio workers, newcomers will block on i_rwsem */
3953 	inode_dio_wait(inode);
3954 
3955 	ret = file_modified(file);
3956 	if (ret)
3957 		goto out_mutex;
3958 
3959 	/*
3960 	 * Prevent page faults from reinstantiating pages we have released from
3961 	 * page cache.
3962 	 */
3963 	filemap_invalidate_lock(mapping);
3964 
3965 	ret = ext4_break_layouts(inode);
3966 	if (ret)
3967 		goto out_dio;
3968 
3969 	first_block_offset = round_up(offset, sb->s_blocksize);
3970 	last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
3971 
3972 	/* Now release the pages and zero block aligned part of pages*/
3973 	if (last_block_offset > first_block_offset) {
3974 		ret = ext4_update_disksize_before_punch(inode, offset, length);
3975 		if (ret)
3976 			goto out_dio;
3977 		truncate_pagecache_range(inode, first_block_offset,
3978 					 last_block_offset);
3979 	}
3980 
3981 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3982 		credits = ext4_writepage_trans_blocks(inode);
3983 	else
3984 		credits = ext4_blocks_for_truncate(inode);
3985 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
3986 	if (IS_ERR(handle)) {
3987 		ret = PTR_ERR(handle);
3988 		ext4_std_error(sb, ret);
3989 		goto out_dio;
3990 	}
3991 
3992 	ret = ext4_zero_partial_blocks(handle, inode, offset,
3993 				       length);
3994 	if (ret)
3995 		goto out_stop;
3996 
3997 	first_block = (offset + sb->s_blocksize - 1) >>
3998 		EXT4_BLOCK_SIZE_BITS(sb);
3999 	stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
4000 
4001 	/* If there are blocks to remove, do it */
4002 	if (stop_block > first_block) {
4003 
4004 		down_write(&EXT4_I(inode)->i_data_sem);
4005 		ext4_discard_preallocations(inode, 0);
4006 
4007 		ext4_es_remove_extent(inode, first_block,
4008 				      stop_block - first_block);
4009 
4010 		if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4011 			ret = ext4_ext_remove_space(inode, first_block,
4012 						    stop_block - 1);
4013 		else
4014 			ret = ext4_ind_remove_space(handle, inode, first_block,
4015 						    stop_block);
4016 
4017 		up_write(&EXT4_I(inode)->i_data_sem);
4018 	}
4019 	ext4_fc_track_range(handle, inode, first_block, stop_block);
4020 	if (IS_SYNC(inode))
4021 		ext4_handle_sync(handle);
4022 
4023 	inode->i_mtime = inode_set_ctime_current(inode);
4024 	ret2 = ext4_mark_inode_dirty(handle, inode);
4025 	if (unlikely(ret2))
4026 		ret = ret2;
4027 	if (ret >= 0)
4028 		ext4_update_inode_fsync_trans(handle, inode, 1);
4029 out_stop:
4030 	ext4_journal_stop(handle);
4031 out_dio:
4032 	filemap_invalidate_unlock(mapping);
4033 out_mutex:
4034 	inode_unlock(inode);
4035 	return ret;
4036 }
4037 
4038 int ext4_inode_attach_jinode(struct inode *inode)
4039 {
4040 	struct ext4_inode_info *ei = EXT4_I(inode);
4041 	struct jbd2_inode *jinode;
4042 
4043 	if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
4044 		return 0;
4045 
4046 	jinode = jbd2_alloc_inode(GFP_KERNEL);
4047 	spin_lock(&inode->i_lock);
4048 	if (!ei->jinode) {
4049 		if (!jinode) {
4050 			spin_unlock(&inode->i_lock);
4051 			return -ENOMEM;
4052 		}
4053 		ei->jinode = jinode;
4054 		jbd2_journal_init_jbd_inode(ei->jinode, inode);
4055 		jinode = NULL;
4056 	}
4057 	spin_unlock(&inode->i_lock);
4058 	if (unlikely(jinode != NULL))
4059 		jbd2_free_inode(jinode);
4060 	return 0;
4061 }
4062 
4063 /*
4064  * ext4_truncate()
4065  *
4066  * We block out ext4_get_block() block instantiations across the entire
4067  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
4068  * simultaneously on behalf of the same inode.
4069  *
4070  * As we work through the truncate and commit bits of it to the journal there
4071  * is one core, guiding principle: the file's tree must always be consistent on
4072  * disk.  We must be able to restart the truncate after a crash.
4073  *
4074  * The file's tree may be transiently inconsistent in memory (although it
4075  * probably isn't), but whenever we close off and commit a journal transaction,
4076  * the contents of (the filesystem + the journal) must be consistent and
4077  * restartable.  It's pretty simple, really: bottom up, right to left (although
4078  * left-to-right works OK too).
4079  *
4080  * Note that at recovery time, journal replay occurs *before* the restart of
4081  * truncate against the orphan inode list.
4082  *
4083  * The committed inode has the new, desired i_size (which is the same as
4084  * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
4085  * that this inode's truncate did not complete and it will again call
4086  * ext4_truncate() to have another go.  So there will be instantiated blocks
4087  * to the right of the truncation point in a crashed ext4 filesystem.  But
4088  * that's fine - as long as they are linked from the inode, the post-crash
4089  * ext4_truncate() run will find them and release them.
4090  */
4091 int ext4_truncate(struct inode *inode)
4092 {
4093 	struct ext4_inode_info *ei = EXT4_I(inode);
4094 	unsigned int credits;
4095 	int err = 0, err2;
4096 	handle_t *handle;
4097 	struct address_space *mapping = inode->i_mapping;
4098 
4099 	/*
4100 	 * There is a possibility that we're either freeing the inode
4101 	 * or it's a completely new inode. In those cases we might not
4102 	 * have i_rwsem locked because it's not necessary.
4103 	 */
4104 	if (!(inode->i_state & (I_NEW|I_FREEING)))
4105 		WARN_ON(!inode_is_locked(inode));
4106 	trace_ext4_truncate_enter(inode);
4107 
4108 	if (!ext4_can_truncate(inode))
4109 		goto out_trace;
4110 
4111 	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
4112 		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
4113 
4114 	if (ext4_has_inline_data(inode)) {
4115 		int has_inline = 1;
4116 
4117 		err = ext4_inline_data_truncate(inode, &has_inline);
4118 		if (err || has_inline)
4119 			goto out_trace;
4120 	}
4121 
4122 	/* If we zero-out tail of the page, we have to create jinode for jbd2 */
4123 	if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
4124 		err = ext4_inode_attach_jinode(inode);
4125 		if (err)
4126 			goto out_trace;
4127 	}
4128 
4129 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4130 		credits = ext4_writepage_trans_blocks(inode);
4131 	else
4132 		credits = ext4_blocks_for_truncate(inode);
4133 
4134 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4135 	if (IS_ERR(handle)) {
4136 		err = PTR_ERR(handle);
4137 		goto out_trace;
4138 	}
4139 
4140 	if (inode->i_size & (inode->i_sb->s_blocksize - 1))
4141 		ext4_block_truncate_page(handle, mapping, inode->i_size);
4142 
4143 	/*
4144 	 * We add the inode to the orphan list, so that if this
4145 	 * truncate spans multiple transactions, and we crash, we will
4146 	 * resume the truncate when the filesystem recovers.  It also
4147 	 * marks the inode dirty, to catch the new size.
4148 	 *
4149 	 * Implication: the file must always be in a sane, consistent
4150 	 * truncatable state while each transaction commits.
4151 	 */
4152 	err = ext4_orphan_add(handle, inode);
4153 	if (err)
4154 		goto out_stop;
4155 
4156 	down_write(&EXT4_I(inode)->i_data_sem);
4157 
4158 	ext4_discard_preallocations(inode, 0);
4159 
4160 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4161 		err = ext4_ext_truncate(handle, inode);
4162 	else
4163 		ext4_ind_truncate(handle, inode);
4164 
4165 	up_write(&ei->i_data_sem);
4166 	if (err)
4167 		goto out_stop;
4168 
4169 	if (IS_SYNC(inode))
4170 		ext4_handle_sync(handle);
4171 
4172 out_stop:
4173 	/*
4174 	 * If this was a simple ftruncate() and the file will remain alive,
4175 	 * then we need to clear up the orphan record which we created above.
4176 	 * However, if this was a real unlink then we were called by
4177 	 * ext4_evict_inode(), and we allow that function to clean up the
4178 	 * orphan info for us.
4179 	 */
4180 	if (inode->i_nlink)
4181 		ext4_orphan_del(handle, inode);
4182 
4183 	inode->i_mtime = inode_set_ctime_current(inode);
4184 	err2 = ext4_mark_inode_dirty(handle, inode);
4185 	if (unlikely(err2 && !err))
4186 		err = err2;
4187 	ext4_journal_stop(handle);
4188 
4189 out_trace:
4190 	trace_ext4_truncate_exit(inode);
4191 	return err;
4192 }
4193 
4194 static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
4195 {
4196 	if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4197 		return inode_peek_iversion_raw(inode);
4198 	else
4199 		return inode_peek_iversion(inode);
4200 }
4201 
4202 static int ext4_inode_blocks_set(struct ext4_inode *raw_inode,
4203 				 struct ext4_inode_info *ei)
4204 {
4205 	struct inode *inode = &(ei->vfs_inode);
4206 	u64 i_blocks = READ_ONCE(inode->i_blocks);
4207 	struct super_block *sb = inode->i_sb;
4208 
4209 	if (i_blocks <= ~0U) {
4210 		/*
4211 		 * i_blocks can be represented in a 32 bit variable
4212 		 * as multiple of 512 bytes
4213 		 */
4214 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4215 		raw_inode->i_blocks_high = 0;
4216 		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4217 		return 0;
4218 	}
4219 
4220 	/*
4221 	 * This should never happen since sb->s_maxbytes should not have
4222 	 * allowed this, sb->s_maxbytes was set according to the huge_file
4223 	 * feature in ext4_fill_super().
4224 	 */
4225 	if (!ext4_has_feature_huge_file(sb))
4226 		return -EFSCORRUPTED;
4227 
4228 	if (i_blocks <= 0xffffffffffffULL) {
4229 		/*
4230 		 * i_blocks can be represented in a 48 bit variable
4231 		 * as multiple of 512 bytes
4232 		 */
4233 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4234 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4235 		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4236 	} else {
4237 		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4238 		/* i_block is stored in file system block size */
4239 		i_blocks = i_blocks >> (inode->i_blkbits - 9);
4240 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4241 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4242 	}
4243 	return 0;
4244 }
4245 
4246 static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode)
4247 {
4248 	struct ext4_inode_info *ei = EXT4_I(inode);
4249 	uid_t i_uid;
4250 	gid_t i_gid;
4251 	projid_t i_projid;
4252 	int block;
4253 	int err;
4254 
4255 	err = ext4_inode_blocks_set(raw_inode, ei);
4256 
4257 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4258 	i_uid = i_uid_read(inode);
4259 	i_gid = i_gid_read(inode);
4260 	i_projid = from_kprojid(&init_user_ns, ei->i_projid);
4261 	if (!(test_opt(inode->i_sb, NO_UID32))) {
4262 		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
4263 		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
4264 		/*
4265 		 * Fix up interoperability with old kernels. Otherwise,
4266 		 * old inodes get re-used with the upper 16 bits of the
4267 		 * uid/gid intact.
4268 		 */
4269 		if (ei->i_dtime && list_empty(&ei->i_orphan)) {
4270 			raw_inode->i_uid_high = 0;
4271 			raw_inode->i_gid_high = 0;
4272 		} else {
4273 			raw_inode->i_uid_high =
4274 				cpu_to_le16(high_16_bits(i_uid));
4275 			raw_inode->i_gid_high =
4276 				cpu_to_le16(high_16_bits(i_gid));
4277 		}
4278 	} else {
4279 		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
4280 		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
4281 		raw_inode->i_uid_high = 0;
4282 		raw_inode->i_gid_high = 0;
4283 	}
4284 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4285 
4286 	EXT4_INODE_SET_CTIME(inode, raw_inode);
4287 	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4288 	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4289 	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4290 
4291 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4292 	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
4293 	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
4294 		raw_inode->i_file_acl_high =
4295 			cpu_to_le16(ei->i_file_acl >> 32);
4296 	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4297 	ext4_isize_set(raw_inode, ei->i_disksize);
4298 
4299 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4300 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4301 		if (old_valid_dev(inode->i_rdev)) {
4302 			raw_inode->i_block[0] =
4303 				cpu_to_le32(old_encode_dev(inode->i_rdev));
4304 			raw_inode->i_block[1] = 0;
4305 		} else {
4306 			raw_inode->i_block[0] = 0;
4307 			raw_inode->i_block[1] =
4308 				cpu_to_le32(new_encode_dev(inode->i_rdev));
4309 			raw_inode->i_block[2] = 0;
4310 		}
4311 	} else if (!ext4_has_inline_data(inode)) {
4312 		for (block = 0; block < EXT4_N_BLOCKS; block++)
4313 			raw_inode->i_block[block] = ei->i_data[block];
4314 	}
4315 
4316 	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4317 		u64 ivers = ext4_inode_peek_iversion(inode);
4318 
4319 		raw_inode->i_disk_version = cpu_to_le32(ivers);
4320 		if (ei->i_extra_isize) {
4321 			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4322 				raw_inode->i_version_hi =
4323 					cpu_to_le32(ivers >> 32);
4324 			raw_inode->i_extra_isize =
4325 				cpu_to_le16(ei->i_extra_isize);
4326 		}
4327 	}
4328 
4329 	if (i_projid != EXT4_DEF_PROJID &&
4330 	    !ext4_has_feature_project(inode->i_sb))
4331 		err = err ?: -EFSCORRUPTED;
4332 
4333 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4334 	    EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
4335 		raw_inode->i_projid = cpu_to_le32(i_projid);
4336 
4337 	ext4_inode_csum_set(inode, raw_inode, ei);
4338 	return err;
4339 }
4340 
4341 /*
4342  * ext4_get_inode_loc returns with an extra refcount against the inode's
4343  * underlying buffer_head on success. If we pass 'inode' and it does not
4344  * have in-inode xattr, we have all inode data in memory that is needed
4345  * to recreate the on-disk version of this inode.
4346  */
4347 static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
4348 				struct inode *inode, struct ext4_iloc *iloc,
4349 				ext4_fsblk_t *ret_block)
4350 {
4351 	struct ext4_group_desc	*gdp;
4352 	struct buffer_head	*bh;
4353 	ext4_fsblk_t		block;
4354 	struct blk_plug		plug;
4355 	int			inodes_per_block, inode_offset;
4356 
4357 	iloc->bh = NULL;
4358 	if (ino < EXT4_ROOT_INO ||
4359 	    ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
4360 		return -EFSCORRUPTED;
4361 
4362 	iloc->block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
4363 	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
4364 	if (!gdp)
4365 		return -EIO;
4366 
4367 	/*
4368 	 * Figure out the offset within the block group inode table
4369 	 */
4370 	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
4371 	inode_offset = ((ino - 1) %
4372 			EXT4_INODES_PER_GROUP(sb));
4373 	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
4374 
4375 	block = ext4_inode_table(sb, gdp);
4376 	if ((block <= le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) ||
4377 	    (block >= ext4_blocks_count(EXT4_SB(sb)->s_es))) {
4378 		ext4_error(sb, "Invalid inode table block %llu in "
4379 			   "block_group %u", block, iloc->block_group);
4380 		return -EFSCORRUPTED;
4381 	}
4382 	block += (inode_offset / inodes_per_block);
4383 
4384 	bh = sb_getblk(sb, block);
4385 	if (unlikely(!bh))
4386 		return -ENOMEM;
4387 	if (ext4_buffer_uptodate(bh))
4388 		goto has_buffer;
4389 
4390 	lock_buffer(bh);
4391 	if (ext4_buffer_uptodate(bh)) {
4392 		/* Someone brought it uptodate while we waited */
4393 		unlock_buffer(bh);
4394 		goto has_buffer;
4395 	}
4396 
4397 	/*
4398 	 * If we have all information of the inode in memory and this
4399 	 * is the only valid inode in the block, we need not read the
4400 	 * block.
4401 	 */
4402 	if (inode && !ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4403 		struct buffer_head *bitmap_bh;
4404 		int i, start;
4405 
4406 		start = inode_offset & ~(inodes_per_block - 1);
4407 
4408 		/* Is the inode bitmap in cache? */
4409 		bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
4410 		if (unlikely(!bitmap_bh))
4411 			goto make_io;
4412 
4413 		/*
4414 		 * If the inode bitmap isn't in cache then the
4415 		 * optimisation may end up performing two reads instead
4416 		 * of one, so skip it.
4417 		 */
4418 		if (!buffer_uptodate(bitmap_bh)) {
4419 			brelse(bitmap_bh);
4420 			goto make_io;
4421 		}
4422 		for (i = start; i < start + inodes_per_block; i++) {
4423 			if (i == inode_offset)
4424 				continue;
4425 			if (ext4_test_bit(i, bitmap_bh->b_data))
4426 				break;
4427 		}
4428 		brelse(bitmap_bh);
4429 		if (i == start + inodes_per_block) {
4430 			struct ext4_inode *raw_inode =
4431 				(struct ext4_inode *) (bh->b_data + iloc->offset);
4432 
4433 			/* all other inodes are free, so skip I/O */
4434 			memset(bh->b_data, 0, bh->b_size);
4435 			if (!ext4_test_inode_state(inode, EXT4_STATE_NEW))
4436 				ext4_fill_raw_inode(inode, raw_inode);
4437 			set_buffer_uptodate(bh);
4438 			unlock_buffer(bh);
4439 			goto has_buffer;
4440 		}
4441 	}
4442 
4443 make_io:
4444 	/*
4445 	 * If we need to do any I/O, try to pre-readahead extra
4446 	 * blocks from the inode table.
4447 	 */
4448 	blk_start_plug(&plug);
4449 	if (EXT4_SB(sb)->s_inode_readahead_blks) {
4450 		ext4_fsblk_t b, end, table;
4451 		unsigned num;
4452 		__u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;
4453 
4454 		table = ext4_inode_table(sb, gdp);
4455 		/* s_inode_readahead_blks is always a power of 2 */
4456 		b = block & ~((ext4_fsblk_t) ra_blks - 1);
4457 		if (table > b)
4458 			b = table;
4459 		end = b + ra_blks;
4460 		num = EXT4_INODES_PER_GROUP(sb);
4461 		if (ext4_has_group_desc_csum(sb))
4462 			num -= ext4_itable_unused_count(sb, gdp);
4463 		table += num / inodes_per_block;
4464 		if (end > table)
4465 			end = table;
4466 		while (b <= end)
4467 			ext4_sb_breadahead_unmovable(sb, b++);
4468 	}
4469 
4470 	/*
4471 	 * There are other valid inodes in the buffer, this inode
4472 	 * has in-inode xattrs, or we don't have this inode in memory.
4473 	 * Read the block from disk.
4474 	 */
4475 	trace_ext4_load_inode(sb, ino);
4476 	ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL);
4477 	blk_finish_plug(&plug);
4478 	wait_on_buffer(bh);
4479 	ext4_simulate_fail_bh(sb, bh, EXT4_SIM_INODE_EIO);
4480 	if (!buffer_uptodate(bh)) {
4481 		if (ret_block)
4482 			*ret_block = block;
4483 		brelse(bh);
4484 		return -EIO;
4485 	}
4486 has_buffer:
4487 	iloc->bh = bh;
4488 	return 0;
4489 }
4490 
4491 static int __ext4_get_inode_loc_noinmem(struct inode *inode,
4492 					struct ext4_iloc *iloc)
4493 {
4494 	ext4_fsblk_t err_blk = 0;
4495 	int ret;
4496 
4497 	ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, NULL, iloc,
4498 					&err_blk);
4499 
4500 	if (ret == -EIO)
4501 		ext4_error_inode_block(inode, err_blk, EIO,
4502 					"unable to read itable block");
4503 
4504 	return ret;
4505 }
4506 
4507 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4508 {
4509 	ext4_fsblk_t err_blk = 0;
4510 	int ret;
4511 
4512 	ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, inode, iloc,
4513 					&err_blk);
4514 
4515 	if (ret == -EIO)
4516 		ext4_error_inode_block(inode, err_blk, EIO,
4517 					"unable to read itable block");
4518 
4519 	return ret;
4520 }
4521 
4522 
4523 int ext4_get_fc_inode_loc(struct super_block *sb, unsigned long ino,
4524 			  struct ext4_iloc *iloc)
4525 {
4526 	return __ext4_get_inode_loc(sb, ino, NULL, iloc, NULL);
4527 }
4528 
4529 static bool ext4_should_enable_dax(struct inode *inode)
4530 {
4531 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4532 
4533 	if (test_opt2(inode->i_sb, DAX_NEVER))
4534 		return false;
4535 	if (!S_ISREG(inode->i_mode))
4536 		return false;
4537 	if (ext4_should_journal_data(inode))
4538 		return false;
4539 	if (ext4_has_inline_data(inode))
4540 		return false;
4541 	if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
4542 		return false;
4543 	if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY))
4544 		return false;
4545 	if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags))
4546 		return false;
4547 	if (test_opt(inode->i_sb, DAX_ALWAYS))
4548 		return true;
4549 
4550 	return ext4_test_inode_flag(inode, EXT4_INODE_DAX);
4551 }
4552 
4553 void ext4_set_inode_flags(struct inode *inode, bool init)
4554 {
4555 	unsigned int flags = EXT4_I(inode)->i_flags;
4556 	unsigned int new_fl = 0;
4557 
4558 	WARN_ON_ONCE(IS_DAX(inode) && init);
4559 
4560 	if (flags & EXT4_SYNC_FL)
4561 		new_fl |= S_SYNC;
4562 	if (flags & EXT4_APPEND_FL)
4563 		new_fl |= S_APPEND;
4564 	if (flags & EXT4_IMMUTABLE_FL)
4565 		new_fl |= S_IMMUTABLE;
4566 	if (flags & EXT4_NOATIME_FL)
4567 		new_fl |= S_NOATIME;
4568 	if (flags & EXT4_DIRSYNC_FL)
4569 		new_fl |= S_DIRSYNC;
4570 
4571 	/* Because of the way inode_set_flags() works we must preserve S_DAX
4572 	 * here if already set. */
4573 	new_fl |= (inode->i_flags & S_DAX);
4574 	if (init && ext4_should_enable_dax(inode))
4575 		new_fl |= S_DAX;
4576 
4577 	if (flags & EXT4_ENCRYPT_FL)
4578 		new_fl |= S_ENCRYPTED;
4579 	if (flags & EXT4_CASEFOLD_FL)
4580 		new_fl |= S_CASEFOLD;
4581 	if (flags & EXT4_VERITY_FL)
4582 		new_fl |= S_VERITY;
4583 	inode_set_flags(inode, new_fl,
4584 			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX|
4585 			S_ENCRYPTED|S_CASEFOLD|S_VERITY);
4586 }
4587 
4588 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
4589 				  struct ext4_inode_info *ei)
4590 {
4591 	blkcnt_t i_blocks ;
4592 	struct inode *inode = &(ei->vfs_inode);
4593 	struct super_block *sb = inode->i_sb;
4594 
4595 	if (ext4_has_feature_huge_file(sb)) {
4596 		/* we are using combined 48 bit field */
4597 		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
4598 					le32_to_cpu(raw_inode->i_blocks_lo);
4599 		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
4600 			/* i_blocks represent file system block size */
4601 			return i_blocks  << (inode->i_blkbits - 9);
4602 		} else {
4603 			return i_blocks;
4604 		}
4605 	} else {
4606 		return le32_to_cpu(raw_inode->i_blocks_lo);
4607 	}
4608 }
4609 
4610 static inline int ext4_iget_extra_inode(struct inode *inode,
4611 					 struct ext4_inode *raw_inode,
4612 					 struct ext4_inode_info *ei)
4613 {
4614 	__le32 *magic = (void *)raw_inode +
4615 			EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
4616 
4617 	if (EXT4_INODE_HAS_XATTR_SPACE(inode)  &&
4618 	    *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
4619 		int err;
4620 
4621 		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
4622 		err = ext4_find_inline_data_nolock(inode);
4623 		if (!err && ext4_has_inline_data(inode))
4624 			ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
4625 		return err;
4626 	} else
4627 		EXT4_I(inode)->i_inline_off = 0;
4628 	return 0;
4629 }
4630 
4631 int ext4_get_projid(struct inode *inode, kprojid_t *projid)
4632 {
4633 	if (!ext4_has_feature_project(inode->i_sb))
4634 		return -EOPNOTSUPP;
4635 	*projid = EXT4_I(inode)->i_projid;
4636 	return 0;
4637 }
4638 
4639 /*
4640  * ext4 has self-managed i_version for ea inodes, it stores the lower 32bit of
4641  * refcount in i_version, so use raw values if inode has EXT4_EA_INODE_FL flag
4642  * set.
4643  */
4644 static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
4645 {
4646 	if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4647 		inode_set_iversion_raw(inode, val);
4648 	else
4649 		inode_set_iversion_queried(inode, val);
4650 }
4651 
4652 static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags)
4653 
4654 {
4655 	if (flags & EXT4_IGET_EA_INODE) {
4656 		if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4657 			return "missing EA_INODE flag";
4658 		if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
4659 		    EXT4_I(inode)->i_file_acl)
4660 			return "ea_inode with extended attributes";
4661 	} else {
4662 		if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4663 			return "unexpected EA_INODE flag";
4664 	}
4665 	if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD))
4666 		return "unexpected bad inode w/o EXT4_IGET_BAD";
4667 	return NULL;
4668 }
4669 
4670 struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
4671 			  ext4_iget_flags flags, const char *function,
4672 			  unsigned int line)
4673 {
4674 	struct ext4_iloc iloc;
4675 	struct ext4_inode *raw_inode;
4676 	struct ext4_inode_info *ei;
4677 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
4678 	struct inode *inode;
4679 	const char *err_str;
4680 	journal_t *journal = EXT4_SB(sb)->s_journal;
4681 	long ret;
4682 	loff_t size;
4683 	int block;
4684 	uid_t i_uid;
4685 	gid_t i_gid;
4686 	projid_t i_projid;
4687 
4688 	if ((!(flags & EXT4_IGET_SPECIAL) &&
4689 	     ((ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) ||
4690 	      ino == le32_to_cpu(es->s_usr_quota_inum) ||
4691 	      ino == le32_to_cpu(es->s_grp_quota_inum) ||
4692 	      ino == le32_to_cpu(es->s_prj_quota_inum) ||
4693 	      ino == le32_to_cpu(es->s_orphan_file_inum))) ||
4694 	    (ino < EXT4_ROOT_INO) ||
4695 	    (ino > le32_to_cpu(es->s_inodes_count))) {
4696 		if (flags & EXT4_IGET_HANDLE)
4697 			return ERR_PTR(-ESTALE);
4698 		__ext4_error(sb, function, line, false, EFSCORRUPTED, 0,
4699 			     "inode #%lu: comm %s: iget: illegal inode #",
4700 			     ino, current->comm);
4701 		return ERR_PTR(-EFSCORRUPTED);
4702 	}
4703 
4704 	inode = iget_locked(sb, ino);
4705 	if (!inode)
4706 		return ERR_PTR(-ENOMEM);
4707 	if (!(inode->i_state & I_NEW)) {
4708 		if ((err_str = check_igot_inode(inode, flags)) != NULL) {
4709 			ext4_error_inode(inode, function, line, 0, err_str);
4710 			iput(inode);
4711 			return ERR_PTR(-EFSCORRUPTED);
4712 		}
4713 		return inode;
4714 	}
4715 
4716 	ei = EXT4_I(inode);
4717 	iloc.bh = NULL;
4718 
4719 	ret = __ext4_get_inode_loc_noinmem(inode, &iloc);
4720 	if (ret < 0)
4721 		goto bad_inode;
4722 	raw_inode = ext4_raw_inode(&iloc);
4723 
4724 	if ((flags & EXT4_IGET_HANDLE) &&
4725 	    (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
4726 		ret = -ESTALE;
4727 		goto bad_inode;
4728 	}
4729 
4730 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4731 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4732 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4733 			EXT4_INODE_SIZE(inode->i_sb) ||
4734 		    (ei->i_extra_isize & 3)) {
4735 			ext4_error_inode(inode, function, line, 0,
4736 					 "iget: bad extra_isize %u "
4737 					 "(inode size %u)",
4738 					 ei->i_extra_isize,
4739 					 EXT4_INODE_SIZE(inode->i_sb));
4740 			ret = -EFSCORRUPTED;
4741 			goto bad_inode;
4742 		}
4743 	} else
4744 		ei->i_extra_isize = 0;
4745 
4746 	/* Precompute checksum seed for inode metadata */
4747 	if (ext4_has_metadata_csum(sb)) {
4748 		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4749 		__u32 csum;
4750 		__le32 inum = cpu_to_le32(inode->i_ino);
4751 		__le32 gen = raw_inode->i_generation;
4752 		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
4753 				   sizeof(inum));
4754 		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
4755 					      sizeof(gen));
4756 	}
4757 
4758 	if ((!ext4_inode_csum_verify(inode, raw_inode, ei) ||
4759 	    ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) &&
4760 	     (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))) {
4761 		ext4_error_inode_err(inode, function, line, 0,
4762 				EFSBADCRC, "iget: checksum invalid");
4763 		ret = -EFSBADCRC;
4764 		goto bad_inode;
4765 	}
4766 
4767 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4768 	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4769 	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4770 	if (ext4_has_feature_project(sb) &&
4771 	    EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4772 	    EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
4773 		i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
4774 	else
4775 		i_projid = EXT4_DEF_PROJID;
4776 
4777 	if (!(test_opt(inode->i_sb, NO_UID32))) {
4778 		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4779 		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4780 	}
4781 	i_uid_write(inode, i_uid);
4782 	i_gid_write(inode, i_gid);
4783 	ei->i_projid = make_kprojid(&init_user_ns, i_projid);
4784 	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
4785 
4786 	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
4787 	ei->i_inline_off = 0;
4788 	ei->i_dir_start_lookup = 0;
4789 	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4790 	/* We now have enough fields to check if the inode was active or not.
4791 	 * This is needed because nfsd might try to access dead inodes
4792 	 * the test is that same one that e2fsck uses
4793 	 * NeilBrown 1999oct15
4794 	 */
4795 	if (inode->i_nlink == 0) {
4796 		if ((inode->i_mode == 0 || flags & EXT4_IGET_SPECIAL ||
4797 		     !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
4798 		    ino != EXT4_BOOT_LOADER_INO) {
4799 			/* this inode is deleted or unallocated */
4800 			if (flags & EXT4_IGET_SPECIAL) {
4801 				ext4_error_inode(inode, function, line, 0,
4802 						 "iget: special inode unallocated");
4803 				ret = -EFSCORRUPTED;
4804 			} else
4805 				ret = -ESTALE;
4806 			goto bad_inode;
4807 		}
4808 		/* The only unlinked inodes we let through here have
4809 		 * valid i_mode and are being read by the orphan
4810 		 * recovery code: that's fine, we're about to complete
4811 		 * the process of deleting those.
4812 		 * OR it is the EXT4_BOOT_LOADER_INO which is
4813 		 * not initialized on a new filesystem. */
4814 	}
4815 	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4816 	ext4_set_inode_flags(inode, true);
4817 	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4818 	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4819 	if (ext4_has_feature_64bit(sb))
4820 		ei->i_file_acl |=
4821 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4822 	inode->i_size = ext4_isize(sb, raw_inode);
4823 	if ((size = i_size_read(inode)) < 0) {
4824 		ext4_error_inode(inode, function, line, 0,
4825 				 "iget: bad i_size value: %lld", size);
4826 		ret = -EFSCORRUPTED;
4827 		goto bad_inode;
4828 	}
4829 	/*
4830 	 * If dir_index is not enabled but there's dir with INDEX flag set,
4831 	 * we'd normally treat htree data as empty space. But with metadata
4832 	 * checksumming that corrupts checksums so forbid that.
4833 	 */
4834 	if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
4835 	    ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
4836 		ext4_error_inode(inode, function, line, 0,
4837 			 "iget: Dir with htree data on filesystem without dir_index feature.");
4838 		ret = -EFSCORRUPTED;
4839 		goto bad_inode;
4840 	}
4841 	ei->i_disksize = inode->i_size;
4842 #ifdef CONFIG_QUOTA
4843 	ei->i_reserved_quota = 0;
4844 #endif
4845 	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4846 	ei->i_block_group = iloc.block_group;
4847 	ei->i_last_alloc_group = ~0;
4848 	/*
4849 	 * NOTE! The in-memory inode i_data array is in little-endian order
4850 	 * even on big-endian machines: we do NOT byteswap the block numbers!
4851 	 */
4852 	for (block = 0; block < EXT4_N_BLOCKS; block++)
4853 		ei->i_data[block] = raw_inode->i_block[block];
4854 	INIT_LIST_HEAD(&ei->i_orphan);
4855 	ext4_fc_init_inode(&ei->vfs_inode);
4856 
4857 	/*
4858 	 * Set transaction id's of transactions that have to be committed
4859 	 * to finish f[data]sync. We set them to currently running transaction
4860 	 * as we cannot be sure that the inode or some of its metadata isn't
4861 	 * part of the transaction - the inode could have been reclaimed and
4862 	 * now it is reread from disk.
4863 	 */
4864 	if (journal) {
4865 		transaction_t *transaction;
4866 		tid_t tid;
4867 
4868 		read_lock(&journal->j_state_lock);
4869 		if (journal->j_running_transaction)
4870 			transaction = journal->j_running_transaction;
4871 		else
4872 			transaction = journal->j_committing_transaction;
4873 		if (transaction)
4874 			tid = transaction->t_tid;
4875 		else
4876 			tid = journal->j_commit_sequence;
4877 		read_unlock(&journal->j_state_lock);
4878 		ei->i_sync_tid = tid;
4879 		ei->i_datasync_tid = tid;
4880 	}
4881 
4882 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4883 		if (ei->i_extra_isize == 0) {
4884 			/* The extra space is currently unused. Use it. */
4885 			BUILD_BUG_ON(sizeof(struct ext4_inode) & 3);
4886 			ei->i_extra_isize = sizeof(struct ext4_inode) -
4887 					    EXT4_GOOD_OLD_INODE_SIZE;
4888 		} else {
4889 			ret = ext4_iget_extra_inode(inode, raw_inode, ei);
4890 			if (ret)
4891 				goto bad_inode;
4892 		}
4893 	}
4894 
4895 	EXT4_INODE_GET_CTIME(inode, raw_inode);
4896 	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
4897 	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
4898 	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
4899 
4900 	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4901 		u64 ivers = le32_to_cpu(raw_inode->i_disk_version);
4902 
4903 		if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4904 			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4905 				ivers |=
4906 		    (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
4907 		}
4908 		ext4_inode_set_iversion_queried(inode, ivers);
4909 	}
4910 
4911 	ret = 0;
4912 	if (ei->i_file_acl &&
4913 	    !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) {
4914 		ext4_error_inode(inode, function, line, 0,
4915 				 "iget: bad extended attribute block %llu",
4916 				 ei->i_file_acl);
4917 		ret = -EFSCORRUPTED;
4918 		goto bad_inode;
4919 	} else if (!ext4_has_inline_data(inode)) {
4920 		/* validate the block references in the inode */
4921 		if (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) &&
4922 			(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4923 			(S_ISLNK(inode->i_mode) &&
4924 			!ext4_inode_is_fast_symlink(inode)))) {
4925 			if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4926 				ret = ext4_ext_check_inode(inode);
4927 			else
4928 				ret = ext4_ind_check_inode(inode);
4929 		}
4930 	}
4931 	if (ret)
4932 		goto bad_inode;
4933 
4934 	if (S_ISREG(inode->i_mode)) {
4935 		inode->i_op = &ext4_file_inode_operations;
4936 		inode->i_fop = &ext4_file_operations;
4937 		ext4_set_aops(inode);
4938 	} else if (S_ISDIR(inode->i_mode)) {
4939 		inode->i_op = &ext4_dir_inode_operations;
4940 		inode->i_fop = &ext4_dir_operations;
4941 	} else if (S_ISLNK(inode->i_mode)) {
4942 		/* VFS does not allow setting these so must be corruption */
4943 		if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
4944 			ext4_error_inode(inode, function, line, 0,
4945 					 "iget: immutable or append flags "
4946 					 "not allowed on symlinks");
4947 			ret = -EFSCORRUPTED;
4948 			goto bad_inode;
4949 		}
4950 		if (IS_ENCRYPTED(inode)) {
4951 			inode->i_op = &ext4_encrypted_symlink_inode_operations;
4952 		} else if (ext4_inode_is_fast_symlink(inode)) {
4953 			inode->i_link = (char *)ei->i_data;
4954 			inode->i_op = &ext4_fast_symlink_inode_operations;
4955 			nd_terminate_link(ei->i_data, inode->i_size,
4956 				sizeof(ei->i_data) - 1);
4957 		} else {
4958 			inode->i_op = &ext4_symlink_inode_operations;
4959 		}
4960 	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
4961 	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
4962 		inode->i_op = &ext4_special_inode_operations;
4963 		if (raw_inode->i_block[0])
4964 			init_special_inode(inode, inode->i_mode,
4965 			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
4966 		else
4967 			init_special_inode(inode, inode->i_mode,
4968 			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
4969 	} else if (ino == EXT4_BOOT_LOADER_INO) {
4970 		make_bad_inode(inode);
4971 	} else {
4972 		ret = -EFSCORRUPTED;
4973 		ext4_error_inode(inode, function, line, 0,
4974 				 "iget: bogus i_mode (%o)", inode->i_mode);
4975 		goto bad_inode;
4976 	}
4977 	if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb)) {
4978 		ext4_error_inode(inode, function, line, 0,
4979 				 "casefold flag without casefold feature");
4980 		ret = -EFSCORRUPTED;
4981 		goto bad_inode;
4982 	}
4983 	if ((err_str = check_igot_inode(inode, flags)) != NULL) {
4984 		ext4_error_inode(inode, function, line, 0, err_str);
4985 		ret = -EFSCORRUPTED;
4986 		goto bad_inode;
4987 	}
4988 
4989 	brelse(iloc.bh);
4990 	unlock_new_inode(inode);
4991 	return inode;
4992 
4993 bad_inode:
4994 	brelse(iloc.bh);
4995 	iget_failed(inode);
4996 	return ERR_PTR(ret);
4997 }
4998 
4999 static void __ext4_update_other_inode_time(struct super_block *sb,
5000 					   unsigned long orig_ino,
5001 					   unsigned long ino,
5002 					   struct ext4_inode *raw_inode)
5003 {
5004 	struct inode *inode;
5005 
5006 	inode = find_inode_by_ino_rcu(sb, ino);
5007 	if (!inode)
5008 		return;
5009 
5010 	if (!inode_is_dirtytime_only(inode))
5011 		return;
5012 
5013 	spin_lock(&inode->i_lock);
5014 	if (inode_is_dirtytime_only(inode)) {
5015 		struct ext4_inode_info	*ei = EXT4_I(inode);
5016 
5017 		inode->i_state &= ~I_DIRTY_TIME;
5018 		spin_unlock(&inode->i_lock);
5019 
5020 		spin_lock(&ei->i_raw_lock);
5021 		EXT4_INODE_SET_CTIME(inode, raw_inode);
5022 		EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
5023 		EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
5024 		ext4_inode_csum_set(inode, raw_inode, ei);
5025 		spin_unlock(&ei->i_raw_lock);
5026 		trace_ext4_other_inode_update_time(inode, orig_ino);
5027 		return;
5028 	}
5029 	spin_unlock(&inode->i_lock);
5030 }
5031 
5032 /*
5033  * Opportunistically update the other time fields for other inodes in
5034  * the same inode table block.
5035  */
5036 static void ext4_update_other_inodes_time(struct super_block *sb,
5037 					  unsigned long orig_ino, char *buf)
5038 {
5039 	unsigned long ino;
5040 	int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
5041 	int inode_size = EXT4_INODE_SIZE(sb);
5042 
5043 	/*
5044 	 * Calculate the first inode in the inode table block.  Inode
5045 	 * numbers are one-based.  That is, the first inode in a block
5046 	 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
5047 	 */
5048 	ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
5049 	rcu_read_lock();
5050 	for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
5051 		if (ino == orig_ino)
5052 			continue;
5053 		__ext4_update_other_inode_time(sb, orig_ino, ino,
5054 					       (struct ext4_inode *)buf);
5055 	}
5056 	rcu_read_unlock();
5057 }
5058 
5059 /*
5060  * Post the struct inode info into an on-disk inode location in the
5061  * buffer-cache.  This gobbles the caller's reference to the
5062  * buffer_head in the inode location struct.
5063  *
5064  * The caller must have write access to iloc->bh.
5065  */
5066 static int ext4_do_update_inode(handle_t *handle,
5067 				struct inode *inode,
5068 				struct ext4_iloc *iloc)
5069 {
5070 	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
5071 	struct ext4_inode_info *ei = EXT4_I(inode);
5072 	struct buffer_head *bh = iloc->bh;
5073 	struct super_block *sb = inode->i_sb;
5074 	int err;
5075 	int need_datasync = 0, set_large_file = 0;
5076 
5077 	spin_lock(&ei->i_raw_lock);
5078 
5079 	/*
5080 	 * For fields not tracked in the in-memory inode, initialise them
5081 	 * to zero for new inodes.
5082 	 */
5083 	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
5084 		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
5085 
5086 	if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode))
5087 		need_datasync = 1;
5088 	if (ei->i_disksize > 0x7fffffffULL) {
5089 		if (!ext4_has_feature_large_file(sb) ||
5090 		    EXT4_SB(sb)->s_es->s_rev_level == cpu_to_le32(EXT4_GOOD_OLD_REV))
5091 			set_large_file = 1;
5092 	}
5093 
5094 	err = ext4_fill_raw_inode(inode, raw_inode);
5095 	spin_unlock(&ei->i_raw_lock);
5096 	if (err) {
5097 		EXT4_ERROR_INODE(inode, "corrupted inode contents");
5098 		goto out_brelse;
5099 	}
5100 
5101 	if (inode->i_sb->s_flags & SB_LAZYTIME)
5102 		ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
5103 					      bh->b_data);
5104 
5105 	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
5106 	err = ext4_handle_dirty_metadata(handle, NULL, bh);
5107 	if (err)
5108 		goto out_error;
5109 	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
5110 	if (set_large_file) {
5111 		BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
5112 		err = ext4_journal_get_write_access(handle, sb,
5113 						    EXT4_SB(sb)->s_sbh,
5114 						    EXT4_JTR_NONE);
5115 		if (err)
5116 			goto out_error;
5117 		lock_buffer(EXT4_SB(sb)->s_sbh);
5118 		ext4_set_feature_large_file(sb);
5119 		ext4_superblock_csum_set(sb);
5120 		unlock_buffer(EXT4_SB(sb)->s_sbh);
5121 		ext4_handle_sync(handle);
5122 		err = ext4_handle_dirty_metadata(handle, NULL,
5123 						 EXT4_SB(sb)->s_sbh);
5124 	}
5125 	ext4_update_inode_fsync_trans(handle, inode, need_datasync);
5126 out_error:
5127 	ext4_std_error(inode->i_sb, err);
5128 out_brelse:
5129 	brelse(bh);
5130 	return err;
5131 }
5132 
5133 /*
5134  * ext4_write_inode()
5135  *
5136  * We are called from a few places:
5137  *
5138  * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
5139  *   Here, there will be no transaction running. We wait for any running
5140  *   transaction to commit.
5141  *
5142  * - Within flush work (sys_sync(), kupdate and such).
5143  *   We wait on commit, if told to.
5144  *
5145  * - Within iput_final() -> write_inode_now()
5146  *   We wait on commit, if told to.
5147  *
5148  * In all cases it is actually safe for us to return without doing anything,
5149  * because the inode has been copied into a raw inode buffer in
5150  * ext4_mark_inode_dirty().  This is a correctness thing for WB_SYNC_ALL
5151  * writeback.
5152  *
5153  * Note that we are absolutely dependent upon all inode dirtiers doing the
5154  * right thing: they *must* call mark_inode_dirty() after dirtying info in
5155  * which we are interested.
5156  *
5157  * It would be a bug for them to not do this.  The code:
5158  *
5159  *	mark_inode_dirty(inode)
5160  *	stuff();
5161  *	inode->i_size = expr;
5162  *
5163  * is in error because write_inode() could occur while `stuff()' is running,
5164  * and the new i_size will be lost.  Plus the inode will no longer be on the
5165  * superblock's dirty inode list.
5166  */
5167 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
5168 {
5169 	int err;
5170 
5171 	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
5172 		return 0;
5173 
5174 	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
5175 		return -EIO;
5176 
5177 	if (EXT4_SB(inode->i_sb)->s_journal) {
5178 		if (ext4_journal_current_handle()) {
5179 			ext4_debug("called recursively, non-PF_MEMALLOC!\n");
5180 			dump_stack();
5181 			return -EIO;
5182 		}
5183 
5184 		/*
5185 		 * No need to force transaction in WB_SYNC_NONE mode. Also
5186 		 * ext4_sync_fs() will force the commit after everything is
5187 		 * written.
5188 		 */
5189 		if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
5190 			return 0;
5191 
5192 		err = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
5193 						EXT4_I(inode)->i_sync_tid);
5194 	} else {
5195 		struct ext4_iloc iloc;
5196 
5197 		err = __ext4_get_inode_loc_noinmem(inode, &iloc);
5198 		if (err)
5199 			return err;
5200 		/*
5201 		 * sync(2) will flush the whole buffer cache. No need to do
5202 		 * it here separately for each inode.
5203 		 */
5204 		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
5205 			sync_dirty_buffer(iloc.bh);
5206 		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
5207 			ext4_error_inode_block(inode, iloc.bh->b_blocknr, EIO,
5208 					       "IO error syncing inode");
5209 			err = -EIO;
5210 		}
5211 		brelse(iloc.bh);
5212 	}
5213 	return err;
5214 }
5215 
5216 /*
5217  * In data=journal mode ext4_journalled_invalidate_folio() may fail to invalidate
5218  * buffers that are attached to a folio straddling i_size and are undergoing
5219  * commit. In that case we have to wait for commit to finish and try again.
5220  */
5221 static void ext4_wait_for_tail_page_commit(struct inode *inode)
5222 {
5223 	unsigned offset;
5224 	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
5225 	tid_t commit_tid = 0;
5226 	int ret;
5227 
5228 	offset = inode->i_size & (PAGE_SIZE - 1);
5229 	/*
5230 	 * If the folio is fully truncated, we don't need to wait for any commit
5231 	 * (and we even should not as __ext4_journalled_invalidate_folio() may
5232 	 * strip all buffers from the folio but keep the folio dirty which can then
5233 	 * confuse e.g. concurrent ext4_writepages() seeing dirty folio without
5234 	 * buffers). Also we don't need to wait for any commit if all buffers in
5235 	 * the folio remain valid. This is most beneficial for the common case of
5236 	 * blocksize == PAGESIZE.
5237 	 */
5238 	if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
5239 		return;
5240 	while (1) {
5241 		struct folio *folio = filemap_lock_folio(inode->i_mapping,
5242 				      inode->i_size >> PAGE_SHIFT);
5243 		if (IS_ERR(folio))
5244 			return;
5245 		ret = __ext4_journalled_invalidate_folio(folio, offset,
5246 						folio_size(folio) - offset);
5247 		folio_unlock(folio);
5248 		folio_put(folio);
5249 		if (ret != -EBUSY)
5250 			return;
5251 		commit_tid = 0;
5252 		read_lock(&journal->j_state_lock);
5253 		if (journal->j_committing_transaction)
5254 			commit_tid = journal->j_committing_transaction->t_tid;
5255 		read_unlock(&journal->j_state_lock);
5256 		if (commit_tid)
5257 			jbd2_log_wait_commit(journal, commit_tid);
5258 	}
5259 }
5260 
5261 /*
5262  * ext4_setattr()
5263  *
5264  * Called from notify_change.
5265  *
5266  * We want to trap VFS attempts to truncate the file as soon as
5267  * possible.  In particular, we want to make sure that when the VFS
5268  * shrinks i_size, we put the inode on the orphan list and modify
5269  * i_disksize immediately, so that during the subsequent flushing of
5270  * dirty pages and freeing of disk blocks, we can guarantee that any
5271  * commit will leave the blocks being flushed in an unused state on
5272  * disk.  (On recovery, the inode will get truncated and the blocks will
5273  * be freed, so we have a strong guarantee that no future commit will
5274  * leave these blocks visible to the user.)
5275  *
5276  * Another thing we have to assure is that if we are in ordered mode
5277  * and inode is still attached to the committing transaction, we must
5278  * we start writeout of all the dirty pages which are being truncated.
5279  * This way we are sure that all the data written in the previous
5280  * transaction are already on disk (truncate waits for pages under
5281  * writeback).
5282  *
5283  * Called with inode->i_rwsem down.
5284  */
5285 int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
5286 		 struct iattr *attr)
5287 {
5288 	struct inode *inode = d_inode(dentry);
5289 	int error, rc = 0;
5290 	int orphan = 0;
5291 	const unsigned int ia_valid = attr->ia_valid;
5292 	bool inc_ivers = true;
5293 
5294 	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
5295 		return -EIO;
5296 
5297 	if (unlikely(IS_IMMUTABLE(inode)))
5298 		return -EPERM;
5299 
5300 	if (unlikely(IS_APPEND(inode) &&
5301 		     (ia_valid & (ATTR_MODE | ATTR_UID |
5302 				  ATTR_GID | ATTR_TIMES_SET))))
5303 		return -EPERM;
5304 
5305 	error = setattr_prepare(idmap, dentry, attr);
5306 	if (error)
5307 		return error;
5308 
5309 	error = fscrypt_prepare_setattr(dentry, attr);
5310 	if (error)
5311 		return error;
5312 
5313 	error = fsverity_prepare_setattr(dentry, attr);
5314 	if (error)
5315 		return error;
5316 
5317 	if (is_quota_modification(idmap, inode, attr)) {
5318 		error = dquot_initialize(inode);
5319 		if (error)
5320 			return error;
5321 	}
5322 
5323 	if (i_uid_needs_update(idmap, attr, inode) ||
5324 	    i_gid_needs_update(idmap, attr, inode)) {
5325 		handle_t *handle;
5326 
5327 		/* (user+group)*(old+new) structure, inode write (sb,
5328 		 * inode block, ? - but truncate inode update has it) */
5329 		handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
5330 			(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
5331 			 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
5332 		if (IS_ERR(handle)) {
5333 			error = PTR_ERR(handle);
5334 			goto err_out;
5335 		}
5336 
5337 		/* dquot_transfer() calls back ext4_get_inode_usage() which
5338 		 * counts xattr inode references.
5339 		 */
5340 		down_read(&EXT4_I(inode)->xattr_sem);
5341 		error = dquot_transfer(idmap, inode, attr);
5342 		up_read(&EXT4_I(inode)->xattr_sem);
5343 
5344 		if (error) {
5345 			ext4_journal_stop(handle);
5346 			return error;
5347 		}
5348 		/* Update corresponding info in inode so that everything is in
5349 		 * one transaction */
5350 		i_uid_update(idmap, attr, inode);
5351 		i_gid_update(idmap, attr, inode);
5352 		error = ext4_mark_inode_dirty(handle, inode);
5353 		ext4_journal_stop(handle);
5354 		if (unlikely(error)) {
5355 			return error;
5356 		}
5357 	}
5358 
5359 	if (attr->ia_valid & ATTR_SIZE) {
5360 		handle_t *handle;
5361 		loff_t oldsize = inode->i_size;
5362 		loff_t old_disksize;
5363 		int shrink = (attr->ia_size < inode->i_size);
5364 
5365 		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
5366 			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5367 
5368 			if (attr->ia_size > sbi->s_bitmap_maxbytes) {
5369 				return -EFBIG;
5370 			}
5371 		}
5372 		if (!S_ISREG(inode->i_mode)) {
5373 			return -EINVAL;
5374 		}
5375 
5376 		if (attr->ia_size == inode->i_size)
5377 			inc_ivers = false;
5378 
5379 		if (shrink) {
5380 			if (ext4_should_order_data(inode)) {
5381 				error = ext4_begin_ordered_truncate(inode,
5382 							    attr->ia_size);
5383 				if (error)
5384 					goto err_out;
5385 			}
5386 			/*
5387 			 * Blocks are going to be removed from the inode. Wait
5388 			 * for dio in flight.
5389 			 */
5390 			inode_dio_wait(inode);
5391 		}
5392 
5393 		filemap_invalidate_lock(inode->i_mapping);
5394 
5395 		rc = ext4_break_layouts(inode);
5396 		if (rc) {
5397 			filemap_invalidate_unlock(inode->i_mapping);
5398 			goto err_out;
5399 		}
5400 
5401 		if (attr->ia_size != inode->i_size) {
5402 			handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
5403 			if (IS_ERR(handle)) {
5404 				error = PTR_ERR(handle);
5405 				goto out_mmap_sem;
5406 			}
5407 			if (ext4_handle_valid(handle) && shrink) {
5408 				error = ext4_orphan_add(handle, inode);
5409 				orphan = 1;
5410 			}
5411 			/*
5412 			 * Update c/mtime on truncate up, ext4_truncate() will
5413 			 * update c/mtime in shrink case below
5414 			 */
5415 			if (!shrink)
5416 				inode->i_mtime = inode_set_ctime_current(inode);
5417 
5418 			if (shrink)
5419 				ext4_fc_track_range(handle, inode,
5420 					(attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
5421 					inode->i_sb->s_blocksize_bits,
5422 					EXT_MAX_BLOCKS - 1);
5423 			else
5424 				ext4_fc_track_range(
5425 					handle, inode,
5426 					(oldsize > 0 ? oldsize - 1 : oldsize) >>
5427 					inode->i_sb->s_blocksize_bits,
5428 					(attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
5429 					inode->i_sb->s_blocksize_bits);
5430 
5431 			down_write(&EXT4_I(inode)->i_data_sem);
5432 			old_disksize = EXT4_I(inode)->i_disksize;
5433 			EXT4_I(inode)->i_disksize = attr->ia_size;
5434 			rc = ext4_mark_inode_dirty(handle, inode);
5435 			if (!error)
5436 				error = rc;
5437 			/*
5438 			 * We have to update i_size under i_data_sem together
5439 			 * with i_disksize to avoid races with writeback code
5440 			 * running ext4_wb_update_i_disksize().
5441 			 */
5442 			if (!error)
5443 				i_size_write(inode, attr->ia_size);
5444 			else
5445 				EXT4_I(inode)->i_disksize = old_disksize;
5446 			up_write(&EXT4_I(inode)->i_data_sem);
5447 			ext4_journal_stop(handle);
5448 			if (error)
5449 				goto out_mmap_sem;
5450 			if (!shrink) {
5451 				pagecache_isize_extended(inode, oldsize,
5452 							 inode->i_size);
5453 			} else if (ext4_should_journal_data(inode)) {
5454 				ext4_wait_for_tail_page_commit(inode);
5455 			}
5456 		}
5457 
5458 		/*
5459 		 * Truncate pagecache after we've waited for commit
5460 		 * in data=journal mode to make pages freeable.
5461 		 */
5462 		truncate_pagecache(inode, inode->i_size);
5463 		/*
5464 		 * Call ext4_truncate() even if i_size didn't change to
5465 		 * truncate possible preallocated blocks.
5466 		 */
5467 		if (attr->ia_size <= oldsize) {
5468 			rc = ext4_truncate(inode);
5469 			if (rc)
5470 				error = rc;
5471 		}
5472 out_mmap_sem:
5473 		filemap_invalidate_unlock(inode->i_mapping);
5474 	}
5475 
5476 	if (!error) {
5477 		if (inc_ivers)
5478 			inode_inc_iversion(inode);
5479 		setattr_copy(idmap, inode, attr);
5480 		mark_inode_dirty(inode);
5481 	}
5482 
5483 	/*
5484 	 * If the call to ext4_truncate failed to get a transaction handle at
5485 	 * all, we need to clean up the in-core orphan list manually.
5486 	 */
5487 	if (orphan && inode->i_nlink)
5488 		ext4_orphan_del(NULL, inode);
5489 
5490 	if (!error && (ia_valid & ATTR_MODE))
5491 		rc = posix_acl_chmod(idmap, dentry, inode->i_mode);
5492 
5493 err_out:
5494 	if  (error)
5495 		ext4_std_error(inode->i_sb, error);
5496 	if (!error)
5497 		error = rc;
5498 	return error;
5499 }
5500 
5501 u32 ext4_dio_alignment(struct inode *inode)
5502 {
5503 	if (fsverity_active(inode))
5504 		return 0;
5505 	if (ext4_should_journal_data(inode))
5506 		return 0;
5507 	if (ext4_has_inline_data(inode))
5508 		return 0;
5509 	if (IS_ENCRYPTED(inode)) {
5510 		if (!fscrypt_dio_supported(inode))
5511 			return 0;
5512 		return i_blocksize(inode);
5513 	}
5514 	return 1; /* use the iomap defaults */
5515 }
5516 
5517 int ext4_getattr(struct mnt_idmap *idmap, const struct path *path,
5518 		 struct kstat *stat, u32 request_mask, unsigned int query_flags)
5519 {
5520 	struct inode *inode = d_inode(path->dentry);
5521 	struct ext4_inode *raw_inode;
5522 	struct ext4_inode_info *ei = EXT4_I(inode);
5523 	unsigned int flags;
5524 
5525 	if ((request_mask & STATX_BTIME) &&
5526 	    EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
5527 		stat->result_mask |= STATX_BTIME;
5528 		stat->btime.tv_sec = ei->i_crtime.tv_sec;
5529 		stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
5530 	}
5531 
5532 	/*
5533 	 * Return the DIO alignment restrictions if requested.  We only return
5534 	 * this information when requested, since on encrypted files it might
5535 	 * take a fair bit of work to get if the file wasn't opened recently.
5536 	 */
5537 	if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
5538 		u32 dio_align = ext4_dio_alignment(inode);
5539 
5540 		stat->result_mask |= STATX_DIOALIGN;
5541 		if (dio_align == 1) {
5542 			struct block_device *bdev = inode->i_sb->s_bdev;
5543 
5544 			/* iomap defaults */
5545 			stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
5546 			stat->dio_offset_align = bdev_logical_block_size(bdev);
5547 		} else {
5548 			stat->dio_mem_align = dio_align;
5549 			stat->dio_offset_align = dio_align;
5550 		}
5551 	}
5552 
5553 	flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
5554 	if (flags & EXT4_APPEND_FL)
5555 		stat->attributes |= STATX_ATTR_APPEND;
5556 	if (flags & EXT4_COMPR_FL)
5557 		stat->attributes |= STATX_ATTR_COMPRESSED;
5558 	if (flags & EXT4_ENCRYPT_FL)
5559 		stat->attributes |= STATX_ATTR_ENCRYPTED;
5560 	if (flags & EXT4_IMMUTABLE_FL)
5561 		stat->attributes |= STATX_ATTR_IMMUTABLE;
5562 	if (flags & EXT4_NODUMP_FL)
5563 		stat->attributes |= STATX_ATTR_NODUMP;
5564 	if (flags & EXT4_VERITY_FL)
5565 		stat->attributes |= STATX_ATTR_VERITY;
5566 
5567 	stat->attributes_mask |= (STATX_ATTR_APPEND |
5568 				  STATX_ATTR_COMPRESSED |
5569 				  STATX_ATTR_ENCRYPTED |
5570 				  STATX_ATTR_IMMUTABLE |
5571 				  STATX_ATTR_NODUMP |
5572 				  STATX_ATTR_VERITY);
5573 
5574 	generic_fillattr(idmap, request_mask, inode, stat);
5575 	return 0;
5576 }
5577 
5578 int ext4_file_getattr(struct mnt_idmap *idmap,
5579 		      const struct path *path, struct kstat *stat,
5580 		      u32 request_mask, unsigned int query_flags)
5581 {
5582 	struct inode *inode = d_inode(path->dentry);
5583 	u64 delalloc_blocks;
5584 
5585 	ext4_getattr(idmap, path, stat, request_mask, query_flags);
5586 
5587 	/*
5588 	 * If there is inline data in the inode, the inode will normally not
5589 	 * have data blocks allocated (it may have an external xattr block).
5590 	 * Report at least one sector for such files, so tools like tar, rsync,
5591 	 * others don't incorrectly think the file is completely sparse.
5592 	 */
5593 	if (unlikely(ext4_has_inline_data(inode)))
5594 		stat->blocks += (stat->size + 511) >> 9;
5595 
5596 	/*
5597 	 * We can't update i_blocks if the block allocation is delayed
5598 	 * otherwise in the case of system crash before the real block
5599 	 * allocation is done, we will have i_blocks inconsistent with
5600 	 * on-disk file blocks.
5601 	 * We always keep i_blocks updated together with real
5602 	 * allocation. But to not confuse with user, stat
5603 	 * will return the blocks that include the delayed allocation
5604 	 * blocks for this file.
5605 	 */
5606 	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
5607 				   EXT4_I(inode)->i_reserved_data_blocks);
5608 	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
5609 	return 0;
5610 }
5611 
5612 static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
5613 				   int pextents)
5614 {
5615 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5616 		return ext4_ind_trans_blocks(inode, lblocks);
5617 	return ext4_ext_index_trans_blocks(inode, pextents);
5618 }
5619 
5620 /*
5621  * Account for index blocks, block groups bitmaps and block group
5622  * descriptor blocks if modify datablocks and index blocks
5623  * worse case, the indexs blocks spread over different block groups
5624  *
5625  * If datablocks are discontiguous, they are possible to spread over
5626  * different block groups too. If they are contiguous, with flexbg,
5627  * they could still across block group boundary.
5628  *
5629  * Also account for superblock, inode, quota and xattr blocks
5630  */
5631 static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
5632 				  int pextents)
5633 {
5634 	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
5635 	int gdpblocks;
5636 	int idxblocks;
5637 	int ret;
5638 
5639 	/*
5640 	 * How many index blocks need to touch to map @lblocks logical blocks
5641 	 * to @pextents physical extents?
5642 	 */
5643 	idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
5644 
5645 	ret = idxblocks;
5646 
5647 	/*
5648 	 * Now let's see how many group bitmaps and group descriptors need
5649 	 * to account
5650 	 */
5651 	groups = idxblocks + pextents;
5652 	gdpblocks = groups;
5653 	if (groups > ngroups)
5654 		groups = ngroups;
5655 	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
5656 		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
5657 
5658 	/* bitmaps and block group descriptor blocks */
5659 	ret += groups + gdpblocks;
5660 
5661 	/* Blocks for super block, inode, quota and xattr blocks */
5662 	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
5663 
5664 	return ret;
5665 }
5666 
5667 /*
5668  * Calculate the total number of credits to reserve to fit
5669  * the modification of a single pages into a single transaction,
5670  * which may include multiple chunks of block allocations.
5671  *
5672  * This could be called via ext4_write_begin()
5673  *
5674  * We need to consider the worse case, when
5675  * one new block per extent.
5676  */
5677 int ext4_writepage_trans_blocks(struct inode *inode)
5678 {
5679 	int bpp = ext4_journal_blocks_per_page(inode);
5680 	int ret;
5681 
5682 	ret = ext4_meta_trans_blocks(inode, bpp, bpp);
5683 
5684 	/* Account for data blocks for journalled mode */
5685 	if (ext4_should_journal_data(inode))
5686 		ret += bpp;
5687 	return ret;
5688 }
5689 
5690 /*
5691  * Calculate the journal credits for a chunk of data modification.
5692  *
5693  * This is called from DIO, fallocate or whoever calling
5694  * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
5695  *
5696  * journal buffers for data blocks are not included here, as DIO
5697  * and fallocate do no need to journal data buffers.
5698  */
5699 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
5700 {
5701 	return ext4_meta_trans_blocks(inode, nrblocks, 1);
5702 }
5703 
5704 /*
5705  * The caller must have previously called ext4_reserve_inode_write().
5706  * Give this, we know that the caller already has write access to iloc->bh.
5707  */
5708 int ext4_mark_iloc_dirty(handle_t *handle,
5709 			 struct inode *inode, struct ext4_iloc *iloc)
5710 {
5711 	int err = 0;
5712 
5713 	if (unlikely(ext4_forced_shutdown(inode->i_sb))) {
5714 		put_bh(iloc->bh);
5715 		return -EIO;
5716 	}
5717 	ext4_fc_track_inode(handle, inode);
5718 
5719 	/* the do_update_inode consumes one bh->b_count */
5720 	get_bh(iloc->bh);
5721 
5722 	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
5723 	err = ext4_do_update_inode(handle, inode, iloc);
5724 	put_bh(iloc->bh);
5725 	return err;
5726 }
5727 
5728 /*
5729  * On success, We end up with an outstanding reference count against
5730  * iloc->bh.  This _must_ be cleaned up later.
5731  */
5732 
5733 int
5734 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
5735 			 struct ext4_iloc *iloc)
5736 {
5737 	int err;
5738 
5739 	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
5740 		return -EIO;
5741 
5742 	err = ext4_get_inode_loc(inode, iloc);
5743 	if (!err) {
5744 		BUFFER_TRACE(iloc->bh, "get_write_access");
5745 		err = ext4_journal_get_write_access(handle, inode->i_sb,
5746 						    iloc->bh, EXT4_JTR_NONE);
5747 		if (err) {
5748 			brelse(iloc->bh);
5749 			iloc->bh = NULL;
5750 		}
5751 	}
5752 	ext4_std_error(inode->i_sb, err);
5753 	return err;
5754 }
5755 
5756 static int __ext4_expand_extra_isize(struct inode *inode,
5757 				     unsigned int new_extra_isize,
5758 				     struct ext4_iloc *iloc,
5759 				     handle_t *handle, int *no_expand)
5760 {
5761 	struct ext4_inode *raw_inode;
5762 	struct ext4_xattr_ibody_header *header;
5763 	unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
5764 	struct ext4_inode_info *ei = EXT4_I(inode);
5765 	int error;
5766 
5767 	/* this was checked at iget time, but double check for good measure */
5768 	if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
5769 	    (ei->i_extra_isize & 3)) {
5770 		EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
5771 				 ei->i_extra_isize,
5772 				 EXT4_INODE_SIZE(inode->i_sb));
5773 		return -EFSCORRUPTED;
5774 	}
5775 	if ((new_extra_isize < ei->i_extra_isize) ||
5776 	    (new_extra_isize < 4) ||
5777 	    (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
5778 		return -EINVAL;	/* Should never happen */
5779 
5780 	raw_inode = ext4_raw_inode(iloc);
5781 
5782 	header = IHDR(inode, raw_inode);
5783 
5784 	/* No extended attributes present */
5785 	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
5786 	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
5787 		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
5788 		       EXT4_I(inode)->i_extra_isize, 0,
5789 		       new_extra_isize - EXT4_I(inode)->i_extra_isize);
5790 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
5791 		return 0;
5792 	}
5793 
5794 	/*
5795 	 * We may need to allocate external xattr block so we need quotas
5796 	 * initialized. Here we can be called with various locks held so we
5797 	 * cannot affort to initialize quotas ourselves. So just bail.
5798 	 */
5799 	if (dquot_initialize_needed(inode))
5800 		return -EAGAIN;
5801 
5802 	/* try to expand with EAs present */
5803 	error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
5804 					   raw_inode, handle);
5805 	if (error) {
5806 		/*
5807 		 * Inode size expansion failed; don't try again
5808 		 */
5809 		*no_expand = 1;
5810 	}
5811 
5812 	return error;
5813 }
5814 
5815 /*
5816  * Expand an inode by new_extra_isize bytes.
5817  * Returns 0 on success or negative error number on failure.
5818  */
5819 static int ext4_try_to_expand_extra_isize(struct inode *inode,
5820 					  unsigned int new_extra_isize,
5821 					  struct ext4_iloc iloc,
5822 					  handle_t *handle)
5823 {
5824 	int no_expand;
5825 	int error;
5826 
5827 	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
5828 		return -EOVERFLOW;
5829 
5830 	/*
5831 	 * In nojournal mode, we can immediately attempt to expand
5832 	 * the inode.  When journaled, we first need to obtain extra
5833 	 * buffer credits since we may write into the EA block
5834 	 * with this same handle. If journal_extend fails, then it will
5835 	 * only result in a minor loss of functionality for that inode.
5836 	 * If this is felt to be critical, then e2fsck should be run to
5837 	 * force a large enough s_min_extra_isize.
5838 	 */
5839 	if (ext4_journal_extend(handle,
5840 				EXT4_DATA_TRANS_BLOCKS(inode->i_sb), 0) != 0)
5841 		return -ENOSPC;
5842 
5843 	if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
5844 		return -EBUSY;
5845 
5846 	error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
5847 					  handle, &no_expand);
5848 	ext4_write_unlock_xattr(inode, &no_expand);
5849 
5850 	return error;
5851 }
5852 
5853 int ext4_expand_extra_isize(struct inode *inode,
5854 			    unsigned int new_extra_isize,
5855 			    struct ext4_iloc *iloc)
5856 {
5857 	handle_t *handle;
5858 	int no_expand;
5859 	int error, rc;
5860 
5861 	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
5862 		brelse(iloc->bh);
5863 		return -EOVERFLOW;
5864 	}
5865 
5866 	handle = ext4_journal_start(inode, EXT4_HT_INODE,
5867 				    EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
5868 	if (IS_ERR(handle)) {
5869 		error = PTR_ERR(handle);
5870 		brelse(iloc->bh);
5871 		return error;
5872 	}
5873 
5874 	ext4_write_lock_xattr(inode, &no_expand);
5875 
5876 	BUFFER_TRACE(iloc->bh, "get_write_access");
5877 	error = ext4_journal_get_write_access(handle, inode->i_sb, iloc->bh,
5878 					      EXT4_JTR_NONE);
5879 	if (error) {
5880 		brelse(iloc->bh);
5881 		goto out_unlock;
5882 	}
5883 
5884 	error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
5885 					  handle, &no_expand);
5886 
5887 	rc = ext4_mark_iloc_dirty(handle, inode, iloc);
5888 	if (!error)
5889 		error = rc;
5890 
5891 out_unlock:
5892 	ext4_write_unlock_xattr(inode, &no_expand);
5893 	ext4_journal_stop(handle);
5894 	return error;
5895 }
5896 
5897 /*
5898  * What we do here is to mark the in-core inode as clean with respect to inode
5899  * dirtiness (it may still be data-dirty).
5900  * This means that the in-core inode may be reaped by prune_icache
5901  * without having to perform any I/O.  This is a very good thing,
5902  * because *any* task may call prune_icache - even ones which
5903  * have a transaction open against a different journal.
5904  *
5905  * Is this cheating?  Not really.  Sure, we haven't written the
5906  * inode out, but prune_icache isn't a user-visible syncing function.
5907  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
5908  * we start and wait on commits.
5909  */
5910 int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
5911 				const char *func, unsigned int line)
5912 {
5913 	struct ext4_iloc iloc;
5914 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5915 	int err;
5916 
5917 	might_sleep();
5918 	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
5919 	err = ext4_reserve_inode_write(handle, inode, &iloc);
5920 	if (err)
5921 		goto out;
5922 
5923 	if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
5924 		ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
5925 					       iloc, handle);
5926 
5927 	err = ext4_mark_iloc_dirty(handle, inode, &iloc);
5928 out:
5929 	if (unlikely(err))
5930 		ext4_error_inode_err(inode, func, line, 0, err,
5931 					"mark_inode_dirty error");
5932 	return err;
5933 }
5934 
5935 /*
5936  * ext4_dirty_inode() is called from __mark_inode_dirty()
5937  *
5938  * We're really interested in the case where a file is being extended.
5939  * i_size has been changed by generic_commit_write() and we thus need
5940  * to include the updated inode in the current transaction.
5941  *
5942  * Also, dquot_alloc_block() will always dirty the inode when blocks
5943  * are allocated to the file.
5944  *
5945  * If the inode is marked synchronous, we don't honour that here - doing
5946  * so would cause a commit on atime updates, which we don't bother doing.
5947  * We handle synchronous inodes at the highest possible level.
5948  */
5949 void ext4_dirty_inode(struct inode *inode, int flags)
5950 {
5951 	handle_t *handle;
5952 
5953 	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
5954 	if (IS_ERR(handle))
5955 		return;
5956 	ext4_mark_inode_dirty(handle, inode);
5957 	ext4_journal_stop(handle);
5958 }
5959 
5960 int ext4_change_inode_journal_flag(struct inode *inode, int val)
5961 {
5962 	journal_t *journal;
5963 	handle_t *handle;
5964 	int err;
5965 	int alloc_ctx;
5966 
5967 	/*
5968 	 * We have to be very careful here: changing a data block's
5969 	 * journaling status dynamically is dangerous.  If we write a
5970 	 * data block to the journal, change the status and then delete
5971 	 * that block, we risk forgetting to revoke the old log record
5972 	 * from the journal and so a subsequent replay can corrupt data.
5973 	 * So, first we make sure that the journal is empty and that
5974 	 * nobody is changing anything.
5975 	 */
5976 
5977 	journal = EXT4_JOURNAL(inode);
5978 	if (!journal)
5979 		return 0;
5980 	if (is_journal_aborted(journal))
5981 		return -EROFS;
5982 
5983 	/* Wait for all existing dio workers */
5984 	inode_dio_wait(inode);
5985 
5986 	/*
5987 	 * Before flushing the journal and switching inode's aops, we have
5988 	 * to flush all dirty data the inode has. There can be outstanding
5989 	 * delayed allocations, there can be unwritten extents created by
5990 	 * fallocate or buffered writes in dioread_nolock mode covered by
5991 	 * dirty data which can be converted only after flushing the dirty
5992 	 * data (and journalled aops don't know how to handle these cases).
5993 	 */
5994 	if (val) {
5995 		filemap_invalidate_lock(inode->i_mapping);
5996 		err = filemap_write_and_wait(inode->i_mapping);
5997 		if (err < 0) {
5998 			filemap_invalidate_unlock(inode->i_mapping);
5999 			return err;
6000 		}
6001 	}
6002 
6003 	alloc_ctx = ext4_writepages_down_write(inode->i_sb);
6004 	jbd2_journal_lock_updates(journal);
6005 
6006 	/*
6007 	 * OK, there are no updates running now, and all cached data is
6008 	 * synced to disk.  We are now in a completely consistent state
6009 	 * which doesn't have anything in the journal, and we know that
6010 	 * no filesystem updates are running, so it is safe to modify
6011 	 * the inode's in-core data-journaling state flag now.
6012 	 */
6013 
6014 	if (val)
6015 		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6016 	else {
6017 		err = jbd2_journal_flush(journal, 0);
6018 		if (err < 0) {
6019 			jbd2_journal_unlock_updates(journal);
6020 			ext4_writepages_up_write(inode->i_sb, alloc_ctx);
6021 			return err;
6022 		}
6023 		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6024 	}
6025 	ext4_set_aops(inode);
6026 
6027 	jbd2_journal_unlock_updates(journal);
6028 	ext4_writepages_up_write(inode->i_sb, alloc_ctx);
6029 
6030 	if (val)
6031 		filemap_invalidate_unlock(inode->i_mapping);
6032 
6033 	/* Finally we can mark the inode as dirty. */
6034 
6035 	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
6036 	if (IS_ERR(handle))
6037 		return PTR_ERR(handle);
6038 
6039 	ext4_fc_mark_ineligible(inode->i_sb,
6040 		EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, handle);
6041 	err = ext4_mark_inode_dirty(handle, inode);
6042 	ext4_handle_sync(handle);
6043 	ext4_journal_stop(handle);
6044 	ext4_std_error(inode->i_sb, err);
6045 
6046 	return err;
6047 }
6048 
6049 static int ext4_bh_unmapped(handle_t *handle, struct inode *inode,
6050 			    struct buffer_head *bh)
6051 {
6052 	return !buffer_mapped(bh);
6053 }
6054 
6055 vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
6056 {
6057 	struct vm_area_struct *vma = vmf->vma;
6058 	struct folio *folio = page_folio(vmf->page);
6059 	loff_t size;
6060 	unsigned long len;
6061 	int err;
6062 	vm_fault_t ret;
6063 	struct file *file = vma->vm_file;
6064 	struct inode *inode = file_inode(file);
6065 	struct address_space *mapping = inode->i_mapping;
6066 	handle_t *handle;
6067 	get_block_t *get_block;
6068 	int retries = 0;
6069 
6070 	if (unlikely(IS_IMMUTABLE(inode)))
6071 		return VM_FAULT_SIGBUS;
6072 
6073 	sb_start_pagefault(inode->i_sb);
6074 	file_update_time(vma->vm_file);
6075 
6076 	filemap_invalidate_lock_shared(mapping);
6077 
6078 	err = ext4_convert_inline_data(inode);
6079 	if (err)
6080 		goto out_ret;
6081 
6082 	/*
6083 	 * On data journalling we skip straight to the transaction handle:
6084 	 * there's no delalloc; page truncated will be checked later; the
6085 	 * early return w/ all buffers mapped (calculates size/len) can't
6086 	 * be used; and there's no dioread_nolock, so only ext4_get_block.
6087 	 */
6088 	if (ext4_should_journal_data(inode))
6089 		goto retry_alloc;
6090 
6091 	/* Delalloc case is easy... */
6092 	if (test_opt(inode->i_sb, DELALLOC) &&
6093 	    !ext4_nonda_switch(inode->i_sb)) {
6094 		do {
6095 			err = block_page_mkwrite(vma, vmf,
6096 						   ext4_da_get_block_prep);
6097 		} while (err == -ENOSPC &&
6098 		       ext4_should_retry_alloc(inode->i_sb, &retries));
6099 		goto out_ret;
6100 	}
6101 
6102 	folio_lock(folio);
6103 	size = i_size_read(inode);
6104 	/* Page got truncated from under us? */
6105 	if (folio->mapping != mapping || folio_pos(folio) > size) {
6106 		folio_unlock(folio);
6107 		ret = VM_FAULT_NOPAGE;
6108 		goto out;
6109 	}
6110 
6111 	len = folio_size(folio);
6112 	if (folio_pos(folio) + len > size)
6113 		len = size - folio_pos(folio);
6114 	/*
6115 	 * Return if we have all the buffers mapped. This avoids the need to do
6116 	 * journal_start/journal_stop which can block and take a long time
6117 	 *
6118 	 * This cannot be done for data journalling, as we have to add the
6119 	 * inode to the transaction's list to writeprotect pages on commit.
6120 	 */
6121 	if (folio_buffers(folio)) {
6122 		if (!ext4_walk_page_buffers(NULL, inode, folio_buffers(folio),
6123 					    0, len, NULL,
6124 					    ext4_bh_unmapped)) {
6125 			/* Wait so that we don't change page under IO */
6126 			folio_wait_stable(folio);
6127 			ret = VM_FAULT_LOCKED;
6128 			goto out;
6129 		}
6130 	}
6131 	folio_unlock(folio);
6132 	/* OK, we need to fill the hole... */
6133 	if (ext4_should_dioread_nolock(inode))
6134 		get_block = ext4_get_block_unwritten;
6135 	else
6136 		get_block = ext4_get_block;
6137 retry_alloc:
6138 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
6139 				    ext4_writepage_trans_blocks(inode));
6140 	if (IS_ERR(handle)) {
6141 		ret = VM_FAULT_SIGBUS;
6142 		goto out;
6143 	}
6144 	/*
6145 	 * Data journalling can't use block_page_mkwrite() because it
6146 	 * will set_buffer_dirty() before do_journal_get_write_access()
6147 	 * thus might hit warning messages for dirty metadata buffers.
6148 	 */
6149 	if (!ext4_should_journal_data(inode)) {
6150 		err = block_page_mkwrite(vma, vmf, get_block);
6151 	} else {
6152 		folio_lock(folio);
6153 		size = i_size_read(inode);
6154 		/* Page got truncated from under us? */
6155 		if (folio->mapping != mapping || folio_pos(folio) > size) {
6156 			ret = VM_FAULT_NOPAGE;
6157 			goto out_error;
6158 		}
6159 
6160 		len = folio_size(folio);
6161 		if (folio_pos(folio) + len > size)
6162 			len = size - folio_pos(folio);
6163 
6164 		err = __block_write_begin(&folio->page, 0, len, ext4_get_block);
6165 		if (!err) {
6166 			ret = VM_FAULT_SIGBUS;
6167 			if (ext4_journal_folio_buffers(handle, folio, len))
6168 				goto out_error;
6169 		} else {
6170 			folio_unlock(folio);
6171 		}
6172 	}
6173 	ext4_journal_stop(handle);
6174 	if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
6175 		goto retry_alloc;
6176 out_ret:
6177 	ret = vmf_fs_error(err);
6178 out:
6179 	filemap_invalidate_unlock_shared(mapping);
6180 	sb_end_pagefault(inode->i_sb);
6181 	return ret;
6182 out_error:
6183 	folio_unlock(folio);
6184 	ext4_journal_stop(handle);
6185 	goto out;
6186 }
6187