xref: /linux/fs/f2fs/data.c (revision 7a5f1cd22d47f8ca4b760b6334378ae42c1bd24b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/data.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/sched/mm.h>
11 #include <linux/mpage.h>
12 #include <linux/writeback.h>
13 #include <linux/folio_batch.h>
14 #include <linux/blkdev.h>
15 #include <linux/bio.h>
16 #include <linux/blk-crypto.h>
17 #include <linux/swap.h>
18 #include <linux/prefetch.h>
19 #include <linux/uio.h>
20 #include <linux/sched/signal.h>
21 #include <linux/fiemap.h>
22 #include <linux/iomap.h>
23 
24 #include "f2fs.h"
25 #include "node.h"
26 #include "segment.h"
27 #include "iostat.h"
28 #include <trace/events/f2fs.h>
29 
30 #define NUM_PREALLOC_POST_READ_CTXS	128
31 
32 static struct kmem_cache *bio_post_read_ctx_cache;
33 static struct kmem_cache *bio_entry_slab;
34 static struct kmem_cache *ffs_entry_slab;
35 static mempool_t *bio_post_read_ctx_pool;
36 static struct bio_set f2fs_bioset;
37 
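/*
 * Per-folio read state: a large folio may be read by more than one bio, so
 * read_pages_pending tracks how many of its pages are still under I/O.
 * f2fs_finish_read_bio() ends the read on the folio only once this count
 * drops to zero, so the folio is unlocked exactly once.
 */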
38 struct f2fs_folio_state {
39 	spinlock_t		state_lock;
40 	unsigned int		read_pages_pending;
41 };
42 
43 #define	F2FS_BIO_POOL_SIZE	NR_CURSEG_TYPE
44 
45 int __init f2fs_init_bioset(void)
46 {
47 	return bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
48 					0, BIOSET_NEED_BVECS);
49 }
50 
51 void f2fs_destroy_bioset(void)
52 {
53 	bioset_exit(&f2fs_bioset);
54 }
55 
56 bool f2fs_is_cp_guaranteed(const struct folio *folio)
57 {
58 	struct address_space *mapping = folio->mapping;
59 	struct inode *inode;
60 	struct f2fs_sb_info *sbi;
61 
62 	if (fscrypt_is_bounce_folio(folio))
63 		return folio_test_f2fs_gcing(fscrypt_pagecache_folio(folio));
64 
65 	inode = mapping->host;
66 	sbi = F2FS_I_SB(inode);
67 
68 	if (inode->i_ino == F2FS_META_INO(sbi) ||
69 			inode->i_ino == F2FS_NODE_INO(sbi) ||
70 			S_ISDIR(inode->i_mode))
71 		return true;
72 
73 	if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
74 			folio_test_f2fs_gcing(folio))
75 		return true;
76 	return false;
77 }
78 
79 static enum count_type __read_io_type(struct folio *folio)
80 {
81 	struct address_space *mapping = folio->mapping;
82 
83 	if (mapping) {
84 		struct inode *inode = mapping->host;
85 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
86 
87 		if (inode->i_ino == F2FS_META_INO(sbi))
88 			return F2FS_RD_META;
89 
90 		if (inode->i_ino == F2FS_NODE_INO(sbi))
91 			return F2FS_RD_NODE;
92 	}
93 	return F2FS_RD_DATA;
94 }
95 
96 /* postprocessing steps for read bios */
97 enum bio_post_read_step {
98 #ifdef CONFIG_FS_ENCRYPTION
99 	STEP_DECRYPT	= BIT(0),
100 #else
101 	STEP_DECRYPT	= 0,	/* compile out the decryption-related code */
102 #endif
103 #ifdef CONFIG_F2FS_FS_COMPRESSION
104 	STEP_DECOMPRESS	= BIT(1),
105 #else
106 	STEP_DECOMPRESS	= 0,	/* compile out the decompression-related code */
107 #endif
108 #ifdef CONFIG_FS_VERITY
109 	STEP_VERITY	= BIT(2),
110 #else
111 	STEP_VERITY	= 0,	/* compile out the verity-related code */
112 #endif
113 };
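/*
 * Example (illustrative): reading an encrypted, fs-verity-protected file
 * enables STEP_DECRYPT | STEP_VERITY.  The steps are applied in enum order,
 * with verity last; see f2fs_verify_bio() for why verity must run last.
 */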
114 
115 struct bio_post_read_ctx {
116 	struct bio *bio;
117 	struct f2fs_sb_info *sbi;
118 	struct fsverity_info *vi;
119 	struct work_struct work;
120 	unsigned int enabled_steps;
121 	/*
122 	 * decompression_attempted keeps track of whether
123 	 * f2fs_end_read_compressed_page() has been called on the pages in the
124 	 * bio that belong to a compressed cluster yet.
125 	 */
126 	bool decompression_attempted;
127 	block_t fs_blkaddr;
128 };
129 
130 /*
131  * Update and unlock a bio's pages, and free the bio.
132  *
133  * This marks pages up-to-date only if there was no error in the bio (I/O error,
134  * decryption error, or verity error), as indicated by bio->bi_status.
135  *
136  * "Compressed pages" (pagecache pages backed by a compressed cluster on-disk)
137  * aren't marked up-to-date here, as decompression is done on a per-compression-
138  * cluster basis rather than a per-bio basis.  Instead, we must do only two
139  * things for each compressed page here: call f2fs_end_read_compressed_page()
140  * with failed=true if an error occurred before it would have normally gotten
141  * called (i.e., I/O error or decryption error, but *not* verity error), and
142  * release the bio's reference to the decompress_io_ctx of the page's cluster.
143  */
144 static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
145 {
146 	struct folio_iter fi;
147 	struct bio_post_read_ctx *ctx = bio->bi_private;
148 	unsigned long flags;
149 
150 	bio_for_each_folio_all(fi, bio) {
151 		struct folio *folio = fi.folio;
152 		unsigned nr_pages = fi.length >> PAGE_SHIFT;
153 		bool finished = true;
154 
155 		if (!folio_test_large(folio) &&
156 		    f2fs_is_compressed_page(folio)) {
157 			if (ctx && !ctx->decompression_attempted)
158 				f2fs_end_read_compressed_page(folio, true, 0,
159 							in_task);
160 			f2fs_put_folio_dic(folio, in_task);
161 			continue;
162 		}
163 
164 		if (folio_test_large(folio)) {
165 			struct f2fs_folio_state *ffs = folio->private;
166 
167 			spin_lock_irqsave(&ffs->state_lock, flags);
168 			ffs->read_pages_pending -= nr_pages;
169 			finished = !ffs->read_pages_pending;
170 			spin_unlock_irqrestore(&ffs->state_lock, flags);
171 		}
172 
173 		while (nr_pages--)
174 			dec_page_count(F2FS_F_SB(folio), __read_io_type(folio));
175 
176 		if (bio->bi_status == BLK_STS_OK &&
177 			F2FS_F_SB(folio)->node_inode && is_node_folio(folio) &&
178 			f2fs_sanity_check_node_footer(F2FS_F_SB(folio),
179 				folio, folio->index, NODE_TYPE_REGULAR, true))
180 			bio->bi_status = BLK_STS_IOERR;
181 
182 		if (finished)
183 			folio_end_read(folio, bio->bi_status == BLK_STS_OK);
184 	}
185 
186 	if (ctx)
187 		mempool_free(ctx, bio_post_read_ctx_pool);
188 	bio_put(bio);
189 }
190 
191 static void f2fs_verify_bio(struct work_struct *work)
192 {
193 	struct bio_post_read_ctx *ctx =
194 		container_of(work, struct bio_post_read_ctx, work);
195 	struct bio *bio = ctx->bio;
196 	bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);
197 	struct fsverity_info *vi = ctx->vi;
198 
199 	/*
200 	 * fsverity_verify_bio() may call readahead() again, and while verity
201 	 * will be disabled for this, decryption and/or decompression may still
202 	 * be needed, resulting in another bio_post_read_ctx being allocated.
203 	 * So to prevent deadlocks we need to release the current ctx to the
204 	 * mempool first.  This assumes that verity is the last post-read step.
205 	 */
206 	mempool_free(ctx, bio_post_read_ctx_pool);
207 	bio->bi_private = NULL;
208 
209 	/*
210 	 * Verify the bio's pages with fs-verity.  Exclude compressed pages,
211 	 * as those were handled separately by f2fs_end_read_compressed_page().
212 	 */
213 	if (may_have_compressed_pages) {
214 		struct folio_iter fi;
215 
216 		bio_for_each_folio_all(fi, bio) {
217 			struct folio *folio = fi.folio;
218 
219 			if (!f2fs_is_compressed_page(folio) &&
220 			    !fsverity_verify_folio(vi, folio)) {
221 				bio->bi_status = BLK_STS_IOERR;
222 				break;
223 			}
224 		}
225 	} else {
226 		fsverity_verify_bio(vi, bio);
227 	}
228 
229 	f2fs_finish_read_bio(bio, true);
230 }
231 
232 /*
233  * If the bio's data needs to be verified with fs-verity, then enqueue the
234  * verity work for the bio.  Otherwise finish the bio now.
235  *
236  * Note that to avoid deadlocks, the verity work can't be done on the
237  * decryption/decompression workqueue.  This is because verifying the data pages
238  * can involve reading verity metadata pages from the file, and these verity
239  * metadata pages may be encrypted and/or compressed.
240  */
241 static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
242 {
243 	struct bio_post_read_ctx *ctx = bio->bi_private;
244 
245 	if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
246 		INIT_WORK(&ctx->work, f2fs_verify_bio);
247 		fsverity_enqueue_verify_work(&ctx->work);
248 	} else {
249 		f2fs_finish_read_bio(bio, in_task);
250 	}
251 }
252 
253 /*
254  * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
255  * remaining page was read by @ctx->bio.
256  *
257  * Note that a bio may span clusters (even a mix of compressed and uncompressed
258  * clusters) or be for just part of a cluster.  STEP_DECOMPRESS just indicates
259  * that the bio includes at least one compressed page.  The actual decompression
260  * is done on a per-cluster basis, not a per-bio basis.
261  */
262 static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
263 		bool in_task)
264 {
265 	struct folio_iter fi;
266 	bool all_compressed = true;
267 	block_t blkaddr = ctx->fs_blkaddr;
268 
269 	bio_for_each_folio_all(fi, ctx->bio) {
270 		struct folio *folio = fi.folio;
271 
272 		if (f2fs_is_compressed_page(folio))
273 			f2fs_end_read_compressed_page(folio, false, blkaddr,
274 						      in_task);
275 		else
276 			all_compressed = false;
277 
278 		blkaddr++;
279 	}
280 
281 	ctx->decompression_attempted = true;
282 
283 	/*
284 	 * Optimization: if all the bio's pages are compressed, then scheduling
285 	 * the per-bio verity work is unnecessary, as verity will be fully
286 	 * handled at the compression cluster level.
287 	 */
288 	if (all_compressed)
289 		ctx->enabled_steps &= ~STEP_VERITY;
290 }
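/*
 * Example (illustrative): if a bio covers the final compressed page of
 * cluster A plus pages of an uncompressed cluster B, the loop above calls
 * f2fs_end_read_compressed_page() only for the cluster-A page (cluster A's
 * decompression starts once its last pending page is in), leaves
 * all_compressed false because of cluster B, and so keeps STEP_VERITY
 * enabled for the bio.
 */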
291 
292 static void f2fs_post_read_work(struct work_struct *work)
293 {
294 	struct bio_post_read_ctx *ctx =
295 		container_of(work, struct bio_post_read_ctx, work);
296 	struct bio *bio = ctx->bio;
297 
298 	if ((ctx->enabled_steps & STEP_DECRYPT) && !fscrypt_decrypt_bio(bio)) {
299 		f2fs_finish_read_bio(bio, true);
300 		return;
301 	}
302 
303 	if (ctx->enabled_steps & STEP_DECOMPRESS)
304 		f2fs_handle_step_decompress(ctx, true);
305 
306 	f2fs_verify_and_finish_bio(bio, true);
307 }
308 
309 static void f2fs_read_end_io(struct bio *bio)
310 {
311 	struct f2fs_sb_info *sbi = F2FS_F_SB(bio_first_folio_all(bio));
312 	struct bio_post_read_ctx *ctx;
313 	bool intask = in_task() && !irqs_disabled();
314 
315 	iostat_update_and_unbind_ctx(bio);
316 	ctx = bio->bi_private;
317 
318 	if (time_to_inject(sbi, FAULT_READ_IO))
319 		bio->bi_status = BLK_STS_IOERR;
320 
321 	if (bio->bi_status != BLK_STS_OK) {
322 		f2fs_finish_read_bio(bio, intask);
323 		return;
324 	}
325 
326 	if (ctx) {
327 		unsigned int enabled_steps = ctx->enabled_steps &
328 					(STEP_DECRYPT | STEP_DECOMPRESS);
329 
330 		/*
331 		 * If decompression is the only enabled step (i.e. no decryption
332 		 * is needed), we can handle it here without the workqueue.
333 		 */
334 		if (enabled_steps == STEP_DECOMPRESS &&
335 				!f2fs_low_mem_mode(sbi)) {
336 			f2fs_handle_step_decompress(ctx, intask);
337 		} else if (enabled_steps) {
338 			INIT_WORK(&ctx->work, f2fs_post_read_work);
339 			queue_work(ctx->sbi->post_read_wq, &ctx->work);
340 			return;
341 		}
342 	}
343 
344 	f2fs_verify_and_finish_bio(bio, intask);
345 }
346 
347 static void f2fs_write_end_io(struct bio *bio)
348 {
349 	struct f2fs_sb_info *sbi;
350 	struct folio_iter fi;
351 
352 	iostat_update_and_unbind_ctx(bio);
353 	sbi = bio->bi_private;
354 
355 	if (time_to_inject(sbi, FAULT_WRITE_IO))
356 		bio->bi_status = BLK_STS_IOERR;
357 
358 	bio_for_each_folio_all(fi, bio) {
359 		struct folio *folio = fi.folio;
360 		enum count_type type;
361 
362 		if (fscrypt_is_bounce_folio(folio)) {
363 			struct folio *io_folio = folio;
364 
365 			folio = fscrypt_pagecache_folio(io_folio);
366 			fscrypt_free_bounce_page(&io_folio->page);
367 		}
368 
369 #ifdef CONFIG_F2FS_FS_COMPRESSION
370 		if (f2fs_is_compressed_page(folio)) {
371 			f2fs_compress_write_end_io(bio, folio);
372 			continue;
373 		}
374 #endif
375 
376 		type = WB_DATA_TYPE(folio, false);
377 
378 		if (unlikely(bio->bi_status != BLK_STS_OK)) {
379 			mapping_set_error(folio->mapping, -EIO);
380 			if (type == F2FS_WB_CP_DATA)
381 				f2fs_stop_checkpoint(sbi, true,
382 						STOP_CP_REASON_WRITE_FAIL);
383 		}
384 
385 		if (is_node_folio(folio)) {
386 			f2fs_sanity_check_node_footer(sbi, folio,
387 				folio->index, NODE_TYPE_REGULAR, true);
388 			f2fs_bug_on(sbi, folio->index != nid_of_node(folio));
389 		}
390 		if (f2fs_in_warm_node_list(folio))
391 			f2fs_del_fsync_node_entry(sbi, folio);
392 
393 		dec_page_count(sbi, type);
394 
395 		/*
396 		 * we should access sbi before folio_end_writeback() to
397 		 * avoid racing w/ kill_f2fs_super()
398 		 */
399 		if (type == F2FS_WB_CP_DATA && !get_pages(sbi, type) &&
400 				wq_has_sleeper(&sbi->cp_wait))
401 			wake_up(&sbi->cp_wait);
402 
403 		folio_clear_f2fs_gcing(folio);
404 		folio_end_writeback(folio);
405 	}
406 
407 	bio_put(bio);
408 }
409 
410 #ifdef CONFIG_BLK_DEV_ZONED
411 static void f2fs_zone_write_end_io(struct bio *bio)
412 {
413 	struct f2fs_bio_info *io = (struct f2fs_bio_info *)bio->bi_private;
414 
415 	bio->bi_private = io->bi_private;
416 	complete(&io->zone_wait);
417 	f2fs_write_end_io(bio);
418 }
419 #endif
420 
421 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
422 		block_t blk_addr, sector_t *sector)
423 {
424 	struct block_device *bdev = sbi->sb->s_bdev;
425 	int i;
426 
427 	if (f2fs_is_multi_device(sbi)) {
428 		for (i = 0; i < sbi->s_ndevs; i++) {
429 			if (FDEV(i).start_blk <= blk_addr &&
430 			    FDEV(i).end_blk >= blk_addr) {
431 				blk_addr -= FDEV(i).start_blk;
432 				bdev = FDEV(i).bdev;
433 				break;
434 			}
435 		}
436 	}
437 
438 	if (sector)
439 		*sector = SECTOR_FROM_BLOCK(blk_addr);
440 	return bdev;
441 }
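/*
 * Worked example (hypothetical layout): with FDEV(0) covering blocks
 * [0, 999] and FDEV(1) covering [1000, 1999], blk_addr == 1500 selects
 * FDEV(1).bdev and, if @sector is non-NULL, stores SECTOR_FROM_BLOCK(500),
 * i.e. the offset relative to the start of the selected device.
 */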
442 
443 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
444 {
445 	int i;
446 
447 	if (!f2fs_is_multi_device(sbi))
448 		return 0;
449 
450 	for (i = 0; i < sbi->s_ndevs; i++)
451 		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
452 			return i;
453 	return 0;
454 }
455 
456 static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
457 {
458 	unsigned int temp_mask = GENMASK(NR_TEMP_TYPE - 1, 0);
459 	unsigned int fua_flag, meta_flag, io_flag;
460 	blk_opf_t op_flags = 0;
461 
462 	if (fio->op != REQ_OP_WRITE)
463 		return 0;
464 	if (fio->type == DATA)
465 		io_flag = fio->sbi->data_io_flag;
466 	else if (fio->type == NODE)
467 		io_flag = fio->sbi->node_io_flag;
468 	else
469 		return 0;
470 
471 	fua_flag = io_flag & temp_mask;
472 	meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;
473 
474 	/*
475 	 * data/node io flag bits per temp:
476 	 *      REQ_META     |      REQ_FUA      |
477 	 *    5 |    4 |   3 |    2 |    1 |   0 |
478 	 * Cold | Warm | Hot | Cold | Warm | Hot |
479 	 */
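	/*
	 * Worked example (illustrative, assuming NR_TEMP_TYPE == 3 for
	 * HOT/WARM/COLD): io_flag == 0b001001 gives fua_flag == 0b001 and
	 * meta_flag == 0b001, so a HOT write gets REQ_META | REQ_FUA while
	 * WARM and COLD writes get neither.
	 */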
480 	if (BIT(fio->temp) & meta_flag)
481 		op_flags |= REQ_META;
482 	if (BIT(fio->temp) & fua_flag)
483 		op_flags |= REQ_FUA;
484 
485 	if (fio->type == DATA &&
486 	    F2FS_I(fio->folio->mapping->host)->ioprio_hint == F2FS_IOPRIO_WRITE)
487 		op_flags |= REQ_PRIO;
488 
489 	return op_flags;
490 }
491 
492 static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
493 {
494 	struct f2fs_sb_info *sbi = fio->sbi;
495 	struct block_device *bdev;
496 	sector_t sector;
497 	struct bio *bio;
498 
499 	bdev = f2fs_target_device(sbi, fio->new_blkaddr, &sector);
500 	bio = bio_alloc_bioset(bdev, npages,
501 				fio->op | fio->op_flags | f2fs_io_flags(fio),
502 				GFP_NOIO, &f2fs_bioset);
503 	bio->bi_iter.bi_sector = sector;
504 	if (is_read_io(fio->op)) {
505 		bio->bi_end_io = f2fs_read_end_io;
506 		bio->bi_private = NULL;
507 	} else {
508 		bio->bi_end_io = f2fs_write_end_io;
509 		bio->bi_private = sbi;
510 		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
511 						fio->type, fio->temp);
512 	}
513 	iostat_alloc_and_bind_ctx(sbi, bio, NULL);
514 
515 	if (fio->io_wbc)
516 		wbc_init_bio(fio->io_wbc, bio);
517 
518 	return bio;
519 }
520 
521 static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
522 				  pgoff_t first_idx,
523 				  const struct f2fs_io_info *fio,
524 				  gfp_t gfp_mask)
525 {
526 	/*
527 	 * The f2fs garbage collector sets ->encrypted_page when it wants to
528 	 * read/write raw data without encryption.
529 	 */
530 	if (!fio || !fio->encrypted_page)
531 		fscrypt_set_bio_crypt_ctx(bio, inode,
532 				(loff_t)first_idx << inode->i_blkbits,
533 				gfp_mask);
534 }
535 
536 static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
537 				     pgoff_t next_idx,
538 				     const struct f2fs_io_info *fio)
539 {
540 	/*
541 	 * The f2fs garbage collector sets ->encrypted_page when it wants to
542 	 * read/write raw data without encryption.
543 	 */
544 	if (fio && fio->encrypted_page)
545 		return !bio_has_crypt_ctx(bio);
546 
547 	return fscrypt_mergeable_bio(bio, inode,
548 			(loff_t)next_idx << inode->i_blkbits);
549 }
550 
551 void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
552 				 enum page_type type)
553 {
554 	if (!bio)
555 		return;
556 
557 	WARN_ON_ONCE(!is_read_io(bio_op(bio)));
558 	trace_f2fs_submit_read_bio(sbi->sb, type, bio);
559 
560 	iostat_update_submit_ctx(bio, type);
561 	blk_crypto_submit_bio(bio);
562 }
563 
564 static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
565 				  enum page_type type)
566 {
567 	WARN_ON_ONCE(is_read_io(bio_op(bio)));
568 	trace_f2fs_submit_write_bio(sbi->sb, type, bio);
569 	iostat_update_submit_ctx(bio, type);
570 	blk_crypto_submit_bio(bio);
571 }
572 
573 static void __submit_merged_bio(struct f2fs_bio_info *io)
574 {
575 	struct f2fs_io_info *fio = &io->fio;
576 
577 	if (!io->bio)
578 		return;
579 
580 	if (is_read_io(fio->op)) {
581 		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
582 		f2fs_submit_read_bio(io->sbi, io->bio, fio->type);
583 	} else {
584 		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
585 		f2fs_submit_write_bio(io->sbi, io->bio, fio->type);
586 	}
587 	io->bio = NULL;
588 }
589 
590 static bool __has_merged_page(struct bio *bio, struct inode *inode,
591 						struct folio *folio, nid_t ino)
592 {
593 	struct folio_iter fi;
594 
595 	if (!bio)
596 		return false;
597 
598 	if (!inode && !folio && !ino)
599 		return true;
600 
601 	bio_for_each_folio_all(fi, bio) {
602 		struct folio *target = fi.folio;
603 
604 		if (fscrypt_is_bounce_folio(target)) {
605 			target = fscrypt_pagecache_folio(target);
606 			if (IS_ERR(target))
607 				continue;
608 		}
609 		if (f2fs_is_compressed_page(target)) {
610 			target = f2fs_compress_control_folio(target);
611 			if (IS_ERR(target))
612 				continue;
613 		}
614 
615 		if (inode && inode == target->mapping->host)
616 			return true;
617 		if (folio && folio == target)
618 			return true;
619 		if (ino && ino == ino_of_node(target))
620 			return true;
621 	}
622 
623 	return false;
624 }
625 
626 int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi)
627 {
628 	int i;
629 
630 	for (i = 0; i < NR_PAGE_TYPE; i++) {
631 		int n = (i == META) ? 1 : NR_TEMP_TYPE;
632 		int j;
633 
634 		sbi->write_io[i] = f2fs_kmalloc(sbi,
635 				array_size(n, sizeof(struct f2fs_bio_info)),
636 				GFP_KERNEL);
637 		if (!sbi->write_io[i])
638 			return -ENOMEM;
639 
640 		for (j = HOT; j < n; j++) {
641 			struct f2fs_bio_info *io = &sbi->write_io[i][j];
642 
643 			init_f2fs_rwsem_trace(&io->io_rwsem, sbi,
644 						LOCK_NAME_IO_RWSEM);
645 			io->sbi = sbi;
646 			io->bio = NULL;
647 			io->last_block_in_bio = 0;
648 			spin_lock_init(&io->io_lock);
649 			INIT_LIST_HEAD(&io->io_list);
650 			INIT_LIST_HEAD(&io->bio_list);
651 			init_f2fs_rwsem(&io->bio_list_lock);
652 #ifdef CONFIG_BLK_DEV_ZONED
653 			init_completion(&io->zone_wait);
654 			io->zone_pending_bio = NULL;
655 			io->bi_private = NULL;
656 #endif
657 		}
658 	}
659 
660 	return 0;
661 }
662 
663 static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
664 				enum page_type type, enum temp_type temp)
665 {
666 	enum page_type btype = PAGE_TYPE_OF_BIO(type);
667 	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
668 	struct f2fs_lock_context lc;
669 
670 	f2fs_down_write_trace(&io->io_rwsem, &lc);
671 
672 	if (!io->bio)
673 		goto unlock_out;
674 
675 	/* change META to META_FLUSH in the checkpoint procedure */
676 	if (type >= META_FLUSH) {
677 		io->fio.type = META_FLUSH;
678 		io->bio->bi_opf |= REQ_META | REQ_PRIO | REQ_SYNC;
679 		if (!test_opt(sbi, NOBARRIER))
680 			io->bio->bi_opf |= REQ_PREFLUSH | REQ_FUA;
681 	}
682 	__submit_merged_bio(io);
683 unlock_out:
684 	f2fs_up_write_trace(&io->io_rwsem, &lc);
685 }
686 
687 static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
688 				struct inode *inode, struct folio *folio,
689 				nid_t ino, enum page_type type, bool writeback)
690 {
691 	enum temp_type temp;
692 	bool ret = true;
693 	bool force = !inode && !folio && !ino;
694 
695 	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
696 		if (!force)	{
697 			enum page_type btype = PAGE_TYPE_OF_BIO(type);
698 			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
699 			struct f2fs_lock_context lc;
700 
701 			f2fs_down_read_trace(&io->io_rwsem, &lc);
702 			ret = __has_merged_page(io->bio, inode, folio, ino);
703 			f2fs_up_read_trace(&io->io_rwsem, &lc);
704 		}
705 		if (ret) {
706 			__f2fs_submit_merged_write(sbi, type, temp);
707 			/*
708 			 * For the wait-on-writeback case, once the bio that owns
709 			 * the folio has been submitted, there is no need to submit
710 			 * bios of the other temp types.
711 			 */
712 			if (writeback)
713 				break;
714 		}
715 
716 		/* TODO: meta pages only use the HOT temp for now. */
717 		if (type >= META)
718 			break;
719 	}
720 }
721 
722 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
723 {
724 	__submit_merged_write_cond(sbi, NULL, NULL, 0, type, false);
725 }
726 
727 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
728 				struct inode *inode, struct folio *folio,
729 				nid_t ino, enum page_type type)
730 {
731 	__submit_merged_write_cond(sbi, inode, folio, ino, type, false);
732 }
733 
734 void f2fs_submit_merged_write_folio(struct f2fs_sb_info *sbi,
735 				struct folio *folio, enum page_type type)
736 {
737 	__submit_merged_write_cond(sbi, NULL, folio, 0, type, true);
738 }
739 
740 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
741 {
742 	f2fs_submit_merged_write(sbi, DATA);
743 	f2fs_submit_merged_write(sbi, NODE);
744 	f2fs_submit_merged_write(sbi, META);
745 }
746 
747 /*
748  * Fill the locked folio with the data located at the given block address.
749  * The caller needs to unlock the folio on failure.
750  */
751 int f2fs_submit_page_bio(struct f2fs_io_info *fio)
752 {
753 	struct bio *bio;
754 	struct folio *fio_folio = fio->folio;
755 	struct folio *data_folio = fio->encrypted_page ?
756 			page_folio(fio->encrypted_page) : fio_folio;
757 
758 	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
759 			fio->is_por ? META_POR : (__is_meta_io(fio) ?
760 			META_GENERIC : DATA_GENERIC_ENHANCE)))
761 		return -EFSCORRUPTED;
762 
763 	trace_f2fs_submit_folio_bio(data_folio, fio);
764 
765 	/* Allocate a new bio */
766 	bio = __bio_alloc(fio, 1);
767 
768 	f2fs_set_bio_crypt_ctx(bio, fio_folio->mapping->host,
769 			fio_folio->index, fio, GFP_NOIO);
770 	bio_add_folio_nofail(bio, data_folio, folio_size(data_folio), 0);
771 
772 	if (fio->io_wbc && !is_read_io(fio->op))
773 		wbc_account_cgroup_owner(fio->io_wbc, fio_folio, PAGE_SIZE);
774 
775 	inc_page_count(fio->sbi, is_read_io(fio->op) ?
776 			__read_io_type(data_folio) : WB_DATA_TYPE(fio->folio, false));
777 
778 	if (is_read_io(bio_op(bio)))
779 		f2fs_submit_read_bio(fio->sbi, bio, fio->type);
780 	else
781 		f2fs_submit_write_bio(fio->sbi, bio, fio->type);
782 	return 0;
783 }
784 
785 static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
786 				block_t last_blkaddr, block_t cur_blkaddr)
787 {
788 	if (unlikely(sbi->max_io_bytes &&
789 			bio->bi_iter.bi_size >= sbi->max_io_bytes))
790 		return false;
791 	if (last_blkaddr + 1 != cur_blkaddr)
792 		return false;
793 	return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
794 }
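/*
 * E.g. (illustrative): a bio whose last block is 100 can absorb block 101
 * if both live on the same target device and the bio hasn't reached
 * sbi->max_io_bytes; a non-contiguous block such as 102 forces a new bio.
 */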
795 
796 static bool io_type_is_mergeable(struct f2fs_bio_info *io,
797 						struct f2fs_io_info *fio)
798 {
799 	blk_opf_t mask = ~(REQ_PREFLUSH | REQ_FUA);
800 
801 	if (io->fio.op != fio->op)
802 		return false;
803 	return (io->fio.op_flags & mask) == (fio->op_flags & mask);
804 }
805 
806 static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
807 					struct f2fs_bio_info *io,
808 					struct f2fs_io_info *fio,
809 					block_t last_blkaddr,
810 					block_t cur_blkaddr)
811 {
812 	if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
813 		return false;
814 	return io_type_is_mergeable(io, fio);
815 }
816 
817 static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
818 				struct folio *folio, enum temp_type temp)
819 {
820 	struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
821 	struct bio_entry *be;
822 
823 	be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS, true, NULL);
824 	be->bio = bio;
825 	bio_get(bio);
826 
827 	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
828 
829 	f2fs_down_write(&io->bio_list_lock);
830 	list_add_tail(&be->list, &io->bio_list);
831 	f2fs_up_write(&io->bio_list_lock);
832 }
833 
834 static void del_bio_entry(struct bio_entry *be)
835 {
836 	list_del(&be->list);
837 	kmem_cache_free(bio_entry_slab, be);
838 }
839 
840 static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
841 							struct folio *folio)
842 {
843 	struct folio *fio_folio = fio->folio;
844 	struct f2fs_sb_info *sbi = fio->sbi;
845 	enum temp_type temp;
846 	bool found = false;
847 	int ret = -EAGAIN;
848 
849 	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
850 		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
851 		struct list_head *head = &io->bio_list;
852 		struct bio_entry *be;
853 
854 		f2fs_down_write(&io->bio_list_lock);
855 		list_for_each_entry(be, head, list) {
856 			if (be->bio != *bio)
857 				continue;
858 
859 			found = true;
860 
861 			f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
862 							    *fio->last_block,
863 							    fio->new_blkaddr));
864 			if (f2fs_crypt_mergeable_bio(*bio,
865 					fio_folio->mapping->host,
866 					fio_folio->index, fio) &&
867 			    bio_add_folio(*bio, folio, folio_size(folio), 0)) {
868 				ret = 0;
869 				break;
870 			}
871 
872 			/* page can't be merged into bio; submit the bio */
873 			del_bio_entry(be);
874 			f2fs_submit_write_bio(sbi, *bio, DATA);
875 			break;
876 		}
877 		f2fs_up_write(&io->bio_list_lock);
878 	}
879 
880 	if (ret) {
881 		bio_put(*bio);
882 		*bio = NULL;
883 	}
884 
885 	return ret;
886 }
887 
888 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
889 					struct bio **bio, struct folio *folio)
890 {
891 	enum temp_type temp;
892 	bool found = false;
893 	struct bio *target = bio ? *bio : NULL;
894 
895 	f2fs_bug_on(sbi, !target && !folio);
896 
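	/*
	 * Two-pass search: first scan each bio list under the read lock to
	 * see whether a matching bio exists at all, then re-scan under the
	 * write lock to actually unlink the entry.  The cheap read-locked
	 * pass avoids taking the write lock when there is nothing to submit.
	 */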
897 	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
898 		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
899 		struct list_head *head = &io->bio_list;
900 		struct bio_entry *be;
901 
902 		if (list_empty(head))
903 			continue;
904 
905 		f2fs_down_read(&io->bio_list_lock);
906 		list_for_each_entry(be, head, list) {
907 			if (target)
908 				found = (target == be->bio);
909 			else
910 				found = __has_merged_page(be->bio, NULL,
911 							folio, 0);
912 			if (found)
913 				break;
914 		}
915 		f2fs_up_read(&io->bio_list_lock);
916 
917 		if (!found)
918 			continue;
919 
920 		found = false;
921 
922 		f2fs_down_write(&io->bio_list_lock);
923 		list_for_each_entry(be, head, list) {
924 			if (target)
925 				found = (target == be->bio);
926 			else
927 				found = __has_merged_page(be->bio, NULL,
928 							folio, 0);
929 			if (found) {
930 				target = be->bio;
931 				del_bio_entry(be);
932 				break;
933 			}
934 		}
935 		f2fs_up_write(&io->bio_list_lock);
936 	}
937 
938 	if (found)
939 		f2fs_submit_write_bio(sbi, target, DATA);
940 	if (bio && *bio) {
941 		bio_put(*bio);
942 		*bio = NULL;
943 	}
944 }
945 
946 int f2fs_merge_page_bio(struct f2fs_io_info *fio)
947 {
948 	struct bio *bio = *fio->bio;
949 	struct folio *data_folio = fio->encrypted_page ?
950 			page_folio(fio->encrypted_page) : fio->folio;
951 	struct folio *folio = fio->folio;
952 
953 	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
954 			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
955 		return -EFSCORRUPTED;
956 
957 	trace_f2fs_submit_folio_bio(data_folio, fio);
958 
959 	if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
960 						fio->new_blkaddr))
961 		f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
962 alloc_new:
963 	if (!bio) {
964 		bio = __bio_alloc(fio, BIO_MAX_VECS);
965 		f2fs_set_bio_crypt_ctx(bio, folio->mapping->host,
966 				folio->index, fio, GFP_NOIO);
967 
968 		add_bio_entry(fio->sbi, bio, data_folio, fio->temp);
969 	} else {
970 		if (add_ipu_page(fio, &bio, data_folio))
971 			goto alloc_new;
972 	}
973 
974 	if (fio->io_wbc)
975 		wbc_account_cgroup_owner(fio->io_wbc, folio, folio_size(folio));
976 
977 	inc_page_count(fio->sbi, WB_DATA_TYPE(folio, false));
978 
979 	*fio->last_block = fio->new_blkaddr;
980 	*fio->bio = bio;
981 
982 	return 0;
983 }
984 
985 #ifdef CONFIG_BLK_DEV_ZONED
986 static bool is_end_zone_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr)
987 {
988 	struct block_device *bdev = sbi->sb->s_bdev;
989 	int devi = 0;
990 
991 	if (f2fs_is_multi_device(sbi)) {
992 		devi = f2fs_target_device_index(sbi, blkaddr);
993 		if (blkaddr < FDEV(devi).start_blk ||
994 		    blkaddr > FDEV(devi).end_blk) {
995 			f2fs_err(sbi, "Invalid block %x", blkaddr);
996 			return false;
997 		}
998 		blkaddr -= FDEV(devi).start_blk;
999 		bdev = FDEV(devi).bdev;
1000 	}
1001 	return bdev_is_zoned(bdev) &&
1002 		f2fs_blkz_is_seq(sbi, devi, blkaddr) &&
1003 		(blkaddr % sbi->blocks_per_blkz == sbi->blocks_per_blkz - 1);
1004 }
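/*
 * Example (illustrative): with sbi->blocks_per_blkz == 512, block 511 of a
 * sequential zone is that zone's last block, so this returns true and
 * f2fs_submit_page_write() will submit the merged bio and track it via
 * io->zone_pending_bio until the zone write completes.
 */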
1005 #endif
1006 
1007 void f2fs_submit_page_write(struct f2fs_io_info *fio)
1008 {
1009 	struct f2fs_sb_info *sbi = fio->sbi;
1010 	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
1011 	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
1012 	struct folio *bio_folio;
1013 	struct f2fs_lock_context lc;
1014 	enum count_type type;
1015 
1016 	f2fs_bug_on(sbi, is_read_io(fio->op));
1017 
1018 	f2fs_down_write_trace(&io->io_rwsem, &lc);
1019 next:
1020 #ifdef CONFIG_BLK_DEV_ZONED
1021 	if (f2fs_sb_has_blkzoned(sbi) && btype < META && io->zone_pending_bio) {
1022 		wait_for_completion_io(&io->zone_wait);
1023 		bio_put(io->zone_pending_bio);
1024 		io->zone_pending_bio = NULL;
1025 		io->bi_private = NULL;
1026 	}
1027 #endif
1028 
1029 	if (fio->in_list) {
1030 		spin_lock(&io->io_lock);
1031 		if (list_empty(&io->io_list)) {
1032 			spin_unlock(&io->io_lock);
1033 			goto out;
1034 		}
1035 		fio = list_first_entry(&io->io_list,
1036 						struct f2fs_io_info, list);
1037 		list_del(&fio->list);
1038 		spin_unlock(&io->io_lock);
1039 	}
1040 
1041 	verify_fio_blkaddr(fio);
1042 
1043 	if (fio->encrypted_page)
1044 		bio_folio = page_folio(fio->encrypted_page);
1045 	else if (fio->compressed_page)
1046 		bio_folio = page_folio(fio->compressed_page);
1047 	else
1048 		bio_folio = fio->folio;
1049 
1050 	/* set submitted = true as a return value */
1051 	fio->submitted = 1;
1052 
1053 	type = WB_DATA_TYPE(bio_folio, fio->compressed_page);
1054 	inc_page_count(sbi, type);
1055 
1056 	if (io->bio &&
1057 	    (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
1058 			      fio->new_blkaddr) ||
1059 	     !f2fs_crypt_mergeable_bio(io->bio, fio_inode(fio),
1060 				bio_folio->index, fio)))
1061 		__submit_merged_bio(io);
1062 alloc_new:
1063 	if (io->bio == NULL) {
1064 		io->bio = __bio_alloc(fio, BIO_MAX_VECS);
1065 		f2fs_set_bio_crypt_ctx(io->bio, fio_inode(fio),
1066 				bio_folio->index, fio, GFP_NOIO);
1067 		io->fio = *fio;
1068 	}
1069 
1070 	if (!bio_add_folio(io->bio, bio_folio, folio_size(bio_folio), 0)) {
1071 		__submit_merged_bio(io);
1072 		goto alloc_new;
1073 	}
1074 
1075 	if (fio->io_wbc)
1076 		wbc_account_cgroup_owner(fio->io_wbc, fio->folio,
1077 				folio_size(fio->folio));
1078 
1079 	io->last_block_in_bio = fio->new_blkaddr;
1080 
1081 	trace_f2fs_submit_folio_write(fio->folio, fio);
1082 #ifdef CONFIG_BLK_DEV_ZONED
1083 	if (f2fs_sb_has_blkzoned(sbi) && btype < META &&
1084 			is_end_zone_blkaddr(sbi, fio->new_blkaddr)) {
1085 		bio_get(io->bio);
1086 		reinit_completion(&io->zone_wait);
1087 		io->bi_private = io->bio->bi_private;
1088 		io->bio->bi_private = io;
1089 		io->bio->bi_end_io = f2fs_zone_write_end_io;
1090 		io->zone_pending_bio = io->bio;
1091 		__submit_merged_bio(io);
1092 	}
1093 #endif
1094 	if (fio->in_list)
1095 		goto next;
1096 out:
1097 	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
1098 				!f2fs_is_checkpoint_ready(sbi))
1099 		__submit_merged_bio(io);
1100 	f2fs_up_write_trace(&io->io_rwsem, &lc);
1101 }
1102 
1103 static struct bio *f2fs_grab_read_bio(struct inode *inode,
1104 				      struct fsverity_info *vi, block_t blkaddr,
1105 				      unsigned nr_pages, blk_opf_t op_flag,
1106 				      pgoff_t first_idx, bool for_write)
1107 {
1108 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1109 	struct bio *bio;
1110 	struct bio_post_read_ctx *ctx = NULL;
1111 	unsigned int post_read_steps = 0;
1112 	sector_t sector;
1113 	struct block_device *bdev = f2fs_target_device(sbi, blkaddr, &sector);
1114 
1115 	bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages),
1116 			       REQ_OP_READ | op_flag,
1117 			       for_write ? GFP_NOIO : GFP_KERNEL, &f2fs_bioset);
1118 	bio->bi_iter.bi_sector = sector;
1119 	f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
1120 	bio->bi_end_io = f2fs_read_end_io;
1121 
1122 	if (fscrypt_inode_uses_fs_layer_crypto(inode))
1123 		post_read_steps |= STEP_DECRYPT;
1124 
1125 	if (vi)
1126 		post_read_steps |= STEP_VERITY;
1127 
1128 	/*
1129 	 * STEP_DECOMPRESS is handled specially, since a compressed file might
1130 	 * contain both compressed and uncompressed clusters.  We'll allocate a
1131 	 * bio_post_read_ctx if the file is compressed, but the caller is
1132 	 * responsible for enabling STEP_DECOMPRESS if it's actually needed.
1133 	 */
1134 
1135 	if (post_read_steps || f2fs_compressed_file(inode)) {
1136 		/* Due to the mempool, this never fails. */
1137 		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
1138 		ctx->bio = bio;
1139 		ctx->sbi = sbi;
1140 		ctx->vi = vi;
1141 		ctx->enabled_steps = post_read_steps;
1142 		ctx->fs_blkaddr = blkaddr;
1143 		ctx->decompression_attempted = false;
1144 		bio->bi_private = ctx;
1145 	}
1146 	iostat_alloc_and_bind_ctx(sbi, bio, ctx);
1147 
1148 	return bio;
1149 }
1150 
1151 /* This can handle encrypted files as well */
1152 static void f2fs_submit_page_read(struct inode *inode, struct fsverity_info *vi,
1153 				  struct folio *folio, block_t blkaddr,
1154 				  blk_opf_t op_flags, bool for_write)
1155 {
1156 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1157 	struct bio *bio;
1158 
1159 	bio = f2fs_grab_read_bio(inode, vi, blkaddr, 1, op_flags, folio->index,
1160 				 for_write);
1161 
1162 	/* wait for GCed page writeback via META_MAPPING */
1163 	f2fs_wait_on_block_writeback(inode, blkaddr);
1164 
1165 	if (!bio_add_folio(bio, folio, PAGE_SIZE, 0))
1166 		f2fs_bug_on(sbi, 1);
1167 
1168 	inc_page_count(sbi, F2FS_RD_DATA);
1169 	f2fs_update_iostat(sbi, NULL, FS_DATA_READ_IO, F2FS_BLKSIZE);
1170 	f2fs_submit_read_bio(sbi, bio, DATA);
1171 }
1172 
1173 static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
1174 {
1175 	__le32 *addr = get_dnode_addr(dn->inode, dn->node_folio);
1176 
1177 	dn->data_blkaddr = blkaddr;
1178 	addr[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
1179 }
1180 
1181 /*
1182  * Lock ordering for the change of data block address:
1183  * ->data_folio
1184  *  ->node_folio
1185  *    update block addresses in the node folio
1186  */
1187 void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
1188 {
1189 	f2fs_folio_wait_writeback(dn->node_folio, NODE, true, true);
1190 	__set_data_blkaddr(dn, blkaddr);
1191 	if (folio_mark_dirty(dn->node_folio))
1192 		dn->node_changed = true;
1193 }
1194 
1195 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
1196 {
1197 	f2fs_set_data_blkaddr(dn, blkaddr);
1198 	f2fs_update_read_extent_cache(dn);
1199 }
1200 
1201 /* dn->ofs_in_node will be returned with up-to-date last block pointer */
1202 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
1203 {
1204 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1205 	int err;
1206 
1207 	if (!count)
1208 		return 0;
1209 
1210 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1211 		return -EPERM;
1212 	err = inc_valid_block_count(sbi, dn->inode, &count, true);
1213 	if (unlikely(err))
1214 		return err;
1215 
1216 	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
1217 						dn->ofs_in_node, count);
1218 
1219 	f2fs_folio_wait_writeback(dn->node_folio, NODE, true, true);
1220 
1221 	for (; count > 0; dn->ofs_in_node++) {
1222 		block_t blkaddr = f2fs_data_blkaddr(dn);
1223 
1224 		if (blkaddr == NULL_ADDR) {
1225 			__set_data_blkaddr(dn, NEW_ADDR);
1226 			count--;
1227 		}
1228 	}
1229 
1230 	if (folio_mark_dirty(dn->node_folio))
1231 		dn->node_changed = true;
1232 	return 0;
1233 }
1234 
1235 /* Should keep dn->ofs_in_node unchanged */
1236 int f2fs_reserve_new_block(struct dnode_of_data *dn)
1237 {
1238 	unsigned int ofs_in_node = dn->ofs_in_node;
1239 	int ret;
1240 
1241 	ret = f2fs_reserve_new_blocks(dn, 1);
1242 	dn->ofs_in_node = ofs_in_node;
1243 	return ret;
1244 }
1245 
1246 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
1247 {
1248 	bool need_put = dn->inode_folio ? false : true;
1249 	int err;
1250 
1251 	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
1252 	if (err)
1253 		return err;
1254 
1255 	if (dn->data_blkaddr == NULL_ADDR)
1256 		err = f2fs_reserve_new_block(dn);
1257 	if (err || need_put)
1258 		f2fs_put_dnode(dn);
1259 	return err;
1260 }
1261 
1262 static inline struct fsverity_info *f2fs_need_verity(const struct inode *inode,
1263 						     pgoff_t idx)
1264 {
1265 	if (idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE))
1266 		return fsverity_get_info(inode);
1267 	return NULL;
1268 }
1269 
1270 struct folio *f2fs_get_read_data_folio(struct inode *inode, pgoff_t index,
1271 		blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs)
1272 {
1273 	struct address_space *mapping = inode->i_mapping;
1274 	struct dnode_of_data dn;
1275 	struct folio *folio;
1276 	int err;
1277 retry:
1278 	folio = f2fs_grab_cache_folio(mapping, index, for_write);
1279 	if (IS_ERR(folio))
1280 		return folio;
1281 
1282 	if (folio_test_large(folio)) {
1283 		pgoff_t folio_index = mapping_align_index(mapping, index);
1284 
1285 		f2fs_folio_put(folio, true);
1286 		invalidate_inode_pages2_range(mapping, folio_index,
1287 				folio_index + folio_nr_pages(folio) - 1);
1288 		f2fs_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
1289 		goto retry;
1290 	}
1291 
1292 	if (f2fs_lookup_read_extent_cache_block(inode, index,
1293 						&dn.data_blkaddr)) {
1294 		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
1295 						DATA_GENERIC_ENHANCE_READ)) {
1296 			err = -EFSCORRUPTED;
1297 			goto put_err;
1298 		}
1299 		goto got_it;
1300 	}
1301 
1302 	set_new_dnode(&dn, inode, NULL, NULL, 0);
1303 	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
1304 	if (err) {
1305 		if (err == -ENOENT && next_pgofs)
1306 			*next_pgofs = f2fs_get_next_page_offset(&dn, index);
1307 		goto put_err;
1308 	}
1309 	f2fs_put_dnode(&dn);
1310 
1311 	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
1312 		err = -ENOENT;
1313 		if (next_pgofs)
1314 			*next_pgofs = index + 1;
1315 		goto put_err;
1316 	}
1317 	if (dn.data_blkaddr != NEW_ADDR &&
1318 			!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
1319 						dn.data_blkaddr,
1320 						DATA_GENERIC_ENHANCE)) {
1321 		err = -EFSCORRUPTED;
1322 		goto put_err;
1323 	}
1324 got_it:
1325 	if (folio_test_uptodate(folio)) {
1326 		folio_unlock(folio);
1327 		return folio;
1328 	}
1329 
1330 	/*
1331 	 * A new dentry page may be allocated but cannot be written back,
1332 	 * because its new inode page couldn't be allocated due to -ENOSPC.
1333 	 * In that case, its blkaddr can remain NEW_ADDR.
1334 	 * See f2fs_add_link -> f2fs_get_new_data_folio ->
1335 	 * f2fs_init_inode_metadata.
1336 	 */
1337 	if (dn.data_blkaddr == NEW_ADDR) {
1338 		folio_zero_segment(folio, 0, folio_size(folio));
1339 		if (!folio_test_uptodate(folio))
1340 			folio_mark_uptodate(folio);
1341 		folio_unlock(folio);
1342 		return folio;
1343 	}
1344 
1345 	f2fs_submit_page_read(inode, f2fs_need_verity(inode, folio->index),
1346 			      folio, dn.data_blkaddr, op_flags, for_write);
1347 	return folio;
1348 
1349 put_err:
1350 	f2fs_folio_put(folio, true);
1351 	return ERR_PTR(err);
1352 }
1353 
1354 struct folio *f2fs_find_data_folio(struct inode *inode, pgoff_t index,
1355 					pgoff_t *next_pgofs)
1356 {
1357 	struct address_space *mapping = inode->i_mapping;
1358 	struct folio *folio;
1359 
1360 	folio = f2fs_filemap_get_folio(mapping, index, FGP_ACCESSED, 0);
1361 	if (IS_ERR(folio))
1362 		goto read;
1363 	if (folio_test_uptodate(folio))
1364 		return folio;
1365 	f2fs_folio_put(folio, false);
1366 
1367 read:
1368 	folio = f2fs_get_read_data_folio(inode, index, 0, false, next_pgofs);
1369 	if (IS_ERR(folio))
1370 		return folio;
1371 
1372 	if (folio_test_uptodate(folio))
1373 		return folio;
1374 
1375 	folio_wait_locked(folio);
1376 	if (unlikely(!folio_test_uptodate(folio))) {
1377 		f2fs_folio_put(folio, false);
1378 		return ERR_PTR(-EIO);
1379 	}
1380 	return folio;
1381 }
1382 
1383 /*
1384  * If it tries to access a hole, return an error, because the callers
1385  * (functions in dir.c and GC) need to be able to tell whether this
1386  * page exists or not.
1387  */
1388 struct folio *f2fs_get_lock_data_folio(struct inode *inode, pgoff_t index,
1389 							bool for_write)
1390 {
1391 	struct address_space *mapping = inode->i_mapping;
1392 	struct folio *folio;
1393 
1394 	folio = f2fs_get_read_data_folio(inode, index, 0, for_write, NULL);
1395 	if (IS_ERR(folio))
1396 		return folio;
1397 
1398 	/* wait for read completion */
1399 	folio_lock(folio);
1400 	if (unlikely(folio->mapping != mapping || !folio_test_uptodate(folio))) {
1401 		f2fs_folio_put(folio, true);
1402 		return ERR_PTR(-EIO);
1403 	}
1404 	return folio;
1405 }
1406 
1407 /*
1408  * Caller ensures that this data page is never allocated.
1409  * A new zero-filled data page is allocated in the page cache.
1410  *
1411  * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
1412  * f2fs_unlock_op().
1413  * Note that ifolio is set only by make_empty_dir, and if any error occurs,
1414  * ifolio is released by this function.
1415  */
1416 struct folio *f2fs_get_new_data_folio(struct inode *inode,
1417 		struct folio *ifolio, pgoff_t index, bool new_i_size)
1418 {
1419 	struct address_space *mapping = inode->i_mapping;
1420 	struct folio *folio;
1421 	struct dnode_of_data dn;
1422 	int err;
1423 
1424 	folio = f2fs_grab_cache_folio(mapping, index, true);
1425 	if (IS_ERR(folio)) {
1426 		/*
1427 		 * before exiting, we should make sure ifolio is released
1428 		 * if any error occurs.
1429 		 */
1430 		f2fs_folio_put(ifolio, true);
1431 		return ERR_PTR(-ENOMEM);
1432 	}
1433 
1434 	set_new_dnode(&dn, inode, ifolio, NULL, 0);
1435 	err = f2fs_reserve_block(&dn, index);
1436 	if (err) {
1437 		f2fs_folio_put(folio, true);
1438 		return ERR_PTR(err);
1439 	}
1440 	if (!ifolio)
1441 		f2fs_put_dnode(&dn);
1442 
1443 	if (folio_test_uptodate(folio))
1444 		goto got_it;
1445 
1446 	if (dn.data_blkaddr == NEW_ADDR) {
1447 		folio_zero_segment(folio, 0, folio_size(folio));
1448 		if (!folio_test_uptodate(folio))
1449 			folio_mark_uptodate(folio);
1450 	} else {
1451 		f2fs_folio_put(folio, true);
1452 
1453 		/* if ifolio exists, blkaddr should be NEW_ADDR */
1454 		f2fs_bug_on(F2FS_I_SB(inode), ifolio);
1455 		folio = f2fs_get_lock_data_folio(inode, index, true);
1456 		if (IS_ERR(folio))
1457 			return folio;
1458 	}
1459 got_it:
1460 	if (new_i_size && i_size_read(inode) <
1461 				((loff_t)(index + 1) << PAGE_SHIFT))
1462 		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
1463 	return folio;
1464 }
1465 
1466 static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
1467 {
1468 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1469 	struct f2fs_summary sum;
1470 	struct node_info ni;
1471 	block_t old_blkaddr;
1472 	blkcnt_t count = 1;
1473 	int err;
1474 
1475 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1476 		return -EPERM;
1477 
1478 	err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
1479 	if (err)
1480 		return err;
1481 
1482 	dn->data_blkaddr = f2fs_data_blkaddr(dn);
1483 	if (dn->data_blkaddr == NULL_ADDR) {
1484 		err = inc_valid_block_count(sbi, dn->inode, &count, true);
1485 		if (unlikely(err))
1486 			return err;
1487 	}
1488 
1489 	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
1490 	old_blkaddr = dn->data_blkaddr;
1491 	err = f2fs_allocate_data_block(sbi, NULL, old_blkaddr,
1492 				&dn->data_blkaddr, &sum, seg_type, NULL);
1493 	if (err)
1494 		return err;
1495 
1496 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
1497 		f2fs_invalidate_internal_cache(sbi, old_blkaddr, 1);
1498 
1499 	f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
1500 	return 0;
1501 }
1502 
1503 static void f2fs_map_lock(struct f2fs_sb_info *sbi,
1504 				struct f2fs_lock_context *lc,
1505 				int flag)
1506 {
1507 	if (flag == F2FS_GET_BLOCK_PRE_AIO)
1508 		f2fs_down_read_trace(&sbi->node_change, lc);
1509 	else
1510 		f2fs_lock_op(sbi, lc);
1511 }
1512 
1513 static void f2fs_map_unlock(struct f2fs_sb_info *sbi,
1514 				struct f2fs_lock_context *lc,
1515 				int flag)
1516 {
1517 	if (flag == F2FS_GET_BLOCK_PRE_AIO)
1518 		f2fs_up_read_trace(&sbi->node_change, lc);
1519 	else
1520 		f2fs_unlock_op(sbi, lc);
1521 }
1522 
1523 int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index)
1524 {
1525 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1526 	struct f2fs_lock_context lc;
1527 	int err = 0;
1528 
1529 	f2fs_map_lock(sbi, &lc, F2FS_GET_BLOCK_PRE_AIO);
1530 	if (!f2fs_lookup_read_extent_cache_block(dn->inode, index,
1531 						&dn->data_blkaddr))
1532 		err = f2fs_reserve_block(dn, index);
1533 	f2fs_map_unlock(sbi, &lc, F2FS_GET_BLOCK_PRE_AIO);
1534 
1535 	return err;
1536 }
1537 
1538 static int f2fs_map_no_dnode(struct inode *inode,
1539 		struct f2fs_map_blocks *map, struct dnode_of_data *dn,
1540 		pgoff_t pgoff)
1541 {
1542 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1543 
1544 	/*
1545 	 * There is one exceptional case: read_node_page() may return -ENOENT
1546 	 * because the filesystem has been shut down or hit a checkpoint error.
1547 	 * Return -EIO in that case.
1548 	 */
1549 	if (map->m_may_create &&
1550 	    (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) || f2fs_cp_error(sbi)))
1551 		return -EIO;
1552 
1553 	if (map->m_next_pgofs)
1554 		*map->m_next_pgofs = f2fs_get_next_page_offset(dn, pgoff);
1555 	if (map->m_next_extent)
1556 		*map->m_next_extent = f2fs_get_next_page_offset(dn, pgoff);
1557 	return 0;
1558 }
1559 
1560 static bool f2fs_map_blocks_cached(struct inode *inode,
1561 		struct f2fs_map_blocks *map, int flag)
1562 {
1563 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1564 	unsigned int maxblocks = map->m_len;
1565 	pgoff_t pgoff = (pgoff_t)map->m_lblk;
1566 	struct extent_info ei = {};
1567 
1568 	if (!f2fs_lookup_read_extent_cache(inode, pgoff, &ei))
1569 		return false;
1570 
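	/*
	 * Worked example (illustrative): for a cached extent with
	 * ei.fofs == 10, ei.blk == 1000 and ei.len == 5, a lookup at
	 * pgoff == 12 with maxblocks == 8 yields m_pblk == 1002 and
	 * m_len == min(8, 10 + 5 - 12) == 3.
	 */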
1571 	map->m_pblk = ei.blk + pgoff - ei.fofs;
1572 	map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgoff);
1573 	map->m_flags = F2FS_MAP_MAPPED;
1574 	if (map->m_next_extent)
1575 		*map->m_next_extent = pgoff + map->m_len;
1576 
1577 	/* for hardware encryption; also avoids potential issues in the future */
1578 	if (flag == F2FS_GET_BLOCK_DIO)
1579 		f2fs_wait_on_block_writeback_range(inode,
1580 					map->m_pblk, map->m_len);
1581 
1582 	map->m_multidev_dio = f2fs_allow_multi_device_dio(sbi, flag);
1583 	if (map->m_multidev_dio) {
1584 		int bidx = f2fs_target_device_index(sbi, map->m_pblk);
1585 		struct f2fs_dev_info *dev = &sbi->devs[bidx];
1586 
1587 		map->m_bdev = dev->bdev;
1588 		map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk);
1589 		map->m_pblk -= dev->start_blk;
1590 	} else {
1591 		map->m_bdev = inode->i_sb->s_bdev;
1592 	}
1593 	return true;
1594 }
1595 
1596 static bool map_is_mergeable(struct f2fs_sb_info *sbi,
1597 				struct f2fs_map_blocks *map,
1598 				block_t blkaddr, int flag, int bidx,
1599 				int ofs)
1600 {
1601 	if (map->m_multidev_dio && map->m_bdev != FDEV(bidx).bdev)
1602 		return false;
1603 	if (map->m_pblk != NEW_ADDR && blkaddr == (map->m_pblk + ofs))
1604 		return true;
1605 	if (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR)
1606 		return true;
1607 	if (flag == F2FS_GET_BLOCK_PRE_DIO)
1608 		return true;
1609 	if (flag == F2FS_GET_BLOCK_DIO &&
1610 		map->m_pblk == NULL_ADDR && blkaddr == NULL_ADDR)
1611 		return true;
1612 	return false;
1613 }
1614 
1615 /*
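/*
 * Illustrative usage sketch (modeled on __f2fs_overwrite_io() below; the
 * variable names are hypothetical): a read-only lookup of a range can be
 * done with
 *
 *	struct f2fs_map_blocks map = {};
 *
 *	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
 *	map.m_len = len_in_blocks;
 *	map.m_seg_type = NO_CHECK_TYPE;
 *	map.m_may_create = false;
 *	err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
 *
 * after which (map.m_flags & F2FS_MAP_MAPPED) says whether the start of the
 * range is mapped, and map.m_pblk/map.m_len describe the mapped run.
 */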
1616  * f2fs_map_blocks() tries to find or build mapping relationship which
1617  * f2fs_map_blocks() tries to find or build the mapping relationship that
1618  * maps contiguous logical blocks to physical blocks, and returns such
1619  * info via the f2fs_map_blocks structure.
1620 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
1621 {
1622 	unsigned int maxblocks = map->m_len;
1623 	struct dnode_of_data dn;
1624 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1625 	struct f2fs_lock_context lc;
1626 	int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
1627 	pgoff_t pgofs, end_offset, end;
1628 	int err = 0, ofs = 1;
1629 	unsigned int ofs_in_node, last_ofs_in_node;
1630 	blkcnt_t prealloc;
1631 	block_t blkaddr;
1632 	unsigned int start_pgofs;
1633 	int bidx = 0;
1634 	bool is_hole;
1635 	bool lfs_dio_write;
1636 
1637 	if (!maxblocks)
1638 		return 0;
1639 
1640 	lfs_dio_write = (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) &&
1641 				map->m_may_create);
1642 
1643 	if (!map->m_may_create && f2fs_map_blocks_cached(inode, map, flag)) {
1644 		struct extent_info ei;
1645 
1646 		/*
1647 		 * 1. If map->m_multidev_dio is true, the blocks at map->m_pblk
1648 		 * cannot be waited on by f2fs_wait_on_block_writeback_range()
1649 		 * and are not mergeable.
1650 		 * 2. If pgofs hits the read extent cache, the mapping is already
1651 		 * cached in the extent cache but is not mergeable, so there is
1652 		 * no need to query the mapping again via
1653 		 * f2fs_get_dnode_of_data().
1654 		 */
1655 		pgofs =	(pgoff_t)map->m_lblk + map->m_len;
1656 		if (map->m_len == maxblocks ||
1657 			map->m_multidev_dio ||
1658 			f2fs_lookup_read_extent_cache(inode, pgofs, &ei))
1659 			goto out;
1660 		ofs = map->m_len;
1661 		goto map_more;
1662 	}
1663 
1664 	map->m_bdev = inode->i_sb->s_bdev;
1665 	map->m_multidev_dio =
1666 		f2fs_allow_multi_device_dio(F2FS_I_SB(inode), flag);
1667 
1668 	map->m_len = 0;
1669 	map->m_flags = 0;
1670 
1671 	/* it only supports block size == page size */
1672 	pgofs =	(pgoff_t)map->m_lblk;
1673 map_more:
1674 	end = (pgoff_t)map->m_lblk + maxblocks;
1675 
1676 	if (flag == F2FS_GET_BLOCK_PRECACHE)
1677 		mode = LOOKUP_NODE_RA;
1678 
1679 next_dnode:
1680 	if (map->m_may_create) {
1681 		if (f2fs_lfs_mode(sbi))
1682 			f2fs_balance_fs(sbi, true);
1683 		f2fs_map_lock(sbi, &lc, flag);
1684 	}
1685 
1686 	/* When reading holes, we need its node page */
1687 	set_new_dnode(&dn, inode, NULL, NULL, 0);
1688 	err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
1689 	if (err) {
1690 		if (flag == F2FS_GET_BLOCK_BMAP)
1691 			map->m_pblk = 0;
1692 		if (err == -ENOENT)
1693 			err = f2fs_map_no_dnode(inode, map, &dn, pgofs);
1694 		goto unlock_out;
1695 	}
1696 
1697 	start_pgofs = pgofs;
1698 	prealloc = 0;
1699 	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
1700 	end_offset = ADDRS_PER_PAGE(dn.node_folio, inode);
1701 
1702 next_block:
1703 	blkaddr = f2fs_data_blkaddr(&dn);
1704 	is_hole = !__is_valid_data_blkaddr(blkaddr);
1705 	if (!is_hole &&
1706 	    !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
1707 		err = -EFSCORRUPTED;
1708 		goto sync_out;
1709 	}
1710 
1711 	/* use out-of-place update for direct IO under LFS mode */
1712 	if (map->m_may_create && (is_hole ||
1713 		(flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) &&
1714 		!f2fs_is_pinned_file(inode) && map->m_last_pblk != blkaddr))) {
1715 		if (unlikely(f2fs_cp_error(sbi))) {
1716 			err = -EIO;
1717 			goto sync_out;
1718 		}
1719 
1720 		switch (flag) {
1721 		case F2FS_GET_BLOCK_PRE_AIO:
1722 			if (blkaddr == NULL_ADDR) {
1723 				prealloc++;
1724 				last_ofs_in_node = dn.ofs_in_node;
1725 			}
1726 			break;
1727 		case F2FS_GET_BLOCK_PRE_DIO:
1728 		case F2FS_GET_BLOCK_DIO:
1729 			err = __allocate_data_block(&dn, map->m_seg_type);
1730 			if (err)
1731 				goto sync_out;
1732 			if (flag == F2FS_GET_BLOCK_PRE_DIO)
1733 				file_need_truncate(inode);
1734 			set_inode_flag(inode, FI_APPEND_WRITE);
1735 			break;
1736 		default:
1737 			WARN_ON_ONCE(1);
1738 			err = -EIO;
1739 			goto sync_out;
1740 		}
1741 
1742 		blkaddr = dn.data_blkaddr;
1743 		if (is_hole)
1744 			map->m_flags |= F2FS_MAP_NEW;
1745 	} else if (is_hole) {
1746 		if (f2fs_compressed_file(inode) &&
1747 		    f2fs_sanity_check_cluster(&dn)) {
1748 			err = -EFSCORRUPTED;
1749 			f2fs_handle_error(sbi,
1750 					ERROR_CORRUPTED_CLUSTER);
1751 			goto sync_out;
1752 		}
1753 
1754 		switch (flag) {
1755 		case F2FS_GET_BLOCK_PRECACHE:
1756 			goto sync_out;
1757 		case F2FS_GET_BLOCK_BMAP:
1758 			map->m_pblk = 0;
1759 			goto sync_out;
1760 		case F2FS_GET_BLOCK_FIEMAP:
1761 			if (blkaddr == NULL_ADDR) {
1762 				if (map->m_next_pgofs)
1763 					*map->m_next_pgofs = pgofs + 1;
1764 				goto sync_out;
1765 			}
1766 			break;
1767 		case F2FS_GET_BLOCK_DIO:
1768 			if (map->m_next_pgofs)
1769 				*map->m_next_pgofs = pgofs + 1;
1770 			break;
1771 		default:
1772 			/* for defragment case */
1773 			if (map->m_next_pgofs)
1774 				*map->m_next_pgofs = pgofs + 1;
1775 			goto sync_out;
1776 		}
1777 	}
1778 
1779 	if (flag == F2FS_GET_BLOCK_PRE_AIO)
1780 		goto skip;
1781 
1782 	if (map->m_multidev_dio)
1783 		bidx = f2fs_target_device_index(sbi, blkaddr);
1784 
1785 	if (map->m_len == 0) {
1786 		/* reserved delalloc block should be mapped for fiemap. */
1787 		if (blkaddr == NEW_ADDR)
1788 			map->m_flags |= F2FS_MAP_DELALLOC;
1789 		/* DIO read of a hole: do not map the blocks. */
1790 		if (!(flag == F2FS_GET_BLOCK_DIO && is_hole && !map->m_may_create))
1791 			map->m_flags |= F2FS_MAP_MAPPED;
1792 
1793 		map->m_pblk = blkaddr;
1794 		map->m_len = 1;
1795 
1796 		if (map->m_multidev_dio)
1797 			map->m_bdev = FDEV(bidx).bdev;
1798 
1799 		if (lfs_dio_write)
1800 			map->m_last_pblk = NULL_ADDR;
1801 	} else if (map_is_mergeable(sbi, map, blkaddr, flag, bidx, ofs)) {
1802 		ofs++;
1803 		map->m_len++;
1804 	} else {
1805 		if (lfs_dio_write && !f2fs_is_pinned_file(inode))
1806 			map->m_last_pblk = blkaddr;
1807 		goto sync_out;
1808 	}
1809 
1810 skip:
1811 	dn.ofs_in_node++;
1812 	pgofs++;
1813 
1814 	/* preallocate blocks in batch for one dnode page */
1815 	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
1816 			(pgofs == end || dn.ofs_in_node == end_offset)) {
1817 
1818 		dn.ofs_in_node = ofs_in_node;
1819 		err = f2fs_reserve_new_blocks(&dn, prealloc);
1820 		if (err)
1821 			goto sync_out;
1822 
1823 		map->m_len += dn.ofs_in_node - ofs_in_node;
1824 		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
1825 			err = -ENOSPC;
1826 			goto sync_out;
1827 		}
1828 		dn.ofs_in_node = end_offset;
1829 	}
1830 
1831 	if (pgofs >= end)
1832 		goto sync_out;
1833 	else if (dn.ofs_in_node < end_offset)
1834 		goto next_block;
1835 
1836 	if (flag == F2FS_GET_BLOCK_PRECACHE) {
1837 		if (map->m_flags & F2FS_MAP_MAPPED) {
1838 			unsigned int ofs = start_pgofs - map->m_lblk;
1839 
1840 			f2fs_update_read_extent_cache_range(&dn,
1841 				start_pgofs, map->m_pblk + ofs,
1842 				map->m_len - ofs);
1843 		}
1844 	}
1845 
1846 	f2fs_put_dnode(&dn);
1847 
1848 	if (map->m_may_create) {
1849 		f2fs_map_unlock(sbi, &lc, flag);
1850 		f2fs_balance_fs(sbi, dn.node_changed);
1851 	}
1852 	goto next_dnode;
1853 
1854 sync_out:
1855 
1856 	if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) {
1857 		/*
1858 		 * for hardware encryption; also avoids potential issues
1859 		 * in the future
1860 		 */
1861 		f2fs_wait_on_block_writeback_range(inode,
1862 						map->m_pblk, map->m_len);
1863 
1864 		if (map->m_multidev_dio) {
1865 			block_t blk_addr = map->m_pblk;
1866 
1867 			bidx = f2fs_target_device_index(sbi, map->m_pblk);
1868 
1869 			map->m_bdev = FDEV(bidx).bdev;
1870 			map->m_pblk -= FDEV(bidx).start_blk;
1871 
1872 			if (map->m_may_create)
1873 				f2fs_update_device_state(sbi, inode->i_ino,
1874 							blk_addr, map->m_len);
1875 
1876 			f2fs_bug_on(sbi, blk_addr + map->m_len >
1877 						FDEV(bidx).end_blk + 1);
1878 		}
1879 	}
1880 
1881 	if (flag == F2FS_GET_BLOCK_PRECACHE) {
1882 		if (map->m_flags & F2FS_MAP_MAPPED) {
1883 			unsigned int ofs = start_pgofs - map->m_lblk;
1884 
1885 			if (map->m_len > ofs)
1886 				f2fs_update_read_extent_cache_range(&dn,
1887 					start_pgofs, map->m_pblk + ofs,
1888 					map->m_len - ofs);
1889 		}
1890 		if (map->m_next_extent)
1891 			*map->m_next_extent = is_hole ? pgofs + 1 : pgofs;
1892 	}
1893 	f2fs_put_dnode(&dn);
1894 unlock_out:
1895 	if (map->m_may_create) {
1896 		f2fs_map_unlock(sbi, &lc, flag);
1897 		f2fs_balance_fs(sbi, dn.node_changed);
1898 	}
1899 out:
1900 	trace_f2fs_map_blocks(inode, map, flag, err);
1901 	return err;
1902 }
1903 
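/*
 * Check whether the range [pos, pos + len) is fully backed by mapped blocks,
 * i.e. whether a write to it would purely overwrite existing data.  With
 * @check_first, only the first mapped extent is probed.
 */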
1904 static bool __f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len,
1905 				bool check_first)
1906 {
1907 	struct f2fs_map_blocks map;
1908 	block_t last_lblk;
1909 	int err;
1910 
1911 	if (pos + len > i_size_read(inode))
1912 		return false;
1913 
1914 	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1915 	map.m_next_pgofs = NULL;
1916 	map.m_next_extent = NULL;
1917 	map.m_seg_type = NO_CHECK_TYPE;
1918 	map.m_may_create = false;
1919 	last_lblk = F2FS_BLK_ALIGN(pos + len);
1920 
1921 	while (map.m_lblk < last_lblk) {
1922 		map.m_len = last_lblk - map.m_lblk;
1923 		err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
1924 		if (err || map.m_len == 0)
1925 			return false;
1926 		map.m_lblk += map.m_len;
1927 		if (check_first)
1928 			break;
1929 	}
1930 	return true;
1931 }
1932 
1933 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1934 {
1935 	return __f2fs_overwrite_io(inode, pos, len, false);
1936 }
1937 
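/*
 * Report xattr extents for FIEMAP_FLAG_XATTR: the inline xattr area within
 * the inode block, if present, and then the separate xattr node block.
 */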
1938 static int f2fs_xattr_fiemap(struct inode *inode,
1939 				struct fiemap_extent_info *fieinfo)
1940 {
1941 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1942 	struct node_info ni;
1943 	__u64 phys = 0, len;
1944 	__u32 flags;
1945 	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1946 	int err = 0;
1947 
1948 	if (f2fs_has_inline_xattr(inode)) {
1949 		int offset;
1950 		struct folio *folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi),
1951 				inode->i_ino, false);
1952 
1953 		if (IS_ERR(folio))
1954 			return PTR_ERR(folio);
1955 
1956 		err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
1957 		if (err) {
1958 			f2fs_folio_put(folio, true);
1959 			return err;
1960 		}
1961 
1962 		phys = F2FS_BLK_TO_BYTES(ni.blk_addr);
1963 		offset = offsetof(struct f2fs_inode, i_addr) +
1964 					sizeof(__le32) * (DEF_ADDRS_PER_INODE -
1965 					get_inline_xattr_addrs(inode));
1966 
1967 		phys += offset;
1968 		len = inline_xattr_size(inode);
1969 
1970 		f2fs_folio_put(folio, true);
1971 
1972 		flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1973 
1974 		if (!xnid)
1975 			flags |= FIEMAP_EXTENT_LAST;
1976 
1977 		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1978 		trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1979 		if (err)
1980 			return err;
1981 	}
1982 
1983 	if (xnid) {
1984 		struct folio *folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi),
1985 				xnid, false);
1986 
1987 		if (IS_ERR(folio))
1988 			return PTR_ERR(folio);
1989 
1990 		err = f2fs_get_node_info(sbi, xnid, &ni, false);
1991 		if (err) {
1992 			f2fs_folio_put(folio, true);
1993 			return err;
1994 		}
1995 
1996 		phys = F2FS_BLK_TO_BYTES(ni.blk_addr);
1997 		len = inode->i_sb->s_blocksize;
1998 
1999 		f2fs_folio_put(folio, true);
2000 
2001 		flags = FIEMAP_EXTENT_LAST;
2002 	}
2003 
2004 	if (phys) {
2005 		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
2006 		trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
2007 	}
2008 
2009 	return (err < 0 ? err : 0);
2010 }
2011 
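/*
 * FIEMAP entry point: walk the file with F2FS_GET_BLOCK_FIEMAP, merging the
 * blocks of a compressed cluster into a single encoded extent and reporting
 * delalloc blocks as unwritten.
 */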
2012 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2013 		u64 start, u64 len)
2014 {
2015 	struct f2fs_map_blocks map;
2016 	sector_t start_blk, last_blk, blk_len, max_len;
2017 	pgoff_t next_pgofs;
2018 	u64 logical = 0, phys = 0, size = 0;
2019 	u32 flags = 0;
2020 	int ret = 0;
2021 	bool compr_cluster = false, compr_appended;
2022 	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
2023 	unsigned int count_in_cluster = 0;
2024 	loff_t maxbytes;
2025 
2026 	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
2027 		ret = f2fs_precache_extents(inode);
2028 		if (ret)
2029 			return ret;
2030 	}
2031 
2032 	ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
2033 	if (ret)
2034 		return ret;
2035 
2036 	inode_lock_shared(inode);
2037 
2038 	maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
2039 	if (start > maxbytes) {
2040 		ret = -EFBIG;
2041 		goto out;
2042 	}
2043 
2044 	if (len > maxbytes || (maxbytes - len) < start)
2045 		len = maxbytes - start;
2046 
2047 	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
2048 		ret = f2fs_xattr_fiemap(inode, fieinfo);
2049 		goto out;
2050 	}
2051 
2052 	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
2053 		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
2054 		if (ret != -EAGAIN)
2055 			goto out;
2056 	}
2057 
2058 	start_blk = F2FS_BYTES_TO_BLK(start);
2059 	last_blk = F2FS_BYTES_TO_BLK(start + len - 1);
2060 	blk_len = last_blk - start_blk + 1;
2061 	max_len = F2FS_BYTES_TO_BLK(maxbytes) - start_blk;
2062 
2063 next:
2064 	memset(&map, 0, sizeof(map));
2065 	map.m_lblk = start_blk;
2066 	map.m_len = blk_len;
2067 	map.m_next_pgofs = &next_pgofs;
2068 	map.m_seg_type = NO_CHECK_TYPE;
2069 
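	/*
	 * For a compressed cluster, the first block holds COMPRESS_ADDR, so
	 * continue mapping from the remaining blocks of the same cluster.
	 */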
2070 	if (compr_cluster) {
2071 		map.m_lblk += 1;
2072 		map.m_len = cluster_size - count_in_cluster;
2073 	}
2074 
2075 	ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
2076 	if (ret)
2077 		goto out;
2078 
2079 	/* HOLE */
2080 	if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) {
2081 		start_blk = next_pgofs;
2082 
2083 		if (F2FS_BLK_TO_BYTES(start_blk) < maxbytes)
2084 			goto prep_next;
2085 
2086 		flags |= FIEMAP_EXTENT_LAST;
2087 	}
2088 
2089 	/*
2090 	 * the current extent may cross the boundary of the inquiry; increase
2091 	 * the length and requery.
2092 	 */
2093 	if (!compr_cluster && (map.m_flags & F2FS_MAP_MAPPED) &&
2094 				map.m_lblk + map.m_len - 1 == last_blk &&
2095 				blk_len != max_len) {
2096 		blk_len = max_len;
2097 		goto next;
2098 	}
2099 
2100 	compr_appended = false;
2101 	/* In the case of a compressed cluster, append this to the last extent */
2102 	if (compr_cluster && ((map.m_flags & F2FS_MAP_DELALLOC) ||
2103 			!(map.m_flags & F2FS_MAP_FLAGS))) {
2104 		compr_appended = true;
2105 		goto skip_fill;
2106 	}
2107 
2108 	if (size) {
2109 		flags |= FIEMAP_EXTENT_MERGED;
2110 		if (IS_ENCRYPTED(inode))
2111 			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
2112 
2113 		ret = fiemap_fill_next_extent(fieinfo, logical,
2114 				phys, size, flags);
2115 		trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
2116 		if (ret)
2117 			goto out;
2118 		size = 0;
2119 	}
2120 
2121 	if (start_blk > last_blk)
2122 		goto out;
2123 
2124 skip_fill:
2125 	if (map.m_pblk == COMPRESS_ADDR) {
2126 		compr_cluster = true;
2127 		count_in_cluster = 1;
2128 	} else if (compr_appended) {
2129 		unsigned int appended_blks = cluster_size -
2130 						count_in_cluster + 1;
2131 		size += F2FS_BLK_TO_BYTES(appended_blks);
2132 		start_blk += appended_blks;
2133 		compr_cluster = false;
2134 	} else {
2135 		logical = F2FS_BLK_TO_BYTES(start_blk);
2136 		phys = __is_valid_data_blkaddr(map.m_pblk) ?
2137 			F2FS_BLK_TO_BYTES(map.m_pblk) : 0;
2138 		size = F2FS_BLK_TO_BYTES(map.m_len);
2139 		flags = 0;
2140 
2141 		if (compr_cluster) {
2142 			flags = FIEMAP_EXTENT_ENCODED;
2143 			count_in_cluster += map.m_len;
2144 			if (count_in_cluster == cluster_size) {
2145 				compr_cluster = false;
2146 				size += F2FS_BLKSIZE;
2147 			}
2148 		} else if (map.m_flags & F2FS_MAP_DELALLOC) {
2149 			flags = FIEMAP_EXTENT_UNWRITTEN;
2150 		}
2151 
2152 		start_blk += F2FS_BYTES_TO_BLK(size);
2153 	}
2154 
2155 prep_next:
2156 	cond_resched();
2157 	if (fatal_signal_pending(current))
2158 		ret = -EINTR;
2159 	else
2160 		goto next;
2161 out:
2162 	if (ret == 1)
2163 		ret = 0;
2164 
2165 	inode_unlock_shared(inode);
2166 	return ret;
2167 }
2168 
2169 static inline loff_t f2fs_readpage_limit(struct inode *inode)
2170 {
2171 	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2172 		return F2FS_BLK_TO_BYTES(max_file_blocks(inode));
2173 
2174 	return i_size_read(inode);
2175 }
2176 
2177 static inline blk_opf_t f2fs_ra_op_flags(struct readahead_control *rac)
2178 {
2179 	return rac ? REQ_RAHEAD : 0;
2180 }
2181 
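/*
 * Read a single page: map its block, zero it out if it is a hole or beyond
 * EOF, otherwise add it to the shared read bio, submitting that bio first
 * whenever the new block cannot be merged into it.
 */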
2182 static int f2fs_read_single_page(struct inode *inode, struct fsverity_info *vi,
2183 				 struct folio *folio, unsigned int nr_pages,
2184 				 struct f2fs_map_blocks *map,
2185 				 struct bio **bio_ret,
2186 				 sector_t *last_block_in_bio,
2187 				 struct readahead_control *rac)
2188 {
2189 	struct bio *bio = *bio_ret;
2190 	const unsigned int blocksize = F2FS_BLKSIZE;
2191 	sector_t block_in_file;
2192 	sector_t last_block;
2193 	sector_t last_block_in_file;
2194 	sector_t block_nr;
2195 	pgoff_t index = folio->index;
2196 	int ret = 0;
2197 
2198 	block_in_file = (sector_t)index;
2199 	last_block = block_in_file + nr_pages;
2200 	last_block_in_file = F2FS_BYTES_TO_BLK(f2fs_readpage_limit(inode) +
2201 							blocksize - 1);
2202 	if (last_block > last_block_in_file)
2203 		last_block = last_block_in_file;
2204 
2205 	/* just zero out the page which is beyond EOF */
2206 	if (block_in_file >= last_block)
2207 		goto zero_out;
2208 	/*
2209 	 * Map blocks using the previous result first.
2210 	 */
2211 	if (map->m_flags & F2FS_MAP_MAPPED) {
2212 		if (block_in_file > map->m_lblk &&
2213 			block_in_file < (map->m_lblk + map->m_len))
2214 			goto got_it;
2215 	} else if (block_in_file < *map->m_next_pgofs) {
2216 		goto got_it;
2217 	}
2218 
2219 	/*
2220 	 * Then do more f2fs_map_blocks() calls until we are
2221 	 * done with this page.
2222 	 */
2223 	map->m_lblk = block_in_file;
2224 	map->m_len = last_block - block_in_file;
2225 
2226 	ret = f2fs_map_blocks(inode, map, F2FS_GET_BLOCK_DEFAULT);
2227 	if (ret)
2228 		goto out;
2229 got_it:
2230 	if ((map->m_flags & F2FS_MAP_MAPPED)) {
2231 		block_nr = map->m_pblk + block_in_file - map->m_lblk;
2232 		folio_set_mappedtodisk(folio);
2233 
2234 		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
2235 						DATA_GENERIC_ENHANCE_READ)) {
2236 			ret = -EFSCORRUPTED;
2237 			goto out;
2238 		}
2239 	} else {
2240 zero_out:
2241 		folio_zero_segment(folio, 0, folio_size(folio));
2242 		if (vi && !fsverity_verify_folio(vi, folio)) {
2243 			ret = -EIO;
2244 			goto out;
2245 		}
2246 		if (!folio_test_uptodate(folio))
2247 			folio_mark_uptodate(folio);
2248 		folio_unlock(folio);
2249 		goto out;
2250 	}
2251 
2252 	/*
2253 	 * This page will go to BIO.  Do we need to send this
2254 	 * BIO off first?
2255 	 */
2256 	if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2257 				       *last_block_in_bio, block_nr) ||
2258 		    !f2fs_crypt_mergeable_bio(bio, inode, index, NULL))) {
2259 submit_and_realloc:
2260 		f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
2261 		bio = NULL;
2262 	}
2263 	if (bio == NULL)
2264 		bio = f2fs_grab_read_bio(inode, vi, block_nr, nr_pages,
2265 					 f2fs_ra_op_flags(rac), index, false);
2266 
2267 	/*
2268 	 * If the page is under writeback, we need to wait for
2269 	 * its completion to see the correct decrypted data.
2270 	 */
2271 	f2fs_wait_on_block_writeback(inode, block_nr);
2272 
2273 	if (!bio_add_folio(bio, folio, blocksize, 0))
2274 		goto submit_and_realloc;
2275 
2276 	inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
2277 	f2fs_update_iostat(F2FS_I_SB(inode), NULL, FS_DATA_READ_IO,
2278 							F2FS_BLKSIZE);
2279 	*last_block_in_bio = block_nr;
2280 out:
2281 	*bio_ret = bio;
2282 	return ret;
2283 }
2284 
2285 #ifdef CONFIG_F2FS_FS_COMPRESSION
2286 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
2287 				unsigned nr_pages, sector_t *last_block_in_bio,
2288 				struct readahead_control *rac, bool for_write)
2289 {
2290 	struct dnode_of_data dn;
2291 	struct inode *inode = cc->inode;
2292 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2293 	struct bio *bio = *bio_ret;
2294 	unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
2295 	sector_t last_block_in_file;
2296 	const unsigned int blocksize = F2FS_BLKSIZE;
2297 	struct decompress_io_ctx *dic = NULL;
2298 	struct extent_info ei = {};
2299 	bool from_dnode = true;
2300 	int i;
2301 	int ret = 0;
2302 
2303 	if (unlikely(f2fs_cp_error(sbi))) {
2304 		ret = -EIO;
2305 		from_dnode = false;
2306 		goto out_put_dnode;
2307 	}
2308 
2309 	f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
2310 
2311 	last_block_in_file = F2FS_BYTES_TO_BLK(f2fs_readpage_limit(inode) +
2312 							blocksize - 1);
2313 
2314 	/* get rid of pages beyond EOF */
2315 	for (i = 0; i < cc->cluster_size; i++) {
2316 		struct page *page = cc->rpages[i];
2317 		struct folio *folio;
2318 
2319 		if (!page)
2320 			continue;
2321 
2322 		folio = page_folio(page);
2323 		if ((sector_t)folio->index >= last_block_in_file) {
2324 			folio_zero_segment(folio, 0, folio_size(folio));
2325 			if (!folio_test_uptodate(folio))
2326 				folio_mark_uptodate(folio);
2327 		} else if (!folio_test_uptodate(folio)) {
2328 			continue;
2329 		}
2330 		folio_unlock(folio);
2331 		if (for_write)
2332 			folio_put(folio);
2333 		cc->rpages[i] = NULL;
2334 		cc->nr_rpages--;
2335 	}
2336 
2337 	/* we are done since all pages are beyond EOF */
2338 	if (f2fs_cluster_is_empty(cc))
2339 		goto out;
2340 
2341 	if (f2fs_lookup_read_extent_cache(inode, start_idx, &ei))
2342 		from_dnode = false;
2343 
2344 	if (!from_dnode)
2345 		goto skip_reading_dnode;
2346 
2347 	set_new_dnode(&dn, inode, NULL, NULL, 0);
2348 	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
2349 	if (ret)
2350 		goto out;
2351 
2352 	f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
2353 
2354 skip_reading_dnode:
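	/* count the valid compressed blocks that follow the COMPRESS_ADDR slot */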
2355 	for (i = 1; i < cc->cluster_size; i++) {
2356 		block_t blkaddr;
2357 
2358 		blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_folio,
2359 					dn.ofs_in_node + i) :
2360 					ei.blk + i - 1;
2361 
2362 		if (!__is_valid_data_blkaddr(blkaddr))
2363 			break;
2364 
2365 		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
2366 			ret = -EFAULT;
2367 			goto out_put_dnode;
2368 		}
2369 		cc->nr_cpages++;
2370 
2371 		if (!from_dnode && i >= ei.c_len)
2372 			break;
2373 	}
2374 
2375 	/* nothing to decompress */
2376 	if (cc->nr_cpages == 0) {
2377 		ret = 0;
2378 		goto out_put_dnode;
2379 	}
2380 
2381 	dic = f2fs_alloc_dic(cc);
2382 	if (IS_ERR(dic)) {
2383 		ret = PTR_ERR(dic);
2384 		goto out_put_dnode;
2385 	}
2386 
2387 	for (i = 0; i < cc->nr_cpages; i++) {
2388 		struct folio *folio = page_folio(dic->cpages[i]);
2389 		block_t blkaddr;
2390 		struct bio_post_read_ctx *ctx;
2391 
2392 		blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_folio,
2393 					dn.ofs_in_node + i + 1) :
2394 					ei.blk + i;
2395 
2396 		f2fs_wait_on_block_writeback(inode, blkaddr);
2397 
2398 		if (f2fs_load_compressed_folio(sbi, folio, blkaddr)) {
2399 			if (atomic_dec_and_test(&dic->remaining_pages)) {
2400 				f2fs_decompress_cluster(dic, true);
2401 				break;
2402 			}
2403 			continue;
2404 		}
2405 
2406 		if (bio && (!page_is_mergeable(sbi, bio,
2407 					*last_block_in_bio, blkaddr) ||
2408 		    !f2fs_crypt_mergeable_bio(bio, inode, folio->index, NULL))) {
2409 submit_and_realloc:
2410 			f2fs_submit_read_bio(sbi, bio, DATA);
2411 			bio = NULL;
2412 		}
2413 
2414 		if (!bio)
2415 			bio = f2fs_grab_read_bio(inode, cc->vi, blkaddr,
2416 						 nr_pages - i,
2417 						 f2fs_ra_op_flags(rac),
2418 						 folio->index, for_write);
2419 
2420 		if (!bio_add_folio(bio, folio, blocksize, 0))
2421 			goto submit_and_realloc;
2422 
2423 		ctx = get_post_read_ctx(bio);
2424 		ctx->enabled_steps |= STEP_DECOMPRESS;
2425 		refcount_inc(&dic->refcnt);
2426 
2427 		inc_page_count(sbi, F2FS_RD_DATA);
2428 		f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
2429 		*last_block_in_bio = blkaddr;
2430 	}
2431 
2432 	if (from_dnode)
2433 		f2fs_put_dnode(&dn);
2434 
2435 	*bio_ret = bio;
2436 	return 0;
2437 
2438 out_put_dnode:
2439 	if (from_dnode)
2440 		f2fs_put_dnode(&dn);
2441 out:
2442 	for (i = 0; i < cc->cluster_size; i++) {
2443 		if (cc->rpages[i]) {
2444 			ClearPageUptodate(cc->rpages[i]);
2445 			unlock_page(cc->rpages[i]);
2446 		}
2447 	}
2448 	*bio_ret = bio;
2449 	return ret;
2450 }
2451 #endif
2452 
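/*
 * Per-folio state for large folio reads: read_pages_pending counts blocks
 * still under I/O, so that folio_end_read() is not called prematurely.
 */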
2453 static struct f2fs_folio_state *ffs_find_or_alloc(struct folio *folio)
2454 {
2455 	struct f2fs_folio_state *ffs = folio->private;
2456 
2457 	if (ffs)
2458 		return ffs;
2459 
2460 	ffs = f2fs_kmem_cache_alloc(ffs_entry_slab,
2461 			GFP_NOIO | __GFP_ZERO, true, NULL);
2462 
2463 	spin_lock_init(&ffs->state_lock);
2464 	folio_attach_private(folio, ffs);
2465 	return ffs;
2466 }
2467 
2468 static void ffs_detach_free(struct folio *folio)
2469 {
2470 	struct f2fs_folio_state *ffs;
2471 
2472 	if (!folio_test_large(folio)) {
2473 		folio_detach_private(folio);
2474 		return;
2475 	}
2476 
2477 	ffs = folio_detach_private(folio);
2478 	if (!ffs)
2479 		return;
2480 
2481 	WARN_ON_ONCE(ffs->read_pages_pending != 0);
2482 	kmem_cache_free(ffs_entry_slab, ffs);
2483 }
2484 
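/*
 * Read path for large folios: map each block of the folio, zeroing holes in
 * place and batching the mapped blocks into shared read bios.
 */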
2485 static int f2fs_read_data_large_folio(struct inode *inode,
2486 		struct fsverity_info *vi,
2487 		struct readahead_control *rac, struct folio *folio)
2488 {
2489 	struct bio *bio = NULL;
2490 	sector_t last_block_in_bio = 0;
2491 	struct f2fs_map_blocks map = {0, };
2492 	pgoff_t index, offset, next_pgofs = 0;
2493 	unsigned max_nr_pages = rac ? readahead_count(rac) :
2494 				folio_nr_pages(folio);
2495 	unsigned nrpages;
2496 	struct f2fs_folio_state *ffs;
2497 	int ret = 0;
2498 	bool folio_in_bio;
2499 
2500 	if (!IS_IMMUTABLE(inode) || f2fs_compressed_file(inode)) {
2501 		if (folio)
2502 			folio_unlock(folio);
2503 		return -EOPNOTSUPP;
2504 	}
2505 
2506 	map.m_seg_type = NO_CHECK_TYPE;
2507 
2508 	if (rac)
2509 		folio = readahead_folio(rac);
2510 next_folio:
2511 	if (!folio)
2512 		goto out;
2513 
2514 	f2fs_update_read_folio_count(F2FS_I_SB(inode), folio);
2515 
2516 	folio_in_bio = false;
2517 	index = folio->index;
2518 	offset = 0;
2519 	ffs = NULL;
2520 	nrpages = folio_nr_pages(folio);
2521 
2522 	for (; nrpages; nrpages--, max_nr_pages--, index++, offset++) {
2523 		sector_t block_nr;
2524 		/*
2525 		 * Map blocks using the previous result first.
2526 		 */
2527 		if (map.m_flags & F2FS_MAP_MAPPED) {
2528 			if (index > map.m_lblk &&
2529 				index < (map.m_lblk + map.m_len))
2530 				goto got_it;
2531 		} else if (index < next_pgofs) {
2532 			/* hole case */
2533 			goto got_it;
2534 		}
2535 
2536 		/*
2537 		 * Then do more f2fs_map_blocks() calls until we are
2538 		 * done with this page.
2539 		 */
2540 		memset(&map, 0, sizeof(map));
2541 		map.m_next_pgofs = &next_pgofs;
2542 		map.m_seg_type = NO_CHECK_TYPE;
2543 		map.m_lblk = index;
2544 		map.m_len = max_nr_pages;
2545 
2546 		ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
2547 		if (ret)
2548 			goto err_out;
2549 got_it:
2550 		if ((map.m_flags & F2FS_MAP_MAPPED)) {
2551 			block_nr = map.m_pblk + index - map.m_lblk;
2552 			if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
2553 						DATA_GENERIC_ENHANCE_READ)) {
2554 				ret = -EFSCORRUPTED;
2555 				goto err_out;
2556 			}
2557 		} else {
2558 			size_t page_offset = offset << PAGE_SHIFT;
2559 			folio_zero_range(folio, page_offset, PAGE_SIZE);
2560 			if (vi && !fsverity_verify_blocks(vi, folio, PAGE_SIZE, page_offset)) {
2561 				ret = -EIO;
2562 				goto err_out;
2563 			}
2564 			continue;
2565 		}
2566 
2567 		/* We must increment read_pages_pending before submitting any BIO,
2568 		 * to prevent a premature folio_end_read() call on the folio.
2569 		 */
2570 		if (folio_test_large(folio)) {
2571 			ffs = ffs_find_or_alloc(folio);
2572 
2573 			/* record one more pending read on this folio */
2574 			spin_lock_irq(&ffs->state_lock);
2575 			ffs->read_pages_pending++;
2576 			spin_unlock_irq(&ffs->state_lock);
2577 		}
2578 
2579 		/*
2580 		 * This page will go to BIO.  Do we need to send this
2581 		 * BIO off first?
2582 		 */
2583 		if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2584 						last_block_in_bio, block_nr) ||
2585 			!f2fs_crypt_mergeable_bio(bio, inode, index, NULL))) {
2586 submit_and_realloc:
2587 			f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
2588 			bio = NULL;
2589 		}
2590 		if (bio == NULL)
2591 			bio = f2fs_grab_read_bio(inode, vi,
2592 					block_nr, max_nr_pages,
2593 					f2fs_ra_op_flags(rac),
2594 					index, false);
2595 
2596 		/*
2597 		 * If the page is under writeback, we need to wait for
2598 		 * its completion to see the correct decrypted data.
2599 		 */
2600 		f2fs_wait_on_block_writeback(inode, block_nr);
2601 
2602 		if (!bio_add_folio(bio, folio, F2FS_BLKSIZE,
2603 					offset << PAGE_SHIFT))
2604 			goto submit_and_realloc;
2605 
2606 		folio_in_bio = true;
2607 		inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
2608 		f2fs_update_iostat(F2FS_I_SB(inode), NULL, FS_DATA_READ_IO,
2609 				F2FS_BLKSIZE);
2610 		last_block_in_bio = block_nr;
2611 	}
2612 	trace_f2fs_read_folio(folio, DATA);
2613 err_out:
2614 	if (!folio_in_bio) {
2615 		folio_end_read(folio, !ret);
2616 		if (ret)
2617 			return ret;
2618 	}
2619 	if (rac) {
2620 		folio = readahead_folio(rac);
2621 		goto next_folio;
2622 	}
2623 out:
2624 	f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
2625 	if (ret) {
2626 		/* Wait for bios and clear the uptodate flag. */
2627 		folio_lock(folio);
2628 		folio_clear_uptodate(folio);
2629 		folio_unlock(folio);
2630 	}
2631 	return ret;
2632 }
2633 
2634 /*
2635  * This function was originally taken from fs/mpage.c, and customized for f2fs.
2636  * The major change stems from block_size == page_size being the f2fs default.
2637  */
2638 static int f2fs_mpage_readpages(struct inode *inode, struct fsverity_info *vi,
2639 		struct readahead_control *rac, struct folio *folio)
2640 {
2641 	struct bio *bio = NULL;
2642 	sector_t last_block_in_bio = 0;
2643 	struct f2fs_map_blocks map;
2644 #ifdef CONFIG_F2FS_FS_COMPRESSION
2645 	struct compress_ctx cc = {
2646 		.inode = inode,
2647 		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2648 		.cluster_size = F2FS_I(inode)->i_cluster_size,
2649 		.cluster_idx = NULL_CLUSTER,
2650 		.rpages = NULL,
2651 		.cpages = NULL,
2652 		.nr_rpages = 0,
2653 		.nr_cpages = 0,
2654 	};
2655 	pgoff_t nc_cluster_idx = NULL_CLUSTER;
2656 	pgoff_t index;
2657 #endif
2658 	pgoff_t next_pgofs = 0;
2659 	unsigned nr_pages = rac ? readahead_count(rac) : 1;
2660 	struct address_space *mapping = rac ? rac->mapping : folio->mapping;
2661 	unsigned max_nr_pages = nr_pages;
2662 	int ret = 0;
2663 
2664 	if (mapping_large_folio_support(mapping))
2665 		return f2fs_read_data_large_folio(inode, vi, rac, folio);
2666 
2667 #ifdef CONFIG_F2FS_FS_COMPRESSION
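	/*
	 * For compressed files, widen the window to cover whole clusters so
	 * that partially requested clusters can still be read and decompressed.
	 */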
2668 	if (f2fs_compressed_file(inode)) {
2669 		index = rac ? readahead_index(rac) : folio->index;
2670 		max_nr_pages = round_up(index + nr_pages, cc.cluster_size) -
2671 				round_down(index, cc.cluster_size);
2672 	}
2673 #endif
2674 
2675 	map.m_pblk = 0;
2676 	map.m_lblk = 0;
2677 	map.m_len = 0;
2678 	map.m_flags = 0;
2679 	map.m_next_pgofs = &next_pgofs;
2680 	map.m_next_extent = NULL;
2681 	map.m_seg_type = NO_CHECK_TYPE;
2682 	map.m_may_create = false;
2683 
2684 	for (; nr_pages; nr_pages--) {
2685 		if (rac) {
2686 			folio = readahead_folio(rac);
2687 			prefetchw(&folio->flags);
2688 		}
2689 
2690 		f2fs_update_read_folio_count(F2FS_I_SB(inode), folio);
2691 
2692 #ifdef CONFIG_F2FS_FS_COMPRESSION
2693 		index = folio->index;
2694 
2695 		if (!f2fs_compressed_file(inode))
2696 			goto read_single_page;
2697 
2698 		/* there are remaining compressed pages, submit them */
2699 		if (!f2fs_cluster_can_merge_page(&cc, index)) {
2700 			cc.vi = vi;
2701 			ret = f2fs_read_multi_pages(&cc, &bio,
2702 						max_nr_pages,
2703 						&last_block_in_bio,
2704 						rac, false);
2705 			f2fs_destroy_compress_ctx(&cc, false);
2706 			if (ret)
2707 				goto set_error_page;
2708 		}
2709 		if (cc.cluster_idx == NULL_CLUSTER) {
2710 			if (nc_cluster_idx == index >> cc.log_cluster_size)
2711 				goto read_single_page;
2712 
2713 			ret = f2fs_is_compressed_cluster(inode, index);
2714 			if (ret < 0)
2715 				goto set_error_page;
2716 			else if (!ret) {
2717 				nc_cluster_idx =
2718 					index >> cc.log_cluster_size;
2719 				goto read_single_page;
2720 			}
2721 
2722 			nc_cluster_idx = NULL_CLUSTER;
2723 		}
2724 		ret = f2fs_init_compress_ctx(&cc);
2725 		if (ret)
2726 			goto set_error_page;
2727 
2728 		f2fs_compress_ctx_add_page(&cc, folio);
2729 
2730 		goto next_page;
2731 read_single_page:
2732 #endif
2733 
2734 		ret = f2fs_read_single_page(inode, vi, folio, max_nr_pages,
2735 					    &map, &bio, &last_block_in_bio,
2736 					    rac);
2737 		if (ret) {
2738 #ifdef CONFIG_F2FS_FS_COMPRESSION
2739 set_error_page:
2740 #endif
2741 			folio_zero_segment(folio, 0, folio_size(folio));
2742 			folio_unlock(folio);
2743 		}
2744 #ifdef CONFIG_F2FS_FS_COMPRESSION
2745 next_page:
2746 #endif
2747 
2748 #ifdef CONFIG_F2FS_FS_COMPRESSION
2749 		if (f2fs_compressed_file(inode)) {
2750 			/* last page */
2751 			if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
2752 				cc.vi = vi;
2753 				ret = f2fs_read_multi_pages(&cc, &bio,
2754 							max_nr_pages,
2755 							&last_block_in_bio,
2756 							rac, false);
2757 				f2fs_destroy_compress_ctx(&cc, false);
2758 			}
2759 		}
2760 #endif
2761 	}
2762 	f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
2763 	return ret;
2764 }
2765 
2766 static int f2fs_read_data_folio(struct file *file, struct folio *folio)
2767 {
2768 	struct inode *inode = folio->mapping->host;
2769 	struct fsverity_info *vi = NULL;
2770 	int ret;
2771 
2772 	trace_f2fs_readpage(folio, DATA);
2773 
2774 	if (!f2fs_is_compress_backend_ready(inode)) {
2775 		folio_unlock(folio);
2776 		return -EOPNOTSUPP;
2777 	}
2778 
2779 	/* If the file has inline data, try to read it directly */
2780 	if (f2fs_has_inline_data(inode)) {
2781 		ret = f2fs_read_inline_data(inode, folio);
2782 		if (ret != -EAGAIN)
2783 			return ret;
2784 	}
2785 
2786 	vi = f2fs_need_verity(inode, folio->index);
2787 	if (vi)
2788 		fsverity_readahead(vi, folio->index, folio_nr_pages(folio));
2789 	return f2fs_mpage_readpages(inode, vi, NULL, folio);
2790 }
2791 
2792 static void f2fs_readahead(struct readahead_control *rac)
2793 {
2794 	struct inode *inode = rac->mapping->host;
2795 	struct fsverity_info *vi = NULL;
2796 
2797 	trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
2798 
2799 	if (!f2fs_is_compress_backend_ready(inode))
2800 		return;
2801 
2802 	/* If the file has inline data, skip readahead */
2803 	if (f2fs_has_inline_data(inode))
2804 		return;
2805 
2806 	vi = f2fs_need_verity(inode, readahead_index(rac));
2807 	if (vi)
2808 		fsverity_readahead(vi, readahead_index(rac),
2809 				   readahead_count(rac));
2810 	f2fs_mpage_readpages(inode, vi, rac, NULL);
2811 }
2812 
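/*
 * Encrypt one data page into a bounce page for fs-layer crypto; with inline
 * crypto the block layer encrypts in flight, so nothing is done here.  A
 * cached META_MAPPING copy, if any, is refreshed with the new ciphertext.
 */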
2813 int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
2814 {
2815 	struct inode *inode = fio_inode(fio);
2816 	struct folio *mfolio;
2817 	struct page *page;
2818 
2819 	if (!f2fs_encrypted_file(inode))
2820 		return 0;
2821 
2822 	page = fio->compressed_page ? fio->compressed_page : fio->page;
2823 
2824 	if (fscrypt_inode_uses_inline_crypto(inode))
2825 		return 0;
2826 
2827 	fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page_folio(page),
2828 					PAGE_SIZE, 0, GFP_NOFS);
2829 	if (IS_ERR(fio->encrypted_page))
2830 		return PTR_ERR(fio->encrypted_page);
2831 
2832 	mfolio = filemap_lock_folio(META_MAPPING(fio->sbi), fio->old_blkaddr);
2833 	if (!IS_ERR(mfolio)) {
2834 		if (folio_test_uptodate(mfolio))
2835 			memcpy(folio_address(mfolio),
2836 				page_address(fio->encrypted_page), PAGE_SIZE);
2837 		f2fs_folio_put(mfolio, true);
2838 	}
2839 	return 0;
2840 }
2841 
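/*
 * Apply the in-place-update (IPU) policy bits to decide whether this write
 * may rewrite the existing block rather than allocate a new one.
 */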
2842 static inline bool check_inplace_update_policy(struct inode *inode,
2843 				struct f2fs_io_info *fio)
2844 {
2845 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2846 
2847 	if (IS_F2FS_IPU_HONOR_OPU_WRITE(sbi) &&
2848 	    is_inode_flag_set(inode, FI_OPU_WRITE))
2849 		return false;
2850 	if (IS_F2FS_IPU_FORCE(sbi))
2851 		return true;
2852 	if (IS_F2FS_IPU_SSR(sbi) && f2fs_need_SSR(sbi))
2853 		return true;
2854 	if (IS_F2FS_IPU_UTIL(sbi) && utilization(sbi) > SM_I(sbi)->min_ipu_util)
2855 		return true;
2856 	if (IS_F2FS_IPU_SSR_UTIL(sbi) && f2fs_need_SSR(sbi) &&
2857 	    utilization(sbi) > SM_I(sbi)->min_ipu_util)
2858 		return true;
2859 
2860 	/*
2861 	 * IPU for rewriting async pages
2862 	 */
2863 	if (IS_F2FS_IPU_ASYNC(sbi) && fio && fio->op == REQ_OP_WRITE &&
2864 	    !(fio->op_flags & REQ_SYNC) && !IS_ENCRYPTED(inode))
2865 		return true;
2866 
2867 	/* this is only set during fdatasync */
2868 	if (IS_F2FS_IPU_FSYNC(sbi) && is_inode_flag_set(inode, FI_NEED_IPU))
2869 		return true;
2870 
2871 	if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2872 			!f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2873 		return true;
2874 
2875 	return false;
2876 }
2877 
2878 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
2879 {
2880 	/* swap file is migrating in aligned write mode */
2881 	if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2882 		return false;
2883 
2884 	if (f2fs_is_pinned_file(inode))
2885 		return true;
2886 
2887 	/* if this is a cold file, we should overwrite to avoid fragmentation */
2888 	if (file_is_cold(inode) && !is_inode_flag_set(inode, FI_OPU_WRITE))
2889 		return true;
2890 
2891 	return check_inplace_update_policy(inode, fio);
2892 }
2893 
2894 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
2895 {
2896 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2897 
2898 	/* The cases below were already checked when the flag was set. */
2899 	if (f2fs_is_pinned_file(inode))
2900 		return false;
2901 	if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK))
2902 		return true;
2903 	if (f2fs_lfs_mode(sbi))
2904 		return true;
2905 	if (S_ISDIR(inode->i_mode))
2906 		return true;
2907 	if (IS_NOQUOTA(inode))
2908 		return true;
2909 	if (f2fs_used_in_atomic_write(inode))
2910 		return true;
2911 	/* rewrite low-ratio compressed data w/ OPU mode to avoid fragmentation */
2912 	if (f2fs_compressed_file(inode) &&
2913 		F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER &&
2914 		is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
2915 		return true;
2916 
2917 	/* swap file is migrating in aligned write mode */
2918 	if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2919 		return true;
2920 
2921 	if (is_inode_flag_set(inode, FI_OPU_WRITE))
2922 		return true;
2923 
2924 	if (fio) {
2925 		if (page_private_gcing(fio->page))
2926 			return true;
2927 		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2928 			f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2929 			return true;
2930 	}
2931 	return false;
2932 }
2933 
2934 static inline bool need_inplace_update(struct f2fs_io_info *fio)
2935 {
2936 	struct inode *inode = fio_inode(fio);
2937 
2938 	if (f2fs_should_update_outplace(inode, fio))
2939 		return false;
2940 
2941 	return f2fs_should_update_inplace(inode, fio);
2942 }
2943 
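/*
 * Write one dirty data folio, either in place (IPU) over its existing block
 * address or out of place (OPU) to a newly allocated block.
 */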
2944 int f2fs_do_write_data_page(struct f2fs_io_info *fio)
2945 {
2946 	struct folio *folio = fio->folio;
2947 	struct inode *inode = folio->mapping->host;
2948 	struct dnode_of_data dn;
2949 	struct node_info ni;
2950 	struct f2fs_lock_context lc;
2951 	bool ipu_force = false;
2952 	bool atomic_commit;
2953 	int err = 0;
2954 
2955 	/* Use COW inode to make dnode_of_data for atomic write */
2956 	atomic_commit = f2fs_is_atomic_file(inode) &&
2957 				folio_test_f2fs_atomic(folio);
2958 	if (atomic_commit)
2959 		set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
2960 	else
2961 		set_new_dnode(&dn, inode, NULL, NULL, 0);
2962 
2963 	if (need_inplace_update(fio) &&
2964 	    f2fs_lookup_read_extent_cache_block(inode, folio->index,
2965 						&fio->old_blkaddr)) {
2966 		if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2967 						DATA_GENERIC_ENHANCE))
2968 			return -EFSCORRUPTED;
2969 
2970 		ipu_force = true;
2971 		fio->need_lock = LOCK_DONE;
2972 		goto got_it;
2973 	}
2974 
2975 	if (is_sbi_flag_set(fio->sbi, SBI_ENABLE_CHECKPOINT) &&
2976 		time_to_inject(fio->sbi, FAULT_SKIP_WRITE))
2977 		return -EINVAL;
2978 
2979 	/* avoid deadlock between page->lock and f2fs_lock_op */
2980 	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi, &lc))
2981 		return -EAGAIN;
2982 
2983 	err = f2fs_get_dnode_of_data(&dn, folio->index, LOOKUP_NODE);
2984 	if (err)
2985 		goto out;
2986 
2987 	fio->old_blkaddr = dn.data_blkaddr;
2988 
2989 	/* This page is already truncated */
2990 	if (fio->old_blkaddr == NULL_ADDR) {
2991 		folio_clear_uptodate(folio);
2992 		folio_clear_f2fs_gcing(folio);
2993 		goto out_writepage;
2994 	}
2995 got_it:
2996 	if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2997 		!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2998 						DATA_GENERIC_ENHANCE)) {
2999 		err = -EFSCORRUPTED;
3000 		goto out_writepage;
3001 	}
3002 
3003 	/* wait for GCed page writeback via META_MAPPING */
3004 	if (fio->meta_gc)
3005 		f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
3006 
3007 	/*
3008 	 * If the current allocation needs SSR,
3009 	 * it is better to write updated data in place.
3010 	 */
3011 	if (ipu_force ||
3012 		(__is_valid_data_blkaddr(fio->old_blkaddr) &&
3013 					need_inplace_update(fio))) {
3014 		err = f2fs_encrypt_one_page(fio);
3015 		if (err)
3016 			goto out_writepage;
3017 
3018 		folio_start_writeback(folio);
3019 		f2fs_put_dnode(&dn);
3020 		if (fio->need_lock == LOCK_REQ)
3021 			f2fs_unlock_op(fio->sbi, &lc);
3022 		err = f2fs_inplace_write_data(fio);
3023 		if (err) {
3024 			if (fscrypt_inode_uses_fs_layer_crypto(inode))
3025 				fscrypt_finalize_bounce_page(&fio->encrypted_page);
3026 			folio_end_writeback(folio);
3027 		} else {
3028 			set_inode_flag(inode, FI_UPDATE_WRITE);
3029 		}
3030 		trace_f2fs_do_write_data_page(folio, IPU);
3031 		return err;
3032 	}
3033 
3034 	if (fio->need_lock == LOCK_RETRY) {
3035 		if (!f2fs_trylock_op(fio->sbi, &lc)) {
3036 			err = -EAGAIN;
3037 			goto out_writepage;
3038 		}
3039 		fio->need_lock = LOCK_REQ;
3040 	}
3041 
3042 	err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false);
3043 	if (err)
3044 		goto out_writepage;
3045 
3046 	fio->version = ni.version;
3047 
3048 	err = f2fs_encrypt_one_page(fio);
3049 	if (err)
3050 		goto out_writepage;
3051 
3052 	folio_start_writeback(folio);
3053 
3054 	if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
3055 		f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
3056 
3057 	/* LFS mode write path */
3058 	f2fs_outplace_write_data(&dn, fio);
3059 	trace_f2fs_do_write_data_page(folio, OPU);
3060 	set_inode_flag(inode, FI_APPEND_WRITE);
3061 	if (atomic_commit)
3062 		folio_clear_f2fs_atomic(folio);
3063 out_writepage:
3064 	f2fs_put_dnode(&dn);
3065 out:
3066 	if (fio->need_lock == LOCK_REQ)
3067 		f2fs_unlock_op(fio->sbi, &lc);
3068 	return err;
3069 }
3070 
3071 int f2fs_write_single_data_page(struct folio *folio, int *submitted,
3072 				struct bio **bio,
3073 				sector_t *last_block,
3074 				struct writeback_control *wbc,
3075 				enum iostat_type io_type,
3076 				int compr_blocks,
3077 				bool allow_balance)
3078 {
3079 	struct inode *inode = folio->mapping->host;
3080 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3081 	loff_t i_size = i_size_read(inode);
3082 	const pgoff_t end_index = ((unsigned long long)i_size)
3083 							>> PAGE_SHIFT;
3084 	loff_t psize = (loff_t)(folio->index + 1) << PAGE_SHIFT;
3085 	unsigned offset = 0;
3086 	bool need_balance_fs = false;
3087 	bool quota_inode = IS_NOQUOTA(inode);
3088 	int err = 0;
3089 	struct f2fs_io_info fio = {
3090 		.sbi = sbi,
3091 		.ino = inode->i_ino,
3092 		.type = DATA,
3093 		.op = REQ_OP_WRITE,
3094 		.op_flags = wbc_to_write_flags(wbc),
3095 		.old_blkaddr = NULL_ADDR,
3096 		.folio = folio,
3097 		.encrypted_page = NULL,
3098 		.submitted = 0,
3099 		.compr_blocks = compr_blocks,
3100 		.need_lock = compr_blocks ? LOCK_DONE : LOCK_RETRY,
3101 		.meta_gc = f2fs_meta_inode_gc_required(inode) ? 1 : 0,
3102 		.io_type = io_type,
3103 		.io_wbc = wbc,
3104 		.bio = bio,
3105 		.last_block = last_block,
3106 	};
3107 
3108 	trace_f2fs_writepage(folio, DATA);
3109 
3110 	/* we should bypass data pages to let the kworker jobs proceed */
3111 	if (unlikely(f2fs_cp_error(sbi))) {
3112 		mapping_set_error(folio->mapping, -EIO);
3113 		/*
3114 		 * don't drop any dirty dentry pages, to keep the latest
3115 		 * directory structure.
3116 		 */
3117 		if (S_ISDIR(inode->i_mode) &&
3118 				!is_sbi_flag_set(sbi, SBI_IS_CLOSE))
3119 			goto redirty_out;
3120 
3121 		/* keep data pages in remount-ro mode */
3122 		if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
3123 			goto redirty_out;
3124 		goto out;
3125 	}
3126 
3127 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
3128 		goto redirty_out;
3129 
3130 	if (folio->index < end_index ||
3131 			f2fs_verity_in_progress(inode) ||
3132 			compr_blocks)
3133 		goto write;
3134 
3135 	/*
3136 	 * If the offset is beyond the file size,
3137 	 * this page does not need to be written to disk.
3138 	 */
3139 	offset = i_size & (PAGE_SIZE - 1);
3140 	if ((folio->index >= end_index + 1) || !offset)
3141 		goto out;
3142 
3143 	folio_zero_segment(folio, offset, folio_size(folio));
3144 write:
3145 	/* Dentry/quota blocks are controlled by checkpoint */
3146 	if (S_ISDIR(inode->i_mode) || quota_inode) {
3147 		struct f2fs_lock_context lc;
3148 
3149 		/*
3150 		 * We need to wait for node_write to avoid block allocation during
3151 		 * checkpoint. This can only happen with quota writes, which can cause
3152 		 * the below discard race condition.
3153 		 */
3154 		if (quota_inode)
3155 			f2fs_down_read_trace(&sbi->node_write, &lc);
3156 
3157 		fio.need_lock = LOCK_DONE;
3158 		err = f2fs_do_write_data_page(&fio);
3159 
3160 		if (quota_inode)
3161 			f2fs_up_read_trace(&sbi->node_write, &lc);
3162 
3163 		goto done;
3164 	}
3165 
3166 	need_balance_fs = true;
3167 	err = -EAGAIN;
3168 	if (f2fs_has_inline_data(inode)) {
3169 		err = f2fs_write_inline_data(inode, folio);
3170 		if (!err)
3171 			goto out;
3172 	}
3173 
3174 	if (err == -EAGAIN) {
3175 		err = f2fs_do_write_data_page(&fio);
3176 		if (err == -EAGAIN) {
3177 			f2fs_bug_on(sbi, compr_blocks);
3178 			fio.need_lock = LOCK_REQ;
3179 			err = f2fs_do_write_data_page(&fio);
3180 		}
3181 	}
3182 
3183 	if (err) {
3184 		file_set_keep_isize(inode);
3185 	} else {
3186 		spin_lock(&F2FS_I(inode)->i_size_lock);
3187 		if (F2FS_I(inode)->last_disk_size < psize)
3188 			F2FS_I(inode)->last_disk_size = psize;
3189 		spin_unlock(&F2FS_I(inode)->i_size_lock);
3190 	}
3191 
3192 done:
3193 	if (err && err != -ENOENT)
3194 		goto redirty_out;
3195 
3196 out:
3197 	inode_dec_dirty_pages(inode);
3198 	if (err) {
3199 		folio_clear_uptodate(folio);
3200 		folio_clear_f2fs_gcing(folio);
3201 	}
3202 	folio_unlock(folio);
3203 	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
3204 			!F2FS_I(inode)->wb_task && allow_balance)
3205 		f2fs_balance_fs(sbi, need_balance_fs);
3206 
3207 	if (unlikely(f2fs_cp_error(sbi))) {
3208 		f2fs_submit_merged_write(sbi, DATA);
3209 		if (bio && *bio)
3210 			f2fs_submit_merged_ipu_write(sbi, bio, NULL);
3211 		submitted = NULL;
3212 	}
3213 
3214 	if (submitted)
3215 		*submitted = fio.submitted;
3216 
3217 	return 0;
3218 
3219 redirty_out:
3220 	folio_redirty_for_writepage(wbc, folio);
3221 	/*
3222 	 * pageout() in MM translates EAGAIN, so calls handle_write_error()
3223 	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
3224 	 * file_write_and_wait_range() will see the EIO error, which is critical
3225 	 * for the return value of fsync() after an atomic_write failure.
3226 	 */
3227 	folio_unlock(folio);
3228 	if (!err)
3229 		return 1;
3230 	return err;
3231 }
3232 
3233 /*
3234  * This function was copied from write_cache_pages from mm/page-writeback.c.
3235  * The major change is that cold data pages are written in a separate step
3236  * from warm/hot data pages.
3237  */
3238 static int f2fs_write_cache_pages(struct address_space *mapping,
3239 					struct writeback_control *wbc,
3240 					enum iostat_type io_type)
3241 {
3242 	int ret = 0;
3243 	int done = 0, retry = 0;
3244 	struct page *pages_local[F2FS_ONSTACK_PAGES];
3245 	struct page **pages = pages_local;
3246 	struct folio_batch fbatch;
3247 	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
3248 	struct bio *bio = NULL;
3249 	sector_t last_block;
3250 #ifdef CONFIG_F2FS_FS_COMPRESSION
3251 	struct inode *inode = mapping->host;
3252 	struct compress_ctx cc = {
3253 		.inode = inode,
3254 		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
3255 		.cluster_size = F2FS_I(inode)->i_cluster_size,
3256 		.cluster_idx = NULL_CLUSTER,
3257 		.rpages = NULL,
3258 		.nr_rpages = 0,
3259 		.cpages = NULL,
3260 		.valid_nr_cpages = 0,
3261 		.rbuf = NULL,
3262 		.cbuf = NULL,
3263 		.rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
3264 		.private = NULL,
3265 	};
3266 #endif
3267 	int nr_folios, p, idx;
3268 	int nr_pages;
3269 	unsigned int max_pages = F2FS_ONSTACK_PAGES;
3270 	pgoff_t index;
3271 	pgoff_t end;		/* Inclusive */
3272 	pgoff_t done_index;
3273 	int range_whole = 0;
3274 	xa_mark_t tag;
3275 	int nwritten = 0;
3276 	int submitted = 0;
3277 	int i;
3278 
3279 #ifdef CONFIG_F2FS_FS_COMPRESSION
3280 	if (f2fs_compressed_file(inode) &&
3281 		1 << cc.log_cluster_size > F2FS_ONSTACK_PAGES) {
3282 		pages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
3283 				cc.log_cluster_size, GFP_NOFS | __GFP_NOFAIL);
3284 		max_pages = 1 << cc.log_cluster_size;
3285 	}
3286 #endif
3287 
3288 	folio_batch_init(&fbatch);
3289 
3290 	if (get_dirty_pages(mapping->host) <=
3291 				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
3292 		set_inode_flag(mapping->host, FI_HOT_DATA);
3293 	else
3294 		clear_inode_flag(mapping->host, FI_HOT_DATA);
3295 
3296 	if (wbc->range_cyclic) {
3297 		index = mapping->writeback_index; /* prev offset */
3298 		end = -1;
3299 	} else {
3300 		index = wbc->range_start >> PAGE_SHIFT;
3301 		end = wbc->range_end >> PAGE_SHIFT;
3302 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
3303 			range_whole = 1;
3304 	}
3305 	tag = wbc_to_tag(wbc);
3306 retry:
3307 	retry = 0;
3308 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
3309 		tag_pages_for_writeback(mapping, index, end);
3310 	done_index = index;
3311 	while (!done && !retry && (index <= end)) {
3312 		nr_pages = 0;
3313 again:
3314 		nr_folios = filemap_get_folios_tag(mapping, &index, end,
3315 				tag, &fbatch);
3316 		if (nr_folios == 0) {
3317 			if (nr_pages)
3318 				goto write;
3319 			break;
3320 		}
3321 
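		/* expand each folio in the batch into its constituent pages */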
3322 		for (i = 0; i < nr_folios; i++) {
3323 			struct folio *folio = fbatch.folios[i];
3324 
3325 			idx = 0;
3326 			p = folio_nr_pages(folio);
3327 add_more:
3328 			pages[nr_pages] = folio_page(folio, idx);
3329 			folio_get(folio);
3330 			if (++nr_pages == max_pages) {
3331 				index = folio->index + idx + 1;
3332 				folio_batch_release(&fbatch);
3333 				goto write;
3334 			}
3335 			if (++idx < p)
3336 				goto add_more;
3337 		}
3338 		folio_batch_release(&fbatch);
3339 		goto again;
3340 write:
3341 		for (i = 0; i < nr_pages; i++) {
3342 			struct page *page = pages[i];
3343 			struct folio *folio = page_folio(page);
3344 			bool need_readd;
3345 readd:
3346 			need_readd = false;
3347 #ifdef CONFIG_F2FS_FS_COMPRESSION
3348 			if (f2fs_compressed_file(inode)) {
3349 				void *fsdata = NULL;
3350 				struct page *pagep;
3351 				int ret2;
3352 
3353 				ret = f2fs_init_compress_ctx(&cc);
3354 				if (ret) {
3355 					done = 1;
3356 					break;
3357 				}
3358 
3359 				if (!f2fs_cluster_can_merge_page(&cc,
3360 								folio->index)) {
3361 					ret = f2fs_write_multi_pages(&cc,
3362 						&submitted, wbc, io_type);
3363 					if (!ret)
3364 						need_readd = true;
3365 					goto result;
3366 				}
3367 
3368 				if (unlikely(f2fs_cp_error(sbi)))
3369 					goto lock_folio;
3370 
3371 				if (!f2fs_cluster_is_empty(&cc))
3372 					goto lock_folio;
3373 
3374 				if (f2fs_all_cluster_page_ready(&cc,
3375 					pages, i, nr_pages, true))
3376 					goto lock_folio;
3377 
3378 				ret2 = f2fs_prepare_compress_overwrite(
3379 							inode, &pagep,
3380 							folio->index, &fsdata);
3381 				if (ret2 < 0) {
3382 					ret = ret2;
3383 					done = 1;
3384 					break;
3385 				} else if (ret2 &&
3386 					(!f2fs_compress_write_end(inode,
3387 						fsdata, folio->index, 1) ||
3388 					 !f2fs_all_cluster_page_ready(&cc,
3389 						pages, i, nr_pages,
3390 						false))) {
3391 					retry = 1;
3392 					break;
3393 				}
3394 			}
3395 #endif
3396 			/* give priority to WB_SYNC threads */
3397 			if (atomic_read(&sbi->wb_sync_req[DATA]) &&
3398 					wbc->sync_mode == WB_SYNC_NONE) {
3399 				done = 1;
3400 				break;
3401 			}
3402 #ifdef CONFIG_F2FS_FS_COMPRESSION
3403 lock_folio:
3404 #endif
3405 			done_index = folio->index;
3406 retry_write:
3407 			folio_lock(folio);
3408 
3409 			if (unlikely(folio->mapping != mapping)) {
3410 continue_unlock:
3411 				folio_unlock(folio);
3412 				continue;
3413 			}
3414 
3415 			if (!folio_test_dirty(folio)) {
3416 				/* someone wrote it for us */
3417 				goto continue_unlock;
3418 			}
3419 
3420 			if (folio_test_writeback(folio)) {
3421 				if (wbc->sync_mode == WB_SYNC_NONE)
3422 					goto continue_unlock;
3423 				f2fs_folio_wait_writeback(folio, DATA, true, true);
3424 			}
3425 
3426 			if (!folio_clear_dirty_for_io(folio))
3427 				goto continue_unlock;
3428 
3429 #ifdef CONFIG_F2FS_FS_COMPRESSION
3430 			if (f2fs_compressed_file(inode)) {
3431 				folio_get(folio);
3432 				f2fs_compress_ctx_add_page(&cc, folio);
3433 				continue;
3434 			}
3435 #endif
3436 			submitted = 0;
3437 			ret = f2fs_write_single_data_page(folio,
3438 					&submitted, &bio, &last_block,
3439 					wbc, io_type, 0, true);
3440 #ifdef CONFIG_F2FS_FS_COMPRESSION
3441 result:
3442 #endif
3443 			nwritten += submitted;
3444 			wbc->nr_to_write -= submitted;
3445 
3446 			if (unlikely(ret)) {
3447 				/*
3448 				 * keep nr_to_write, since vfs uses this to
3449 				 * get # of written pages.
3450 				 */
3451 				if (ret == 1) {
3452 					ret = 0;
3453 					goto next;
3454 				} else if (ret == -EAGAIN) {
3455 					ret = 0;
3456 					if (wbc->sync_mode == WB_SYNC_ALL) {
3457 						f2fs_schedule_timeout(
3458 							DEFAULT_SCHEDULE_TIMEOUT);
3459 						goto retry_write;
3460 					}
3461 					goto next;
3462 				}
3463 				done_index = folio_next_index(folio);
3464 				done = 1;
3465 				break;
3466 			}
3467 
3468 			if (wbc->nr_to_write <= 0 &&
3469 					wbc->sync_mode == WB_SYNC_NONE) {
3470 				done = 1;
3471 				break;
3472 			}
3473 next:
3474 			if (need_readd)
3475 				goto readd;
3476 		}
3477 		release_pages(pages, nr_pages);
3478 		cond_resched();
3479 	}
3480 #ifdef CONFIG_F2FS_FS_COMPRESSION
3481 	/* flush remaining pages in the compress cluster */
3482 	if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
3483 		ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
3484 		nwritten += submitted;
3485 		wbc->nr_to_write -= submitted;
3486 		if (ret) {
3487 			done = 1;
3488 			retry = 0;
3489 		}
3490 	}
3491 	if (f2fs_compressed_file(inode))
3492 		f2fs_destroy_compress_ctx(&cc, false);
3493 #endif
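	/* a compressed cluster could not be prepared for overwrite; rescan the range */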
3494 	if (retry) {
3495 		index = 0;
3496 		end = -1;
3497 		goto retry;
3498 	}
3499 	if (wbc->range_cyclic && !done)
3500 		done_index = 0;
3501 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
3502 		mapping->writeback_index = done_index;
3503 
3504 	if (nwritten)
3505 		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
3506 								NULL, 0, DATA);
3507 	/* submit cached bio of IPU write */
3508 	if (bio)
3509 		f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
3510 
3511 #ifdef CONFIG_F2FS_FS_COMPRESSION
3512 	if (pages != pages_local)
3513 		kfree(pages);
3514 #endif
3515 
3516 	return ret;
3517 }
3518 
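/*
 * Decide whether writeback should be funneled through sbi->writepages;
 * serializing here presumably keeps each inode's blocks contiguous when
 * multiple writers race.
 */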
3519 static inline bool __should_serialize_io(struct inode *inode,
3520 					struct writeback_control *wbc)
3521 {
3522 	/* to avoid deadlock in the data flush path */
3523 	if (F2FS_I(inode)->wb_task)
3524 		return false;
3525 
3526 	if (!S_ISREG(inode->i_mode))
3527 		return false;
3528 	if (IS_NOQUOTA(inode))
3529 		return false;
3530 
3531 	if (f2fs_is_pinned_file(inode))
3532 		return false;
3533 	if (f2fs_need_compress_data(inode))
3534 		return true;
3535 	if (wbc->sync_mode != WB_SYNC_ALL)
3536 		return true;
3537 	if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
3538 		return true;
3539 	return false;
3540 }
3541 
3542 static inline void account_writeback(struct inode *inode, bool inc)
3543 {
3544 	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3545 		return;
3546 
3547 	f2fs_down_read(&F2FS_I(inode)->i_sem);
3548 	if (inc)
3549 		atomic_inc(&F2FS_I(inode)->writeback);
3550 	else
3551 		atomic_dec(&F2FS_I(inode)->writeback);
3552 	f2fs_up_read(&F2FS_I(inode)->i_sem);
3553 }
3554 
3555 static inline void update_skipped_write(struct f2fs_sb_info *sbi,
3556 						struct writeback_control *wbc)
3557 {
3558 	long skipped = wbc->pages_skipped;
3559 
3560 	if (is_sbi_flag_set(sbi, SBI_ENABLE_CHECKPOINT) && skipped &&
3561 		wbc->sync_mode == WB_SYNC_ALL)
3562 		atomic_add(skipped, &sbi->nr_pages[F2FS_SKIPPED_WRITE]);
3563 }
3564 
3565 static int __f2fs_write_data_pages(struct address_space *mapping,
3566 						struct writeback_control *wbc,
3567 						enum iostat_type io_type)
3568 {
3569 	struct inode *inode = mapping->host;
3570 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3571 	struct blk_plug plug;
3572 	int ret;
3573 	bool locked = false;
3574 
3575 	/* skip writing if there is no dirty page in this inode */
3576 	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
3577 		return 0;
3578 
3579 	/* during POR, we don't need to trigger writepage at all. */
3580 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
3581 		goto skip_write;
3582 
3583 	if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
3584 			wbc->sync_mode == WB_SYNC_NONE &&
3585 			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
3586 			f2fs_available_free_memory(sbi, DIRTY_DENTS))
3587 		goto skip_write;
3588 
3589 	/* skip writing during the file defragment preparation stage */
3590 	if (is_inode_flag_set(inode, FI_SKIP_WRITES))
3591 		goto skip_write;
3592 
3593 	trace_f2fs_writepages(mapping->host, wbc, DATA);
3594 
3595 	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
3596 	if (wbc->sync_mode == WB_SYNC_ALL)
3597 		atomic_inc(&sbi->wb_sync_req[DATA]);
3598 	else if (atomic_read(&sbi->wb_sync_req[DATA])) {
3599 		/* to avoid potential deadlock */
3600 		if (current->plug)
3601 			blk_finish_plug(current->plug);
3602 		goto skip_write;
3603 	}
3604 
3605 	if (__should_serialize_io(inode, wbc)) {
3606 		mutex_lock(&sbi->writepages);
3607 		locked = true;
3608 	}
3609 
3610 	account_writeback(inode, true);
3611 
3612 	blk_start_plug(&plug);
3613 	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
3614 	blk_finish_plug(&plug);
3615 
3616 	account_writeback(inode, false);
3617 
3618 	if (locked)
3619 		mutex_unlock(&sbi->writepages);
3620 
3621 	if (wbc->sync_mode == WB_SYNC_ALL)
3622 		atomic_dec(&sbi->wb_sync_req[DATA]);
3623 	/*
3624 	 * if some pages were truncated, we cannot rely on mapping->host
3625 	 * to detect pending bios.
3626 	 */
3627 
3628 	f2fs_remove_dirty_inode(inode);
3629 
3630 	/*
3631 	 * f2fs_write_cache_pages() has retry logic for EAGAIN case which is
3632 	 * common when racing w/ checkpoint, so only update skipped write
3633 	 * when ret is non-zero.
3634 	 */
3635 	if (ret)
3636 		update_skipped_write(sbi, wbc);
3637 	return ret;
3638 
3639 skip_write:
3640 	wbc->pages_skipped += get_dirty_pages(inode);
3641 	update_skipped_write(sbi, wbc);
3642 	trace_f2fs_writepages(mapping->host, wbc, DATA);
3643 	return 0;
3644 }
3645 
3646 static int f2fs_write_data_pages(struct address_space *mapping,
3647 			    struct writeback_control *wbc)
3648 {
3649 	struct inode *inode = mapping->host;
3650 
3651 	return __f2fs_write_data_pages(mapping, wbc,
3652 			F2FS_I(inode)->cp_task == current ?
3653 			FS_CP_DATA_IO : FS_DATA_IO);
3654 }
3655 
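/* A write failed or came up short: truncate any blocks allocated past i_size. */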
3656 void f2fs_write_failed(struct inode *inode, loff_t to)
3657 {
3658 	loff_t i_size = i_size_read(inode);
3659 
3660 	if (IS_NOQUOTA(inode))
3661 		return;
3662 
3663 	/* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
3664 	if (to > i_size && !f2fs_verity_in_progress(inode)) {
3665 		f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3666 		filemap_invalidate_lock(inode->i_mapping);
3667 
3668 		truncate_pagecache(inode, i_size);
3669 		f2fs_truncate_blocks(inode, i_size, true);
3670 
3671 		filemap_invalidate_unlock(inode->i_mapping);
3672 		f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3673 	}
3674 }
3675 
3676 static int prepare_write_begin(struct f2fs_sb_info *sbi,
3677 			struct folio *folio, loff_t pos, unsigned int len,
3678 			block_t *blk_addr, bool *node_changed)
3679 {
3680 	struct inode *inode = folio->mapping->host;
3681 	pgoff_t index = folio->index;
3682 	struct dnode_of_data dn;
3683 	struct f2fs_lock_context lc;
3684 	struct folio *ifolio;
3685 	bool locked = false;
3686 	int flag = F2FS_GET_BLOCK_PRE_AIO;
3687 	int err = 0;
3688 
3689 	/*
3690 	 * If a whole page is being written and we already preallocated all the
3691 	 * blocks, then there is no need to get a block address now.
3692 	 */
3693 	if (len == PAGE_SIZE && is_inode_flag_set(inode, FI_PREALLOCATED_ALL))
3694 		return 0;
3695 
3696 	/* f2fs_lock_op avoids the race between checkpoint write and convert_inline_page */
3697 	if (f2fs_has_inline_data(inode)) {
3698 		if (pos + len > MAX_INLINE_DATA(inode))
3699 			flag = F2FS_GET_BLOCK_DEFAULT;
3700 		f2fs_map_lock(sbi, &lc, flag);
3701 		locked = true;
3702 	} else if ((pos & PAGE_MASK) >= i_size_read(inode)) {
3703 		f2fs_map_lock(sbi, &lc, flag);
3704 		locked = true;
3705 	}
3706 
3707 restart:
3708 	/* check inline_data */
3709 	ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
3710 	if (IS_ERR(ifolio)) {
3711 		err = PTR_ERR(ifolio);
3712 		goto unlock_out;
3713 	}
3714 
3715 	set_new_dnode(&dn, inode, ifolio, ifolio, 0);
3716 
3717 	if (f2fs_has_inline_data(inode)) {
3718 		if (pos + len <= MAX_INLINE_DATA(inode)) {
3719 			f2fs_do_read_inline_data(folio, ifolio);
3720 			set_inode_flag(inode, FI_DATA_EXIST);
3721 			if (inode->i_nlink)
3722 				folio_set_f2fs_inline(ifolio);
3723 			goto out;
3724 		}
3725 		err = f2fs_convert_inline_folio(&dn, folio);
3726 		if (err || dn.data_blkaddr != NULL_ADDR)
3727 			goto out;
3728 	}
3729 
3730 	if (!f2fs_lookup_read_extent_cache_block(inode, index,
3731 						 &dn.data_blkaddr)) {
3732 		if (IS_DEVICE_ALIASING(inode)) {
3733 			err = -ENODATA;
3734 			goto out;
3735 		}
3736 
3737 		if (locked) {
3738 			err = f2fs_reserve_block(&dn, index);
3739 			goto out;
3740 		}
3741 
3742 		/* hole case */
3743 		err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3744 		if (!err && dn.data_blkaddr != NULL_ADDR)
3745 			goto out;
3746 		f2fs_put_dnode(&dn);
3747 		f2fs_map_lock(sbi, &lc, F2FS_GET_BLOCK_PRE_AIO);
3748 		WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
3749 		locked = true;
3750 		goto restart;
3751 	}
3752 out:
3753 	if (!err) {
3754 		/* convert_inline_page can make node_changed */
3755 		*blk_addr = dn.data_blkaddr;
3756 		*node_changed = dn.node_changed;
3757 	}
3758 	f2fs_put_dnode(&dn);
3759 unlock_out:
3760 	if (locked)
3761 		f2fs_map_unlock(sbi, &lc, flag);
3762 	return err;
3763 }
3764 
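/* Look up the block address at @index, treating a missing dnode as a hole. */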
3765 static int __find_data_block(struct inode *inode, pgoff_t index,
3766 				block_t *blk_addr)
3767 {
3768 	struct dnode_of_data dn;
3769 	struct folio *ifolio;
3770 	int err = 0;
3771 
3772 	ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino);
3773 	if (IS_ERR(ifolio))
3774 		return PTR_ERR(ifolio);
3775 
3776 	set_new_dnode(&dn, inode, ifolio, ifolio, 0);
3777 
3778 	if (!f2fs_lookup_read_extent_cache_block(inode, index,
3779 						 &dn.data_blkaddr)) {
3780 		/* hole case */
3781 		err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3782 		if (err) {
3783 			dn.data_blkaddr = NULL_ADDR;
3784 			err = 0;
3785 		}
3786 	}
3787 	*blk_addr = dn.data_blkaddr;
3788 	f2fs_put_dnode(&dn);
3789 	return err;
3790 }
3791 
3792 static int __reserve_data_block(struct inode *inode, pgoff_t index,
3793 				block_t *blk_addr, bool *node_changed)
3794 {
3795 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3796 	struct dnode_of_data dn;
3797 	struct f2fs_lock_context lc;
3798 	struct folio *ifolio;
3799 	int err = 0;
3800 
3801 	f2fs_map_lock(sbi, &lc, F2FS_GET_BLOCK_PRE_AIO);
3802 
3803 	ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
3804 	if (IS_ERR(ifolio)) {
3805 		err = PTR_ERR(ifolio);
3806 		goto unlock_out;
3807 	}
3808 	set_new_dnode(&dn, inode, ifolio, ifolio, 0);
3809 
3810 	if (!f2fs_lookup_read_extent_cache_block(dn.inode, index,
3811 						&dn.data_blkaddr))
3812 		err = f2fs_reserve_block(&dn, index);
3813 
3814 	*blk_addr = dn.data_blkaddr;
3815 	*node_changed = dn.node_changed;
3816 	f2fs_put_dnode(&dn);
3817 
3818 unlock_out:
3819 	f2fs_map_unlock(sbi, &lc, F2FS_GET_BLOCK_PRE_AIO);
3820 	return err;
3821 }
3822 
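/*
 * Pick the block an atomic write should operate on: prefer an already-COWed
 * block in the COW inode; otherwise reserve a new COW block, pre-filled from
 * the original block when one exists and the inode is not in
 * FI_ATOMIC_REPLACE mode.
 */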
3823 static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
3824 			struct folio *folio, loff_t pos, unsigned int len,
3825 			block_t *blk_addr, bool *node_changed, bool *use_cow)
3826 {
3827 	struct inode *inode = folio->mapping->host;
3828 	struct inode *cow_inode = F2FS_I(inode)->cow_inode;
3829 	pgoff_t index = folio->index;
3830 	int err = 0;
3831 	block_t ori_blk_addr = NULL_ADDR;
3832 
3833 	/* If pos is beyond the end of file, reserve a new block in COW inode */
3834 	if ((pos & PAGE_MASK) >= i_size_read(inode))
3835 		goto reserve_block;
3836 
3837 	/* Look for the block in COW inode first */
3838 	err = __find_data_block(cow_inode, index, blk_addr);
3839 	if (err)
3840 		return err;
3841 	if (*blk_addr != NULL_ADDR) {
3842 		*use_cow = true;
3843 		return 0;
3844 	}
3845 
3846 	if (is_inode_flag_set(inode, FI_ATOMIC_REPLACE))
3847 		goto reserve_block;
3848 
3849 	/* Look for the block in the original inode */
3850 	err = __find_data_block(inode, index, &ori_blk_addr);
3851 	if (err)
3852 		return err;
3853 
3854 reserve_block:
3855 	/* Finally, we should reserve a new block in COW inode for the update */
3856 	err = __reserve_data_block(cow_inode, index, blk_addr, node_changed);
3857 	if (err)
3858 		return err;
3859 	inc_atomic_write_cnt(inode);
3860 
3861 	if (ori_blk_addr != NULL_ADDR)
3862 		*blk_addr = ori_blk_addr;
3863 	return 0;
3864 }
3865 
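/*
 * .write_begin: return a locked folio ready to receive @len bytes at @pos,
 * reading in the old data first when the write does not cover the whole folio.
 */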
3866 static int f2fs_write_begin(const struct kiocb *iocb,
3867 			    struct address_space *mapping,
3868 			    loff_t pos, unsigned len, struct folio **foliop,
3869 			    void **fsdata)
3870 {
3871 	struct inode *inode = mapping->host;
3872 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3873 	struct folio *folio;
3874 	pgoff_t index = pos >> PAGE_SHIFT;
3875 	bool need_balance = false;
3876 	bool use_cow = false;
3877 	block_t blkaddr = NULL_ADDR;
3878 	int err = 0;
3879 
3880 	trace_f2fs_write_begin(inode, pos, len);
3881 
3882 	if (!f2fs_is_checkpoint_ready(sbi)) {
3883 		err = -ENOSPC;
3884 		goto fail;
3885 	}
3886 
3887 	/*
3888 	 * Convert inline data here, before locking the target folio, to avoid
3889 	 * deadlock between the inode folio and folio #0. The locking order for
3890 	 * inline_data conversion is: folio_lock(folio #0) -> folio_lock(inode folio)
3891 	 */
3892 	if (index != 0) {
3893 		err = f2fs_convert_inline_inode(inode);
3894 		if (err)
3895 			goto fail;
3896 	}
3897 
3898 #ifdef CONFIG_F2FS_FS_COMPRESSION
3899 	if (f2fs_compressed_file(inode)) {
3900 		int ret;
3901 		struct page *page;
3902 
3903 		*fsdata = NULL;
3904 
3905 		if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
3906 			goto repeat;
3907 
3908 		ret = f2fs_prepare_compress_overwrite(inode, &page,
3909 							index, fsdata);
3910 		if (ret < 0) {
3911 			err = ret;
3912 			goto fail;
3913 		} else if (ret) {
3914 			*foliop = page_folio(page);
3915 			return 0;
3916 		}
3917 	}
3918 #endif
3919 
3920 repeat:
3921 	/*
3922 	 * Do not use FGP_STABLE, to avoid deadlock; we wait for writeback
3923 	 * below, under our own I/O control.
3924 	 */
3925 	folio = f2fs_filemap_get_folio(mapping, index,
3926 				FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_NOFS,
3927 				mapping_gfp_mask(mapping));
3928 	if (IS_ERR(folio)) {
3929 		err = PTR_ERR(folio);
3930 		goto fail;
3931 	}
3932 
3933 	/* TODO: cluster can be compressed due to race with .writepages */
3934 
3935 	*foliop = folio;
3936 
3937 	if (f2fs_is_atomic_file(inode))
3938 		err = prepare_atomic_write_begin(sbi, folio, pos, len,
3939 					&blkaddr, &need_balance, &use_cow);
3940 	else
3941 		err = prepare_write_begin(sbi, folio, pos, len,
3942 					&blkaddr, &need_balance);
3943 	if (err)
3944 		goto put_folio;
3945 
3946 	if (need_balance && !IS_NOQUOTA(inode) &&
3947 			has_not_enough_free_secs(sbi, 0, 0)) {
3948 		folio_unlock(folio);
3949 		f2fs_balance_fs(sbi, true);
3950 		folio_lock(folio);
3951 		if (folio->mapping != mapping) {
3952 			/* The folio got truncated from under us */
3953 			folio_unlock(folio);
3954 			folio_put(folio);
3955 			goto repeat;
3956 		}
3957 	}
3958 
3959 	f2fs_folio_wait_writeback(folio, DATA, false, true);
3960 
3961 	if (len == folio_size(folio) || folio_test_uptodate(folio))
3962 		return 0;
3963 
3964 	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
3965 	    !f2fs_verity_in_progress(inode)) {
3966 		folio_zero_segment(folio, len, folio_size(folio));
3967 		return 0;
3968 	}
3969 
3970 	if (blkaddr == NEW_ADDR) {
3971 		folio_zero_segment(folio, 0, folio_size(folio));
3972 		folio_mark_uptodate(folio);
3973 	} else {
3974 		if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3975 				DATA_GENERIC_ENHANCE_READ)) {
3976 			err = -EFSCORRUPTED;
3977 			goto put_folio;
3978 		}
3979 		f2fs_submit_page_read(use_cow ? F2FS_I(inode)->cow_inode :
3980 						inode,
3981 				      NULL, /* can't write to fsverity files */
3982 				      folio, blkaddr, 0, true);
3983 
3984 		folio_lock(folio);
3985 		if (unlikely(folio->mapping != mapping)) {
3986 			folio_unlock(folio);
3987 			folio_put(folio);
3988 			goto repeat;
3989 		}
3990 		if (unlikely(!folio_test_uptodate(folio))) {
3991 			err = -EIO;
3992 			goto put_folio;
3993 		}
3994 	}
3995 	return 0;
3996 
3997 put_folio:
3998 	f2fs_folio_put(folio, true);
3999 fail:
4000 	f2fs_write_failed(inode, pos + len);
4001 	return err;
4002 }
4003 
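/*
 * .write_end: mark the folio uptodate and dirty, extend i_size when needed,
 * and release the folio lock and reference taken in f2fs_write_begin().
 */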
4004 static int f2fs_write_end(const struct kiocb *iocb,
4005 			struct address_space *mapping,
4006 			loff_t pos, unsigned len, unsigned copied,
4007 			struct folio *folio, void *fsdata)
4008 {
4009 	struct inode *inode = folio->mapping->host;
4010 
4011 	trace_f2fs_write_end(inode, pos, len, copied);
4012 
4013 	/*
4014 	 * A non-uptodate folio here should come from a len == PAGE_SIZE write,
4015 	 * so copied is expected to be PAGE_SIZE as well. Otherwise, report zero
4016 	 * bytes copied and let generic_perform_write() retry the copy.
4017 	 */
4018 	if (!folio_test_uptodate(folio)) {
4019 		if (unlikely(copied != len))
4020 			copied = 0;
4021 		else
4022 			folio_mark_uptodate(folio);
4023 	}
4024 
4025 #ifdef CONFIG_F2FS_FS_COMPRESSION
4026 	/* overwrite compressed file */
4027 	if (f2fs_compressed_file(inode) && fsdata) {
4028 		f2fs_compress_write_end(inode, fsdata, folio->index, copied);
4029 		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
4030 
4031 		if (pos + copied > i_size_read(inode) &&
4032 				!f2fs_verity_in_progress(inode))
4033 			f2fs_i_size_write(inode, pos + copied);
4034 		return copied;
4035 	}
4036 #endif
4037 
4038 	if (!copied)
4039 		goto unlock_out;
4040 
4041 	folio_mark_dirty(folio);
4042 
4043 	if (f2fs_is_atomic_file(inode))
4044 		folio_set_f2fs_atomic(folio);
4045 
4046 	if (pos + copied > i_size_read(inode) &&
4047 	    !f2fs_verity_in_progress(inode)) {
4048 		f2fs_i_size_write(inode, pos + copied);
4049 		if (f2fs_is_atomic_file(inode))
4050 			f2fs_i_size_write(F2FS_I(inode)->cow_inode,
4051 					pos + copied);
4052 	}
4053 unlock_out:
4054 	f2fs_folio_put(folio, true);
4055 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
4056 	return copied;
4057 }
4058 
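/*
 * Drop dirty-page accounting for an invalidated folio and, when the whole
 * folio is invalidated, detach its private state as well.
 */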
4059 void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
4060 {
4061 	struct inode *inode = folio->mapping->host;
4062 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4063 
4064 	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
4065 				(offset || length != folio_size(folio)))
4066 		return;
4067 
4068 	if (folio_test_dirty(folio)) {
4069 		if (inode->i_ino == F2FS_META_INO(sbi)) {
4070 			dec_page_count(sbi, F2FS_DIRTY_META);
4071 		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
4072 			dec_page_count(sbi, F2FS_DIRTY_NODES);
4073 		} else {
4074 			inode_dec_dirty_pages(inode);
4075 			f2fs_remove_dirty_inode(inode);
4076 		}
4077 	}
4078 
4079 	if (offset || length != folio_size(folio))
4080 		return;
4081 
4082 	folio_cancel_dirty(folio);
4083 	ffs_detach_free(folio);
4084 }
4085 
4086 bool f2fs_release_folio(struct folio *folio, gfp_t wait)
4087 {
4088 	/* If this is a dirty folio, keep its private data */
4089 	if (folio_test_dirty(folio))
4090 		return false;
4091 
4092 	ffs_detach_free(folio);
4093 	return true;
4094 }
4095 
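/* Mark a data folio dirty and account it against its owning inode. */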
4096 static bool f2fs_dirty_data_folio(struct address_space *mapping,
4097 		struct folio *folio)
4098 {
4099 	struct inode *inode = mapping->host;
4100 
4101 	trace_f2fs_set_page_dirty(folio, DATA);
4102 
4103 	if (!folio_test_uptodate(folio))
4104 		folio_mark_uptodate(folio);
4105 	BUG_ON(folio_test_swapcache(folio));
4106 
4107 	if (filemap_dirty_folio(mapping, folio)) {
4108 		f2fs_update_dirty_folio(inode, folio);
4109 		return true;
4110 	}
4111 	return false;
4112 }
4113 
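/*
 * Translate a logical block of a compressed file into its physical block
 * number.  Returns 0 for holes, invalid addresses and compressed clusters.
 */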
4115 static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
4116 {
4117 #ifdef CONFIG_F2FS_FS_COMPRESSION
4118 	struct dnode_of_data dn;
4119 	sector_t start_idx, blknr = 0;
4120 	int ret;
4121 
4122 	start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
4123 
4124 	set_new_dnode(&dn, inode, NULL, NULL, 0);
4125 	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
4126 	if (ret)
4127 		return 0;
4128 
4129 	if (dn.data_blkaddr != COMPRESS_ADDR) {
4130 		dn.ofs_in_node += block - start_idx;
4131 		blknr = f2fs_data_blkaddr(&dn);
4132 		if (!__is_valid_data_blkaddr(blknr))
4133 			blknr = 0;
4134 	}
4135 
4136 	f2fs_put_dnode(&dn);
4137 	return blknr;
4138 #else
4139 	return 0;
4140 #endif
4141 }
4142 
4143 
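/*
 * .bmap: flush dirty folios first so blocks are actually allocated, then
 * resolve @block to a physical block number.
 */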
4145 {
4146 	struct inode *inode = mapping->host;
4147 	sector_t blknr = 0;
4148 
4149 	if (f2fs_has_inline_data(inode))
4150 		goto out;
4151 
4152 	/* flush dirty folios to make sure all blocks are allocated */
4153 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
4154 		filemap_write_and_wait(mapping);
4155 
4156 	/* the block number must be below the file's maximum block count */
4157 	if (unlikely(block >= max_file_blocks(inode)))
4158 		goto out;
4159 
4160 	if (f2fs_compressed_file(inode)) {
4161 		blknr = f2fs_bmap_compress(inode, block);
4162 	} else {
4163 		struct f2fs_map_blocks map;
4164 
4165 		memset(&map, 0, sizeof(map));
4166 		map.m_lblk = block;
4167 		map.m_len = 1;
4168 		map.m_next_pgofs = NULL;
4169 		map.m_seg_type = NO_CHECK_TYPE;
4170 
4171 		if (!f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_BMAP))
4172 			blknr = map.m_pblk;
4173 	}
4174 out:
4175 	trace_f2fs_bmap(inode, block, blknr);
4176 	return blknr;
4177 }
4178 
4179 #ifdef CONFIG_SWAP
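/*
 * Rewrite @blkcnt blocks starting at @start_blk into freshly allocated
 * pinned sections, one section at a time, so that the range becomes
 * section-aligned on disk.
 */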
4180 static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
4181 							unsigned int blkcnt)
4182 {
4183 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4184 	unsigned int blkofs;
4185 	unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
4186 	unsigned int end_blk = start_blk + blkcnt - 1;
4187 	unsigned int secidx = start_blk / blk_per_sec;
4188 	unsigned int end_sec;
4189 	int ret = 0;
4190 
4191 	if (!blkcnt)
4192 		return 0;
4193 	end_sec = end_blk / blk_per_sec;
4194 
4195 	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4196 	filemap_invalidate_lock(inode->i_mapping);
4197 
4198 	set_inode_flag(inode, FI_ALIGNED_WRITE);
4199 	set_inode_flag(inode, FI_OPU_WRITE);
4200 
4201 	for (; secidx <= end_sec; secidx++) {
4202 		unsigned int blkofs_end = secidx == end_sec ?
4203 				end_blk % blk_per_sec : blk_per_sec - 1;
4204 
4205 		f2fs_down_write(&sbi->pin_sem);
4206 
4207 		ret = f2fs_allocate_pinning_section(sbi);
4208 		if (ret) {
4209 			f2fs_up_write(&sbi->pin_sem);
4210 			break;
4211 		}
4212 
4213 		set_inode_flag(inode, FI_SKIP_WRITES);
4214 
4215 		for (blkofs = 0; blkofs <= blkofs_end; blkofs++) {
4216 			struct folio *folio;
4217 			unsigned int blkidx = secidx * blk_per_sec + blkofs;
4218 
4219 			folio = f2fs_get_lock_data_folio(inode, blkidx, true);
4220 			if (IS_ERR(folio)) {
4221 				f2fs_up_write(&sbi->pin_sem);
4222 				ret = PTR_ERR(folio);
4223 				goto done;
4224 			}
4225 
4226 			folio_mark_dirty(folio);
4227 			f2fs_folio_put(folio, true);
4228 		}
4229 
4230 		clear_inode_flag(inode, FI_SKIP_WRITES);
4231 
4232 		ret = filemap_fdatawrite(inode->i_mapping);
4233 
4234 		f2fs_up_write(&sbi->pin_sem);
4235 
4236 		if (ret)
4237 			break;
4238 	}
4239 
4240 done:
4241 	clear_inode_flag(inode, FI_SKIP_WRITES);
4242 	clear_inode_flag(inode, FI_OPU_WRITE);
4243 	clear_inode_flag(inode, FI_ALIGNED_WRITE);
4244 
4245 	filemap_invalidate_unlock(inode->i_mapping);
4246 	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4247 
4248 	return ret;
4249 }
4250 
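/*
 * Walk the swapfile and register each physically contiguous extent with
 * the swap core; ranges that are not section-aligned are rewritten via
 * f2fs_migrate_blocks() and then looked up again.
 */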
4251 static int check_swap_activate(struct swap_info_struct *sis,
4252 				struct file *swap_file, sector_t *span)
4253 {
4254 	struct address_space *mapping = swap_file->f_mapping;
4255 	struct inode *inode = mapping->host;
4256 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4257 	block_t cur_lblock;
4258 	block_t last_lblock;
4259 	block_t pblock;
4260 	block_t lowest_pblock = -1;
4261 	block_t highest_pblock = 0;
4262 	int nr_extents = 0;
4263 	unsigned int nr_pblocks;
4264 	unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
4265 	unsigned int not_aligned = 0;
4266 	int ret = 0;
4267 
4268 	/*
4269 	 * Map all the blocks into the extent list.  This code doesn't try
4270 	 * to be very smart.
4271 	 */
4272 	cur_lblock = 0;
4273 	last_lblock = F2FS_BYTES_TO_BLK(i_size_read(inode));
4274 
4275 	while (cur_lblock < last_lblock && cur_lblock < sis->max) {
4276 		struct f2fs_map_blocks map;
4277 		bool last_extent = false;
4278 retry:
4279 		cond_resched();
4280 
4281 		memset(&map, 0, sizeof(map));
4282 		map.m_lblk = cur_lblock;
4283 		map.m_len = last_lblock - cur_lblock;
4284 		map.m_next_pgofs = NULL;
4285 		map.m_next_extent = NULL;
4286 		map.m_seg_type = NO_CHECK_TYPE;
4287 		map.m_may_create = false;
4288 
4289 		ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
4290 		if (ret)
4291 			goto out;
4292 
4293 		/* hole */
4294 		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
4295 			f2fs_err(sbi, "Swapfile has holes");
4296 			ret = -EINVAL;
4297 			goto out;
4298 		}
4299 
4300 		pblock = map.m_pblk;
4301 		nr_pblocks = map.m_len;
4302 
4303 		if (!last_extent &&
4304 			((pblock - SM_I(sbi)->main_blkaddr) % blks_per_sec ||
4305 			nr_pblocks % blks_per_sec ||
4306 			f2fs_is_sequential_zone_area(sbi, pblock))) {
4307 			not_aligned++;
4308 
4309 			nr_pblocks = roundup(nr_pblocks, blks_per_sec);
4310 			if (cur_lblock + nr_pblocks > sis->max)
4311 				nr_pblocks -= blks_per_sec;
4312 
4313 			/* this is the last extent */
4314 			if (!nr_pblocks) {
4315 				nr_pblocks = last_lblock - cur_lblock;
4316 				last_extent = true;
4317 			}
4318 
4319 			ret = f2fs_migrate_blocks(inode, cur_lblock,
4320 							nr_pblocks);
4321 			if (ret) {
4322 				if (ret == -ENOENT)
4323 					ret = -EINVAL;
4324 				goto out;
4325 			}
4326 
4327 			/* lookup block mapping info after block migration */
4328 			goto retry;
4329 		}
4330 
4331 		if (cur_lblock + nr_pblocks >= sis->max)
4332 			nr_pblocks = sis->max - cur_lblock;
4333 
4334 		if (cur_lblock) {	/* exclude the header page */
4335 			if (pblock < lowest_pblock)
4336 				lowest_pblock = pblock;
4337 			if (pblock + nr_pblocks - 1 > highest_pblock)
4338 				highest_pblock = pblock + nr_pblocks - 1;
4339 		}
4340 
4341 		/*
4342 		 * Found a contiguous run of blocks; register it as a swap extent.
4343 		 */
4344 		ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
4345 		if (ret < 0)
4346 			goto out;
4347 		nr_extents += ret;
4348 		cur_lblock += nr_pblocks;
4349 	}
4350 	ret = nr_extents;
4351 	*span = 1 + highest_pblock - lowest_pblock;
4352 	if (cur_lblock == 0)
4353 		cur_lblock = 1;	/* force Empty message */
4354 	sis->max = cur_lblock;
4355 	sis->pages = cur_lblock - 1;
4356 out:
4357 	if (not_aligned)
4358 		f2fs_warn(sbi, "Swapfile (%u) is not aligned to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%lu * N)",
4359 			  not_aligned, blks_per_sec * F2FS_BLKSIZE);
4360 	return ret;
4361 }
4362 
4363 static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4364 				sector_t *span)
4365 {
4366 	struct inode *inode = file_inode(file);
4367 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4368 	int ret;
4369 
4370 	if (!S_ISREG(inode->i_mode))
4371 		return -EINVAL;
4372 
4373 	if (f2fs_readonly(sbi->sb))
4374 		return -EROFS;
4375 
4376 	if (f2fs_lfs_mode(sbi) && !f2fs_sb_has_blkzoned(sbi)) {
4377 		f2fs_err(sbi, "Swapfile not supported in LFS mode");
4378 		return -EINVAL;
4379 	}
4380 
4381 	ret = f2fs_convert_inline_inode(inode);
4382 	if (ret)
4383 		return ret;
4384 
4385 	if (!f2fs_disable_compressed_file(inode))
4386 		return -EINVAL;
4387 
4388 	ret = filemap_fdatawrite(inode->i_mapping);
4389 	if (ret < 0)
4390 		return ret;
4391 
4392 	f2fs_precache_extents(inode);
4393 
4394 	ret = check_swap_activate(sis, file, span);
4395 	if (ret < 0)
4396 		return ret;
4397 
4398 	stat_inc_swapfile_inode(inode);
4399 	set_inode_flag(inode, FI_PIN_FILE);
4400 	f2fs_update_time(sbi, REQ_TIME);
4401 	return ret;
4402 }
4403 
4404 static void f2fs_swap_deactivate(struct file *file)
4405 {
4406 	struct inode *inode = file_inode(file);
4407 
4408 	stat_dec_swapfile_inode(inode);
4409 	clear_inode_flag(inode, FI_PIN_FILE);
4410 }
4411 #else
4412 static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4413 				sector_t *span)
4414 {
4415 	return -EOPNOTSUPP;
4416 }
4417 
4418 static void f2fs_swap_deactivate(struct file *file)
4419 {
4420 }
4421 #endif
4422 
4423 const struct address_space_operations f2fs_dblock_aops = {
4424 	.read_folio	= f2fs_read_data_folio,
4425 	.readahead	= f2fs_readahead,
4426 	.writepages	= f2fs_write_data_pages,
4427 	.write_begin	= f2fs_write_begin,
4428 	.write_end	= f2fs_write_end,
4429 	.dirty_folio	= f2fs_dirty_data_folio,
4430 	.migrate_folio	= filemap_migrate_folio,
4431 	.invalidate_folio = f2fs_invalidate_folio,
4432 	.release_folio	= f2fs_release_folio,
4433 	.bmap		= f2fs_bmap,
4434 	.swap_activate  = f2fs_swap_activate,
4435 	.swap_deactivate = f2fs_swap_deactivate,
4436 };
4437 
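/*
 * Clear the PAGECACHE_TAG_DIRTY xarray tag for a folio without touching
 * the folio's own dirty flag.
 */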
4438 void f2fs_clear_page_cache_dirty_tag(struct folio *folio)
4439 {
4440 	struct address_space *mapping = folio->mapping;
4441 	unsigned long flags;
4442 
4443 	xa_lock_irqsave(&mapping->i_pages, flags);
4444 	__xa_clear_mark(&mapping->i_pages, folio->index,
4445 						PAGECACHE_TAG_DIRTY);
4446 	xa_unlock_irqrestore(&mapping->i_pages, flags);
4447 }
4448 
4449 int __init f2fs_init_post_read_processing(void)
4450 {
4451 	bio_post_read_ctx_cache =
4452 		kmem_cache_create("f2fs_bio_post_read_ctx",
4453 				  sizeof(struct bio_post_read_ctx), 0, 0, NULL);
4454 	if (!bio_post_read_ctx_cache)
4455 		goto fail;
4456 	bio_post_read_ctx_pool =
4457 		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
4458 					 bio_post_read_ctx_cache);
4459 	if (!bio_post_read_ctx_pool)
4460 		goto fail_free_cache;
4461 	return 0;
4462 
4463 fail_free_cache:
4464 	kmem_cache_destroy(bio_post_read_ctx_cache);
4465 fail:
4466 	return -ENOMEM;
4467 }
4468 
4469 void f2fs_destroy_post_read_processing(void)
4470 {
4471 	mempool_destroy(bio_post_read_ctx_pool);
4472 	kmem_cache_destroy(bio_post_read_ctx_cache);
4473 }
4474 
4475 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
4476 {
4477 	if (!f2fs_sb_has_encrypt(sbi) &&
4478 		!f2fs_sb_has_verity(sbi) &&
4479 		!f2fs_sb_has_compression(sbi))
4480 		return 0;
4481 
4482 	sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
4483 						 WQ_UNBOUND | WQ_HIGHPRI,
4484 						 num_online_cpus());
4485 	return sbi->post_read_wq ? 0 : -ENOMEM;
4486 }
4487 
4488 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
4489 {
4490 	if (sbi->post_read_wq)
4491 		destroy_workqueue(sbi->post_read_wq);
4492 }
4493 
4494 int __init f2fs_init_bio_entry_cache(void)
4495 {
4496 	bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
4497 			sizeof(struct bio_entry));
4498 
4499 	if (!bio_entry_slab)
4500 		return -ENOMEM;
4501 
4502 	ffs_entry_slab = f2fs_kmem_cache_create("f2fs_ffs_slab",
4503 			sizeof(struct f2fs_folio_state));
4504 
4505 	if (!ffs_entry_slab) {
4506 		kmem_cache_destroy(bio_entry_slab);
4507 		return -ENOMEM;
4508 	}
4509 
4510 	return 0;
4511 }
4512 
4513 void f2fs_destroy_bio_entry_cache(void)
4514 {
4515 	kmem_cache_destroy(bio_entry_slab);
4516 	kmem_cache_destroy(ffs_entry_slab);
4517 }
4518 
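/*
 * iomap_begin for direct I/O: translate the byte range at @offset into one
 * mapped, unwritten or hole extent via f2fs_map_blocks(), creating blocks
 * only when the range is not already fully allocated.
 */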
4519 static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
4520 			    unsigned int flags, struct iomap *iomap,
4521 			    struct iomap *srcmap)
4522 {
4523 	struct f2fs_map_blocks map = { NULL, };
4524 	pgoff_t next_pgofs = 0;
4525 	int err;
4526 
4527 	map.m_lblk = F2FS_BYTES_TO_BLK(offset);
4528 	map.m_len = F2FS_BYTES_TO_BLK(offset + length - 1) - map.m_lblk + 1;
4529 	map.m_next_pgofs = &next_pgofs;
4530 	map.m_seg_type = f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode),
4531 						inode->i_write_hint);
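
	/*
	 * iomap->private carries the last physical block of the previous
	 * mapping, so that a multi-extent DIO write can continue allocation
	 * where the previous call left off.
	 */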
4532 	if (flags & IOMAP_WRITE && iomap->private) {
4533 		map.m_last_pblk = (unsigned long)iomap->private;
4534 		iomap->private = NULL;
4535 	}
4536 
4537 	/*
4538 	 * If the blocks being overwritten are already allocated,
4539 	 * f2fs_map_lock and f2fs_balance_fs are not necessary.
4540 	 */
4541 	if ((flags & IOMAP_WRITE) &&
4542 		!__f2fs_overwrite_io(inode, offset, length, true))
4543 		map.m_may_create = true;
4544 
4545 	err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DIO);
4546 	if (err)
4547 		return err;
4548 
4549 	iomap->offset = F2FS_BLK_TO_BYTES(map.m_lblk);
4550 
4551 	/*
4552 	 * When inline encryption is enabled, sometimes I/O to an encrypted file
4553 	 * has to be broken up to guarantee DUN contiguity.  Handle this by
4554 	 * limiting the length of the mapping returned.
4555 	 */
4556 	map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
4557 
4558 	/*
4559 	 * We should never see delalloc or compressed extents here based on
4560 	 * prior flushing and checks.
4561 	 */
4562 	if (WARN_ON_ONCE(map.m_pblk == COMPRESS_ADDR))
4563 		return -EINVAL;
4564 
4565 	if (map.m_flags & F2FS_MAP_MAPPED) {
4566 		if (WARN_ON_ONCE(map.m_pblk == NEW_ADDR))
4567 			return -EINVAL;
4568 
4569 		iomap->length = F2FS_BLK_TO_BYTES(map.m_len);
4570 		iomap->type = IOMAP_MAPPED;
4571 		iomap->flags |= IOMAP_F_MERGED;
4572 		iomap->bdev = map.m_bdev;
4573 		iomap->addr = F2FS_BLK_TO_BYTES(map.m_pblk);
4574 
4575 		if (flags & IOMAP_WRITE && map.m_last_pblk)
4576 			iomap->private = (void *)map.m_last_pblk;
4577 	} else {
4578 		if (flags & IOMAP_WRITE)
4579 			return -ENOTBLK;
4580 
4581 		if (map.m_pblk == NULL_ADDR) {
4582 			iomap->length = F2FS_BLK_TO_BYTES(next_pgofs) -
4583 							iomap->offset;
4584 			iomap->type = IOMAP_HOLE;
4585 		} else if (map.m_pblk == NEW_ADDR) {
4586 			iomap->length = F2FS_BLK_TO_BYTES(map.m_len);
4587 			iomap->type = IOMAP_UNWRITTEN;
4588 		} else {
4589 			f2fs_bug_on(F2FS_I_SB(inode), 1);
4590 		}
4591 		iomap->addr = IOMAP_NULL_ADDR;
4592 	}
4593 
4594 	if (map.m_flags & F2FS_MAP_NEW)
4595 		iomap->flags |= IOMAP_F_NEW;
4596 	if ((inode_state_read_once(inode) & I_DIRTY_DATASYNC) ||
4597 	    offset + length > i_size_read(inode))
4598 		iomap->flags |= IOMAP_F_DIRTY;
4599 
4600 	return 0;
4601 }
4602 
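/*
 * Rough usage sketch (the actual call sites live in f2fs's file I/O paths):
 * direct I/O is driven through the iomap framework, along the lines of
 *
 *	iomap_dio_rw(iocb, iter, &f2fs_iomap_ops, ...);
 */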
4603 const struct iomap_ops f2fs_iomap_ops = {
4604 	.iomap_begin	= f2fs_iomap_begin,
4605 };
4606