Lines Matching +full:data +full:- +full:mapping
1 // SPDX-License-Identifier: GPL-2.0
3 * fs/f2fs/data.c
16 #include <linux/blk-crypto.h>
52 struct address_space *mapping = page_folio(page)->mapping; in f2fs_is_cp_guaranteed() local
59 inode = mapping->host; in f2fs_is_cp_guaranteed()
62 if (inode->i_ino == F2FS_META_INO(sbi) || in f2fs_is_cp_guaranteed()
63 inode->i_ino == F2FS_NODE_INO(sbi) || in f2fs_is_cp_guaranteed()
64 S_ISDIR(inode->i_mode)) in f2fs_is_cp_guaranteed()
67 if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) || in f2fs_is_cp_guaranteed()
75 struct address_space *mapping = folio->mapping; in __read_io_type() local
77 if (mapping) { in __read_io_type()
78 struct inode *inode = mapping->host; in __read_io_type()
81 if (inode->i_ino == F2FS_META_INO(sbi)) in __read_io_type()
84 if (inode->i_ino == F2FS_NODE_INO(sbi)) in __read_io_type()
95 STEP_DECRYPT = 0, /* compile out the decryption-related code */
100 STEP_DECOMPRESS = 0, /* compile out the decompression-related code */
105 STEP_VERITY = 0, /* compile out the verity-related code */
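For context (a reconstruction, not part of the match output): the three "= 0" lines above are the #else branches of the bio_post_read_step enum, which in mainline f2fs reads roughly as follows, so that unconfigured steps compile away:

enum bio_post_read_step {
#ifdef CONFIG_FS_ENCRYPTION
	STEP_DECRYPT	= BIT(0),
#else
	STEP_DECRYPT	= 0,	/* compile out the decryption-related code */
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
	STEP_DECOMPRESS	= BIT(1),
#else
	STEP_DECOMPRESS	= 0,	/* compile out the decompression-related code */
#endif
#ifdef CONFIG_FS_VERITY
	STEP_VERITY	= BIT(2),
#else
	STEP_VERITY	= 0,	/* compile out the verity-related code */
#endif
};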
126 * This marks pages up-to-date only if there was no error in the bio (I/O error,
127 * decryption error, or verity error), as indicated by bio->bi_status.
129 * "Compressed pages" (pagecache pages backed by a compressed cluster on-disk)
130 * aren't marked up-to-date here, as decompression is done on a per-compression-
131 * cluster basis rather than a per-bio basis. Instead, we need only do two
140 struct bio_post_read_ctx *ctx = bio->bi_private; in f2fs_finish_read_bio()
145 if (f2fs_is_compressed_page(&folio->page)) { in f2fs_finish_read_bio()
146 if (ctx && !ctx->decompression_attempted) in f2fs_finish_read_bio()
147 f2fs_end_read_compressed_page(&folio->page, true, 0, in f2fs_finish_read_bio()
154 folio_end_read(folio, bio->bi_status == BLK_STS_OK); in f2fs_finish_read_bio()
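A condensed sketch of how the matched lines fit together in f2fs_finish_read_bio() (reconstruction; the dic-release helper is assumed to be f2fs_put_page_dic()):

static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
{
	struct folio_iter fi;
	struct bio_post_read_ctx *ctx = bio->bi_private;

	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;

		if (f2fs_is_compressed_page(&folio->page)) {
			/* decompression never ran: report the page as failed */
			if (ctx && !ctx->decompression_attempted)
				f2fs_end_read_compressed_page(&folio->page,
							true, 0, in_task);
			f2fs_put_page_dic(&folio->page, in_task);
			continue;
		}
		folio_end_read(folio, bio->bi_status == BLK_STS_OK);
	}
	if (ctx)
		mempool_free(ctx, bio_post_read_ctx_pool);
	bio_put(bio);
}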
166 struct bio *bio = ctx->bio; in f2fs_verify_bio()
167 bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS); in f2fs_verify_bio()
174 * mempool first. This assumes that verity is the last post-read step. in f2fs_verify_bio()
177 bio->bi_private = NULL; in f2fs_verify_bio()
180 * Verify the bio's pages with fs-verity. Exclude compressed pages, in f2fs_verify_bio()
188 struct page *page = bv->bv_page; in f2fs_verify_bio()
192 bio->bi_status = BLK_STS_IOERR; in f2fs_verify_bio()
204 * If the bio's data needs to be verified with fs-verity, then enqueue the
208 * decryption/decompression workqueue. This is because verifying the data pages
214 struct bio_post_read_ctx *ctx = bio->bi_private; in f2fs_verify_and_finish_bio()
216 if (ctx && (ctx->enabled_steps & STEP_VERITY)) { in f2fs_verify_and_finish_bio()
217 INIT_WORK(&ctx->work, f2fs_verify_bio); in f2fs_verify_and_finish_bio()
218 fsverity_enqueue_verify_work(&ctx->work); in f2fs_verify_and_finish_bio()
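Condensed reconstruction of f2fs_verify_and_finish_bio(), assembling the matched lines above:

static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
{
	struct bio_post_read_ctx *ctx = bio->bi_private;

	if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
		/* verity runs in its workqueue and finishes the bio itself */
		INIT_WORK(&ctx->work, f2fs_verify_bio);
		fsverity_enqueue_verify_work(&ctx->work);
	} else {
		f2fs_finish_read_bio(bio, in_task);
	}
}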
226 * remaining page was read by @ctx->bio.
231 * is done on a per-cluster basis, not a per-bio basis.
239 block_t blkaddr = ctx->fs_blkaddr; in f2fs_handle_step_decompress()
241 bio_for_each_segment_all(bv, ctx->bio, iter_all) { in f2fs_handle_step_decompress()
242 struct page *page = bv->bv_page; in f2fs_handle_step_decompress()
253 ctx->decompression_attempted = true; in f2fs_handle_step_decompress()
257 * the per-bio verity work is unnecessary, as verity will be fully in f2fs_handle_step_decompress()
261 ctx->enabled_steps &= ~STEP_VERITY; in f2fs_handle_step_decompress()
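Sketch of the decompress step the matched lines belong to (reconstruction): every compressed page goes to the decompression engine, and if the bio held nothing but compressed pages the per-bio verity step is dropped, since verity is handled at cluster level:

static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
		bool in_task)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;
	bool all_compressed = true;
	block_t blkaddr = ctx->fs_blkaddr;

	bio_for_each_segment_all(bv, ctx->bio, iter_all) {
		struct page *page = bv->bv_page;

		if (f2fs_is_compressed_page(page))
			f2fs_end_read_compressed_page(page, false, blkaddr,
						      in_task);
		else
			all_compressed = false;

		blkaddr++;
	}

	ctx->decompression_attempted = true;

	if (all_compressed)
		ctx->enabled_steps &= ~STEP_VERITY;
}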
268 struct bio *bio = ctx->bio; in f2fs_post_read_work()
270 if ((ctx->enabled_steps & STEP_DECRYPT) && !fscrypt_decrypt_bio(bio)) { in f2fs_post_read_work()
275 if (ctx->enabled_steps & STEP_DECOMPRESS) in f2fs_post_read_work()
288 ctx = bio->bi_private; in f2fs_read_end_io()
291 bio->bi_status = BLK_STS_IOERR; in f2fs_read_end_io()
293 if (bio->bi_status != BLK_STS_OK) { in f2fs_read_end_io()
299 unsigned int enabled_steps = ctx->enabled_steps & in f2fs_read_end_io()
310 INIT_WORK(&ctx->work, f2fs_post_read_work); in f2fs_read_end_io()
311 queue_work(ctx->sbi->post_read_wq, &ctx->work); in f2fs_read_end_io()
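Condensed sketch of the completion decision in f2fs_read_end_io() (iostat accounting, fault injection, and the decompress-only fast path are elided):

static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_post_read_ctx *ctx = bio->bi_private;
	bool intask = in_task();

	if (bio->bi_status != BLK_STS_OK) {
		/* on I/O error, skip post-processing and fail every folio */
		f2fs_finish_read_bio(bio, intask);
		return;
	}

	if (ctx) {
		unsigned int enabled_steps = ctx->enabled_steps &
					(STEP_DECRYPT | STEP_DECOMPRESS);

		/* heavy steps run in the dedicated post-read workqueue */
		if (enabled_steps) {
			INIT_WORK(&ctx->work, f2fs_post_read_work);
			queue_work(ctx->sbi->post_read_wq, &ctx->work);
			return;
		}
	}

	/* nothing but (possibly) verity left to do */
	f2fs_verify_and_finish_bio(bio, intask);
}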
325 sbi = bio->bi_private; in f2fs_write_end_io()
328 bio->bi_status = BLK_STS_IOERR; in f2fs_write_end_io()
338 fscrypt_free_bounce_page(&io_folio->page); in f2fs_write_end_io()
342 if (f2fs_is_compressed_page(&folio->page)) { in f2fs_write_end_io()
343 f2fs_compress_write_end_io(bio, &folio->page); in f2fs_write_end_io()
348 type = WB_DATA_TYPE(&folio->page, false); in f2fs_write_end_io()
350 if (unlikely(bio->bi_status != BLK_STS_OK)) { in f2fs_write_end_io()
351 mapping_set_error(folio->mapping, -EIO); in f2fs_write_end_io()
358 folio->index != nid_of_node(&folio->page)); in f2fs_write_end_io()
363 clear_page_private_gcing(&folio->page); in f2fs_write_end_io()
367 wq_has_sleeper(&sbi->cp_wait)) in f2fs_write_end_io()
368 wake_up(&sbi->cp_wait); in f2fs_write_end_io()
376 struct f2fs_bio_info *io = (struct f2fs_bio_info *)bio->bi_private; in f2fs_zone_write_end_io()
378 bio->bi_private = io->bi_private; in f2fs_zone_write_end_io()
379 complete(&io->zone_wait); in f2fs_zone_write_end_io()
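The full zoned-write completion handler is short; a reconstruction:

#ifdef CONFIG_BLK_DEV_ZONED
static void f2fs_zone_write_end_io(struct bio *bio)
{
	struct f2fs_bio_info *io = (struct f2fs_bio_info *)bio->bi_private;

	/* restore the private pointer saved in f2fs_submit_page_write() */
	bio->bi_private = io->bi_private;
	complete(&io->zone_wait);
	f2fs_write_end_io(bio);
}
#endif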
387 struct block_device *bdev = sbi->sb->s_bdev; in f2fs_target_device()
391 for (i = 0; i < sbi->s_ndevs; i++) { in f2fs_target_device()
394 blk_addr -= FDEV(i).start_blk; in f2fs_target_device()
413 for (i = 0; i < sbi->s_ndevs; i++) in f2fs_target_device_index()
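Reconstruction of f2fs_target_device(), which the matched lines above come from; it translates a filesystem-global block address into a per-device block device and sector on multi-device setups:

struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, sector_t *sector)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	if (f2fs_is_multi_device(sbi)) {
		for (i = 0; i < sbi->s_ndevs; i++) {
			if (FDEV(i).start_blk <= blk_addr &&
			    FDEV(i).end_blk >= blk_addr) {
				blk_addr -= FDEV(i).start_blk;
				bdev = FDEV(i).bdev;
				break;
			}
		}
	}
	if (sector)
		*sector = SECTOR_FROM_BLOCK(blk_addr);
	return bdev;
}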
421 unsigned int temp_mask = GENMASK(NR_TEMP_TYPE - 1, 0); in f2fs_io_flags()
422 struct folio *fio_folio = page_folio(fio->page); in f2fs_io_flags()
426 if (fio->op != REQ_OP_WRITE) in f2fs_io_flags()
428 if (fio->type == DATA) in f2fs_io_flags()
429 io_flag = fio->sbi->data_io_flag; in f2fs_io_flags()
430 else if (fio->type == NODE) in f2fs_io_flags()
431 io_flag = fio->sbi->node_io_flag; in f2fs_io_flags()
439 * data/node io flag bits per temp: in f2fs_io_flags()
444 if (BIT(fio->temp) & meta_flag) in f2fs_io_flags()
446 if (BIT(fio->temp) & fua_flag) in f2fs_io_flags()
449 if (fio->type == DATA && in f2fs_io_flags()
450 F2FS_I(fio_folio->mapping->host)->ioprio_hint == F2FS_IOPRIO_WRITE) in f2fs_io_flags()
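How the matched BIT(fio->temp) checks get their masks (sketch): io_flag packs two NR_TEMP_TYPE-wide bitfields, FUA bits in the low field and META bits in the next one:

	fua_flag = io_flag & temp_mask;
	meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;

	if (BIT(fio->temp) & meta_flag)
		op_flags |= REQ_META;
	if (BIT(fio->temp) & fua_flag)
		op_flags |= REQ_FUA;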
458 struct f2fs_sb_info *sbi = fio->sbi; in __bio_alloc()
463 bdev = f2fs_target_device(sbi, fio->new_blkaddr, &sector); in __bio_alloc()
465 fio->op | fio->op_flags | f2fs_io_flags(fio), in __bio_alloc()
467 bio->bi_iter.bi_sector = sector; in __bio_alloc()
468 if (is_read_io(fio->op)) { in __bio_alloc()
469 bio->bi_end_io = f2fs_read_end_io; in __bio_alloc()
470 bio->bi_private = NULL; in __bio_alloc()
472 bio->bi_end_io = f2fs_write_end_io; in __bio_alloc()
473 bio->bi_private = sbi; in __bio_alloc()
474 bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, in __bio_alloc()
475 fio->type, fio->temp); in __bio_alloc()
479 if (fio->io_wbc) in __bio_alloc()
480 wbc_init_bio(fio->io_wbc, bio); in __bio_alloc()
491 * The f2fs garbage collector sets ->encrypted_page when it wants to in f2fs_set_bio_crypt_ctx()
492 * read/write raw data without encryption. in f2fs_set_bio_crypt_ctx()
494 if (!fio || !fio->encrypted_page) in f2fs_set_bio_crypt_ctx()
503 * The f2fs garbage collector sets ->encrypted_page when it wants to in f2fs_crypt_mergeable_bio()
504 * read/write raw data without encryption. in f2fs_crypt_mergeable_bio()
506 if (fio && fio->encrypted_page) in f2fs_crypt_mergeable_bio()
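Reconstructed context for the two matched comments: both helpers special-case the GC's pre-encrypted pages and otherwise defer to fscrypt:

static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
				  pgoff_t first_idx,
				  const struct f2fs_io_info *fio,
				  gfp_t gfp_mask)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (!fio || !fio->encrypted_page)
		fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
}

static bool f2fs_crypt_mergeable_bio(struct bio *bio,
				     const struct inode *inode,
				     pgoff_t next_idx,
				     const struct f2fs_io_info *fio)
{
	if (fio && fio->encrypted_page)
		return !bio_has_crypt_ctx(bio);

	return fscrypt_mergeable_bio(bio, inode, next_idx);
}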
516 trace_f2fs_submit_read_bio(sbi->sb, type, bio); in f2fs_submit_read_bio()
526 trace_f2fs_submit_write_bio(sbi->sb, type, bio); in f2fs_submit_write_bio()
533 struct f2fs_io_info *fio = &io->fio; in __submit_merged_bio()
535 if (!io->bio) in __submit_merged_bio()
538 if (is_read_io(fio->op)) { in __submit_merged_bio()
539 trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio); in __submit_merged_bio()
540 f2fs_submit_read_bio(io->sbi, io->bio, fio->type); in __submit_merged_bio()
542 trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio); in __submit_merged_bio()
543 f2fs_submit_write_bio(io->sbi, io->bio, fio->type); in __submit_merged_bio()
545 io->bio = NULL; in __submit_merged_bio()
567 if (f2fs_is_compressed_page(&target->page)) { in __has_merged_page()
573 if (inode && inode == target->mapping->host) in __has_merged_page()
575 if (page && page == &target->page) in __has_merged_page()
577 if (ino && ino == ino_of_node(&target->page)) in __has_merged_page()
592 sbi->write_io[i] = f2fs_kmalloc(sbi, in f2fs_init_write_merge_io()
595 if (!sbi->write_io[i]) in f2fs_init_write_merge_io()
596 return -ENOMEM; in f2fs_init_write_merge_io()
599 struct f2fs_bio_info *io = &sbi->write_io[i][j]; in f2fs_init_write_merge_io()
601 init_f2fs_rwsem(&io->io_rwsem); in f2fs_init_write_merge_io()
602 io->sbi = sbi; in f2fs_init_write_merge_io()
603 io->bio = NULL; in f2fs_init_write_merge_io()
604 io->last_block_in_bio = 0; in f2fs_init_write_merge_io()
605 spin_lock_init(&io->io_lock); in f2fs_init_write_merge_io()
606 INIT_LIST_HEAD(&io->io_list); in f2fs_init_write_merge_io()
607 INIT_LIST_HEAD(&io->bio_list); in f2fs_init_write_merge_io()
608 init_f2fs_rwsem(&io->bio_list_lock); in f2fs_init_write_merge_io()
610 init_completion(&io->zone_wait); in f2fs_init_write_merge_io()
611 io->zone_pending_bio = NULL; in f2fs_init_write_merge_io()
612 io->bi_private = NULL; in f2fs_init_write_merge_io()
624 struct f2fs_bio_info *io = sbi->write_io[btype] + temp; in __f2fs_submit_merged_write()
626 f2fs_down_write(&io->io_rwsem); in __f2fs_submit_merged_write()
628 if (!io->bio) in __f2fs_submit_merged_write()
633 io->fio.type = META_FLUSH; in __f2fs_submit_merged_write()
634 io->bio->bi_opf |= REQ_META | REQ_PRIO | REQ_SYNC; in __f2fs_submit_merged_write()
636 io->bio->bi_opf |= REQ_PREFLUSH | REQ_FUA; in __f2fs_submit_merged_write()
640 f2fs_up_write(&io->io_rwsem); in __f2fs_submit_merged_write()
653 struct f2fs_bio_info *io = sbi->write_io[btype] + temp; in __submit_merged_write_cond()
655 f2fs_down_read(&io->io_rwsem); in __submit_merged_write_cond()
656 ret = __has_merged_page(io->bio, inode, page, ino); in __submit_merged_write_cond()
657 f2fs_up_read(&io->io_rwsem); in __submit_merged_write_cond()
682 f2fs_submit_merged_write(sbi, DATA); in f2fs_flush_merged_writes()
688 * Fill the locked page with data located at the given block address.
694 struct folio *fio_folio = page_folio(fio->page); in f2fs_submit_page_bio()
695 struct folio *data_folio = fio->encrypted_page ? in f2fs_submit_page_bio()
696 page_folio(fio->encrypted_page) : fio_folio; in f2fs_submit_page_bio()
698 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, in f2fs_submit_page_bio()
699 fio->is_por ? META_POR : (__is_meta_io(fio) ? in f2fs_submit_page_bio()
701 return -EFSCORRUPTED; in f2fs_submit_page_bio()
708 f2fs_set_bio_crypt_ctx(bio, fio_folio->mapping->host, in f2fs_submit_page_bio()
709 fio_folio->index, fio, GFP_NOIO); in f2fs_submit_page_bio()
712 if (fio->io_wbc && !is_read_io(fio->op)) in f2fs_submit_page_bio()
713 wbc_account_cgroup_owner(fio->io_wbc, fio_folio, PAGE_SIZE); in f2fs_submit_page_bio()
715 inc_page_count(fio->sbi, is_read_io(fio->op) ? in f2fs_submit_page_bio()
716 __read_io_type(data_folio) : WB_DATA_TYPE(fio->page, false)); in f2fs_submit_page_bio()
719 f2fs_submit_read_bio(fio->sbi, bio, fio->type); in f2fs_submit_page_bio()
721 f2fs_submit_write_bio(fio->sbi, bio, fio->type); in f2fs_submit_page_bio()
728 if (unlikely(sbi->max_io_bytes && in page_is_mergeable()
729 bio->bi_iter.bi_size >= sbi->max_io_bytes)) in page_is_mergeable()
733 return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL); in page_is_mergeable()
739 if (io->fio.op != fio->op) in io_type_is_mergeable()
741 return io->fio.op_flags == fio->op_flags; in io_type_is_mergeable()
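The two predicates above combine in io_is_mergeable() (sketch): a write can join the in-flight bio only if both the block address and the op/flags match:

static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
					struct f2fs_bio_info *io,
					struct f2fs_io_info *fio,
					block_t last_blkaddr,
					block_t cur_blkaddr)
{
	if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
		return false;
	return io_type_is_mergeable(io, fio);
}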
758 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp; in add_bio_entry()
762 be->bio = bio; in add_bio_entry()
768 f2fs_down_write(&io->bio_list_lock); in add_bio_entry()
769 list_add_tail(&be->list, &io->bio_list); in add_bio_entry()
770 f2fs_up_write(&io->bio_list_lock); in add_bio_entry()
775 list_del(&be->list); in del_bio_entry()
782 struct folio *fio_folio = page_folio(fio->page); in add_ipu_page()
783 struct f2fs_sb_info *sbi = fio->sbi; in add_ipu_page()
786 int ret = -EAGAIN; in add_ipu_page()
789 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp; in add_ipu_page()
790 struct list_head *head = &io->bio_list; in add_ipu_page()
793 f2fs_down_write(&io->bio_list_lock); in add_ipu_page()
795 if (be->bio != *bio) in add_ipu_page()
801 *fio->last_block, in add_ipu_page()
802 fio->new_blkaddr)); in add_ipu_page()
804 fio_folio->mapping->host, in add_ipu_page()
805 fio_folio->index, fio) && in add_ipu_page()
814 f2fs_submit_write_bio(sbi, *bio, DATA); in add_ipu_page()
817 f2fs_up_write(&io->bio_list_lock); in add_ipu_page()
838 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp; in f2fs_submit_merged_ipu_write()
839 struct list_head *head = &io->bio_list; in f2fs_submit_merged_ipu_write()
845 f2fs_down_read(&io->bio_list_lock); in f2fs_submit_merged_ipu_write()
848 found = (target == be->bio); in f2fs_submit_merged_ipu_write()
850 found = __has_merged_page(be->bio, NULL, in f2fs_submit_merged_ipu_write()
851 &folio->page, 0); in f2fs_submit_merged_ipu_write()
855 f2fs_up_read(&io->bio_list_lock); in f2fs_submit_merged_ipu_write()
862 f2fs_down_write(&io->bio_list_lock); in f2fs_submit_merged_ipu_write()
865 found = (target == be->bio); in f2fs_submit_merged_ipu_write()
867 found = __has_merged_page(be->bio, NULL, in f2fs_submit_merged_ipu_write()
868 &folio->page, 0); in f2fs_submit_merged_ipu_write()
870 target = be->bio; in f2fs_submit_merged_ipu_write()
875 f2fs_up_write(&io->bio_list_lock); in f2fs_submit_merged_ipu_write()
879 f2fs_submit_write_bio(sbi, target, DATA); in f2fs_submit_merged_ipu_write()
888 struct bio *bio = *fio->bio; in f2fs_merge_page_bio()
889 struct page *page = fio->encrypted_page ? in f2fs_merge_page_bio()
890 fio->encrypted_page : fio->page; in f2fs_merge_page_bio()
891 struct folio *folio = page_folio(fio->page); in f2fs_merge_page_bio()
893 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, in f2fs_merge_page_bio()
895 return -EFSCORRUPTED; in f2fs_merge_page_bio()
899 if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block, in f2fs_merge_page_bio()
900 fio->new_blkaddr)) in f2fs_merge_page_bio()
901 f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL); in f2fs_merge_page_bio()
905 f2fs_set_bio_crypt_ctx(bio, folio->mapping->host, in f2fs_merge_page_bio()
906 folio->index, fio, GFP_NOIO); in f2fs_merge_page_bio()
908 add_bio_entry(fio->sbi, bio, page, fio->temp); in f2fs_merge_page_bio()
914 if (fio->io_wbc) in f2fs_merge_page_bio()
915 wbc_account_cgroup_owner(fio->io_wbc, folio, folio_size(folio)); in f2fs_merge_page_bio()
917 inc_page_count(fio->sbi, WB_DATA_TYPE(page, false)); in f2fs_merge_page_bio()
919 *fio->last_block = fio->new_blkaddr; in f2fs_merge_page_bio()
920 *fio->bio = bio; in f2fs_merge_page_bio()
928 struct block_device *bdev = sbi->sb->s_bdev; in is_end_zone_blkaddr()
938 blkaddr -= FDEV(devi).start_blk; in is_end_zone_blkaddr()
943 (blkaddr % sbi->blocks_per_blkz == sbi->blocks_per_blkz - 1); in is_end_zone_blkaddr()
949 struct f2fs_sb_info *sbi = fio->sbi; in f2fs_submit_page_write()
950 enum page_type btype = PAGE_TYPE_OF_BIO(fio->type); in f2fs_submit_page_write()
951 struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp; in f2fs_submit_page_write()
955 f2fs_bug_on(sbi, is_read_io(fio->op)); in f2fs_submit_page_write()
957 f2fs_down_write(&io->io_rwsem); in f2fs_submit_page_write()
960 if (f2fs_sb_has_blkzoned(sbi) && btype < META && io->zone_pending_bio) { in f2fs_submit_page_write()
961 wait_for_completion_io(&io->zone_wait); in f2fs_submit_page_write()
962 bio_put(io->zone_pending_bio); in f2fs_submit_page_write()
963 io->zone_pending_bio = NULL; in f2fs_submit_page_write()
964 io->bi_private = NULL; in f2fs_submit_page_write()
968 if (fio->in_list) { in f2fs_submit_page_write()
969 spin_lock(&io->io_lock); in f2fs_submit_page_write()
970 if (list_empty(&io->io_list)) { in f2fs_submit_page_write()
971 spin_unlock(&io->io_lock); in f2fs_submit_page_write()
974 fio = list_first_entry(&io->io_list, in f2fs_submit_page_write()
976 list_del(&fio->list); in f2fs_submit_page_write()
977 spin_unlock(&io->io_lock); in f2fs_submit_page_write()
982 if (fio->encrypted_page) in f2fs_submit_page_write()
983 bio_page = fio->encrypted_page; in f2fs_submit_page_write()
984 else if (fio->compressed_page) in f2fs_submit_page_write()
985 bio_page = fio->compressed_page; in f2fs_submit_page_write()
987 bio_page = fio->page; in f2fs_submit_page_write()
990 fio->submitted = 1; in f2fs_submit_page_write()
992 type = WB_DATA_TYPE(bio_page, fio->compressed_page); in f2fs_submit_page_write()
995 if (io->bio && in f2fs_submit_page_write()
996 (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio, in f2fs_submit_page_write()
997 fio->new_blkaddr) || in f2fs_submit_page_write()
998 !f2fs_crypt_mergeable_bio(io->bio, fio_inode(fio), in f2fs_submit_page_write()
999 page_folio(bio_page)->index, fio))) in f2fs_submit_page_write()
1002 if (io->bio == NULL) { in f2fs_submit_page_write()
1003 io->bio = __bio_alloc(fio, BIO_MAX_VECS); in f2fs_submit_page_write()
1004 f2fs_set_bio_crypt_ctx(io->bio, fio_inode(fio), in f2fs_submit_page_write()
1005 page_folio(bio_page)->index, fio, GFP_NOIO); in f2fs_submit_page_write()
1006 io->fio = *fio; in f2fs_submit_page_write()
1009 if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) { in f2fs_submit_page_write()
1014 if (fio->io_wbc) in f2fs_submit_page_write()
1015 wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page), in f2fs_submit_page_write()
1018 io->last_block_in_bio = fio->new_blkaddr; in f2fs_submit_page_write()
1020 trace_f2fs_submit_folio_write(page_folio(fio->page), fio); in f2fs_submit_page_write()
1023 is_end_zone_blkaddr(sbi, fio->new_blkaddr)) { in f2fs_submit_page_write()
1024 bio_get(io->bio); in f2fs_submit_page_write()
1025 reinit_completion(&io->zone_wait); in f2fs_submit_page_write()
1026 io->bi_private = io->bio->bi_private; in f2fs_submit_page_write()
1027 io->bio->bi_private = io; in f2fs_submit_page_write()
1028 io->bio->bi_end_io = f2fs_zone_write_end_io; in f2fs_submit_page_write()
1029 io->zone_pending_bio = io->bio; in f2fs_submit_page_write()
1033 if (fio->in_list) in f2fs_submit_page_write()
1039 f2fs_up_write(&io->io_rwsem); in f2fs_submit_page_write()
1056 bio->bi_iter.bi_sector = sector; in f2fs_grab_read_bio()
1058 bio->bi_end_io = f2fs_read_end_io; in f2fs_grab_read_bio()
1076 ctx->bio = bio; in f2fs_grab_read_bio()
1077 ctx->sbi = sbi; in f2fs_grab_read_bio()
1078 ctx->enabled_steps = post_read_steps; in f2fs_grab_read_bio()
1079 ctx->fs_blkaddr = blkaddr; in f2fs_grab_read_bio()
1080 ctx->decompression_attempted = false; in f2fs_grab_read_bio()
1081 bio->bi_private = ctx; in f2fs_grab_read_bio()
1097 folio->index, for_write); in f2fs_submit_page_read()
1106 if (bio->bi_private) in f2fs_submit_page_read()
1107 mempool_free(bio->bi_private, bio_post_read_ctx_pool); in f2fs_submit_page_read()
1109 return -EFAULT; in f2fs_submit_page_read()
1113 f2fs_submit_read_bio(sbi, bio, DATA); in f2fs_submit_page_read()
1119 __le32 *addr = get_dnode_addr(dn->inode, dn->node_folio); in __set_data_blkaddr()
1121 dn->data_blkaddr = blkaddr; in __set_data_blkaddr()
1122 addr[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr); in __set_data_blkaddr()
1126 * Lock ordering for the change of data block address:
1127 * ->data_page
1128 * ->node_folio
1133 f2fs_folio_wait_writeback(dn->node_folio, NODE, true, true); in f2fs_set_data_blkaddr()
1135 if (folio_mark_dirty(dn->node_folio)) in f2fs_set_data_blkaddr()
1136 dn->node_changed = true; in f2fs_set_data_blkaddr()
1145 /* dn->ofs_in_node will be returned with up-to-date last block pointer */
1148 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in f2fs_reserve_new_blocks()
1154 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC))) in f2fs_reserve_new_blocks()
1155 return -EPERM; in f2fs_reserve_new_blocks()
1156 err = inc_valid_block_count(sbi, dn->inode, &count, true); in f2fs_reserve_new_blocks()
1160 trace_f2fs_reserve_new_blocks(dn->inode, dn->nid, in f2fs_reserve_new_blocks()
1161 dn->ofs_in_node, count); in f2fs_reserve_new_blocks()
1163 f2fs_folio_wait_writeback(dn->node_folio, NODE, true, true); in f2fs_reserve_new_blocks()
1165 for (; count > 0; dn->ofs_in_node++) { in f2fs_reserve_new_blocks()
1170 count--; in f2fs_reserve_new_blocks()
1174 if (folio_mark_dirty(dn->node_folio)) in f2fs_reserve_new_blocks()
1175 dn->node_changed = true; in f2fs_reserve_new_blocks()
1179 /* Should keep dn->ofs_in_node unchanged */
1182 unsigned int ofs_in_node = dn->ofs_in_node; in f2fs_reserve_new_block()
1186 dn->ofs_in_node = ofs_in_node; in f2fs_reserve_new_block()
1192 bool need_put = dn->inode_folio ? false : true; in f2fs_reserve_block()
1199 if (dn->data_blkaddr == NULL_ADDR) in f2fs_reserve_block()
1209 struct address_space *mapping = inode->i_mapping; in f2fs_get_read_data_folio() local
1214 folio = f2fs_grab_cache_folio(mapping, index, for_write); in f2fs_get_read_data_folio()
1222 err = -EFSCORRUPTED; in f2fs_get_read_data_folio()
1231 if (err == -ENOENT && next_pgofs) in f2fs_get_read_data_folio()
1238 err = -ENOENT; in f2fs_get_read_data_folio()
1247 err = -EFSCORRUPTED; in f2fs_get_read_data_folio()
1258 * new inode page couldn't be allocated due to -ENOSPC. in f2fs_get_read_data_folio()
1260 * see, f2fs_add_link -> f2fs_get_new_data_folio -> in f2fs_get_read_data_folio()
1285 struct address_space *mapping = inode->i_mapping; in f2fs_find_data_folio() local
1288 folio = __filemap_get_folio(mapping, index, FGP_ACCESSED, 0); in f2fs_find_data_folio()
1306 return ERR_PTR(-EIO); in f2fs_find_data_folio()
1319 struct address_space *mapping = inode->i_mapping; in f2fs_get_lock_data_folio() local
1328 if (unlikely(folio->mapping != mapping || !folio_test_uptodate(folio))) { in f2fs_get_lock_data_folio()
1330 return ERR_PTR(-EIO); in f2fs_get_lock_data_folio()
1336 * Caller ensures that this data page is never allocated.
1337 * A new zero-filled data page is allocated in the page cache.
1347 struct address_space *mapping = inode->i_mapping; in f2fs_get_new_data_folio() local
1352 folio = f2fs_grab_cache_folio(mapping, index, true); in f2fs_get_new_data_folio()
1359 return ERR_PTR(-ENOMEM); in f2fs_get_new_data_folio()
1396 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in __allocate_data_block()
1403 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC))) in __allocate_data_block()
1404 return -EPERM; in __allocate_data_block()
1406 err = f2fs_get_node_info(sbi, dn->nid, &ni, false); in __allocate_data_block()
1410 dn->data_blkaddr = f2fs_data_blkaddr(dn); in __allocate_data_block()
1411 if (dn->data_blkaddr == NULL_ADDR) { in __allocate_data_block()
1412 err = inc_valid_block_count(sbi, dn->inode, &count, true); in __allocate_data_block()
1417 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); in __allocate_data_block()
1418 old_blkaddr = dn->data_blkaddr; in __allocate_data_block()
1420 &dn->data_blkaddr, &sum, seg_type, NULL); in __allocate_data_block()
1427 f2fs_update_data_blkaddr(dn, dn->data_blkaddr); in __allocate_data_block()
1434 f2fs_down_read(&sbi->node_change); in f2fs_map_lock()
1442 f2fs_up_read(&sbi->node_change); in f2fs_map_unlock()
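Reconstruction of the locking pair the two matched lines come from: PRE_AIO reservation only needs to exclude node-layout changes, every other mode takes the full op lock:

static void f2fs_map_lock(struct f2fs_sb_info *sbi, int flag)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		f2fs_down_read(&sbi->node_change);
	else
		f2fs_lock_op(sbi);
}

static void f2fs_map_unlock(struct f2fs_sb_info *sbi, int flag)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		f2fs_up_read(&sbi->node_change);
	else
		f2fs_unlock_op(sbi);
}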
1449 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in f2fs_get_block_locked()
1453 if (!f2fs_lookup_read_extent_cache_block(dn->inode, index, in f2fs_get_block_locked()
1454 &dn->data_blkaddr)) in f2fs_get_block_locked()
1469 * -ENOENT because the filesystem has been shut down or hit cp_error; in f2fs_map_no_dnode()
1470 * return -EIO in that case. in f2fs_map_no_dnode()
1472 if (map->m_may_create && in f2fs_map_no_dnode()
1474 return -EIO; in f2fs_map_no_dnode()
1476 if (map->m_next_pgofs) in f2fs_map_no_dnode()
1477 *map->m_next_pgofs = f2fs_get_next_page_offset(dn, pgoff); in f2fs_map_no_dnode()
1478 if (map->m_next_extent) in f2fs_map_no_dnode()
1479 *map->m_next_extent = f2fs_get_next_page_offset(dn, pgoff); in f2fs_map_no_dnode()
1487 unsigned int maxblocks = map->m_len; in f2fs_map_blocks_cached()
1488 pgoff_t pgoff = (pgoff_t)map->m_lblk; in f2fs_map_blocks_cached()
1494 map->m_pblk = ei.blk + pgoff - ei.fofs; in f2fs_map_blocks_cached()
1495 map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgoff); in f2fs_map_blocks_cached()
1496 map->m_flags = F2FS_MAP_MAPPED; in f2fs_map_blocks_cached()
1497 if (map->m_next_extent) in f2fs_map_blocks_cached()
1498 *map->m_next_extent = pgoff + map->m_len; in f2fs_map_blocks_cached()
1503 map->m_pblk, map->m_len); in f2fs_map_blocks_cached()
1506 int bidx = f2fs_target_device_index(sbi, map->m_pblk); in f2fs_map_blocks_cached()
1507 struct f2fs_dev_info *dev = &sbi->devs[bidx]; in f2fs_map_blocks_cached()
1509 map->m_bdev = dev->bdev; in f2fs_map_blocks_cached()
1510 map->m_pblk -= dev->start_blk; in f2fs_map_blocks_cached()
1511 map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk); in f2fs_map_blocks_cached()
1513 map->m_bdev = inode->i_sb->s_bdev; in f2fs_map_blocks_cached()
1523 if (map->m_multidev_dio && map->m_bdev != FDEV(bidx).bdev) in map_is_mergeable()
1525 if (map->m_pblk != NEW_ADDR && blkaddr == (map->m_pblk + ofs)) in map_is_mergeable()
1527 if (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) in map_is_mergeable()
1532 map->m_pblk == NULL_ADDR && blkaddr == NULL_ADDR) in map_is_mergeable()
1538 * f2fs_map_blocks() tries to find or build the mapping relationship which
1544 unsigned int maxblocks = map->m_len; in f2fs_map_blocks()
1547 int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE; in f2fs_map_blocks()
1560 if (!map->m_may_create && f2fs_map_blocks_cached(inode, map, flag)) in f2fs_map_blocks()
1563 map->m_bdev = inode->i_sb->s_bdev; in f2fs_map_blocks()
1564 map->m_multidev_dio = in f2fs_map_blocks()
1567 map->m_len = 0; in f2fs_map_blocks()
1568 map->m_flags = 0; in f2fs_map_blocks()
1571 pgofs = (pgoff_t)map->m_lblk; in f2fs_map_blocks()
1575 if (map->m_may_create) in f2fs_map_blocks()
1583 map->m_pblk = 0; in f2fs_map_blocks()
1584 if (err == -ENOENT) in f2fs_map_blocks()
1592 end_offset = ADDRS_PER_PAGE(&dn.node_folio->page, inode); in f2fs_map_blocks()
1599 err = -EFSCORRUPTED; in f2fs_map_blocks()
1603 /* use out-of-place updates for direct IO under LFS mode */ in f2fs_map_blocks()
1604 if (map->m_may_create && (is_hole || in f2fs_map_blocks()
1608 err = -EIO; in f2fs_map_blocks()
1621 err = __allocate_data_block(&dn, map->m_seg_type); in f2fs_map_blocks()
1630 err = -EIO; in f2fs_map_blocks()
1636 map->m_flags |= F2FS_MAP_NEW; in f2fs_map_blocks()
1640 err = -EFSCORRUPTED; in f2fs_map_blocks()
1650 map->m_pblk = 0; in f2fs_map_blocks()
1654 if (map->m_next_pgofs) in f2fs_map_blocks()
1655 *map->m_next_pgofs = pgofs + 1; in f2fs_map_blocks()
1660 if (map->m_next_pgofs) in f2fs_map_blocks()
1661 *map->m_next_pgofs = pgofs + 1; in f2fs_map_blocks()
1665 if (map->m_next_pgofs) in f2fs_map_blocks()
1666 *map->m_next_pgofs = pgofs + 1; in f2fs_map_blocks()
1674 if (map->m_multidev_dio) in f2fs_map_blocks()
1677 if (map->m_len == 0) { in f2fs_map_blocks()
1680 map->m_flags |= F2FS_MAP_DELALLOC; in f2fs_map_blocks()
1682 if (!(flag == F2FS_GET_BLOCK_DIO && is_hole && !map->m_may_create)) in f2fs_map_blocks()
1683 map->m_flags |= F2FS_MAP_MAPPED; in f2fs_map_blocks()
1685 map->m_pblk = blkaddr; in f2fs_map_blocks()
1686 map->m_len = 1; in f2fs_map_blocks()
1688 if (map->m_multidev_dio) in f2fs_map_blocks()
1689 map->m_bdev = FDEV(bidx).bdev; in f2fs_map_blocks()
1692 map->m_len++; in f2fs_map_blocks()
1710 map->m_len += dn.ofs_in_node - ofs_in_node; in f2fs_map_blocks()
1712 err = -ENOSPC; in f2fs_map_blocks()
1719 map->m_may_create) { in f2fs_map_blocks()
1722 CAP_BLKS_PER_SEC(sbi) - 1) in f2fs_map_blocks()
1732 if (map->m_flags & F2FS_MAP_MAPPED) { in f2fs_map_blocks()
1733 unsigned int ofs = start_pgofs - map->m_lblk; in f2fs_map_blocks()
1736 start_pgofs, map->m_pblk + ofs, in f2fs_map_blocks()
1737 map->m_len - ofs); in f2fs_map_blocks()
1743 if (map->m_may_create) { in f2fs_map_blocks()
1751 if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) { in f2fs_map_blocks()
1757 map->m_pblk, map->m_len); in f2fs_map_blocks()
1759 if (map->m_multidev_dio) { in f2fs_map_blocks()
1760 block_t blk_addr = map->m_pblk; in f2fs_map_blocks()
1762 bidx = f2fs_target_device_index(sbi, map->m_pblk); in f2fs_map_blocks()
1764 map->m_bdev = FDEV(bidx).bdev; in f2fs_map_blocks()
1765 map->m_pblk -= FDEV(bidx).start_blk; in f2fs_map_blocks()
1767 if (map->m_may_create) in f2fs_map_blocks()
1768 f2fs_update_device_state(sbi, inode->i_ino, in f2fs_map_blocks()
1769 blk_addr, map->m_len); in f2fs_map_blocks()
1771 f2fs_bug_on(sbi, blk_addr + map->m_len > in f2fs_map_blocks()
1777 if (map->m_flags & F2FS_MAP_MAPPED) { in f2fs_map_blocks()
1778 unsigned int ofs = start_pgofs - map->m_lblk; in f2fs_map_blocks()
1781 start_pgofs, map->m_pblk + ofs, in f2fs_map_blocks()
1782 map->m_len - ofs); in f2fs_map_blocks()
1784 if (map->m_next_extent) in f2fs_map_blocks()
1785 *map->m_next_extent = pgofs + 1; in f2fs_map_blocks()
1789 if (map->m_may_create) { in f2fs_map_blocks()
1815 map.m_len = last_lblk - map.m_lblk; in f2fs_overwrite_io()
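Sketch of f2fs_overwrite_io(), the caller of the matched line (reconstruction, assuming the mainline shape): it walks the requested range in mapped extents and reports whether every block already exists, i.e. whether a write is a pure overwrite:

bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
{
	struct f2fs_map_blocks map;
	block_t last_lblk;
	int err;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;
	last_lblk = F2FS_BLK_ALIGN(pos + len);

	while (map.m_lblk < last_lblk) {
		map.m_len = last_lblk - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
		if (err || map.m_len == 0)
			return false;
		map.m_lblk += map.m_len;
	}
	return true;
}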
1831 nid_t xnid = F2FS_I(inode)->i_xattr_nid; in f2fs_xattr_fiemap()
1837 inode->i_ino, false); in f2fs_xattr_fiemap()
1842 err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false); in f2fs_xattr_fiemap()
1850 sizeof(__le32) * (DEF_ADDRS_PER_INODE - in f2fs_xattr_fiemap()
1883 len = inode->i_sb->s_blocksize; in f2fs_xattr_fiemap()
1908 unsigned int cluster_size = F2FS_I(inode)->i_cluster_size; in f2fs_fiemap()
1912 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { in f2fs_fiemap()
1926 ret = -EFBIG; in f2fs_fiemap()
1930 if (len > maxbytes || (maxbytes - len) < start) in f2fs_fiemap()
1931 len = maxbytes - start; in f2fs_fiemap()
1933 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { in f2fs_fiemap()
1940 if (ret != -EAGAIN) in f2fs_fiemap()
1945 last_blk = F2FS_BYTES_TO_BLK(start + len - 1); in f2fs_fiemap()
1946 blk_len = last_blk - start_blk + 1; in f2fs_fiemap()
1947 max_len = F2FS_BYTES_TO_BLK(maxbytes) - start_blk; in f2fs_fiemap()
1958 map.m_len = cluster_size - count_in_cluster; in f2fs_fiemap()
1980 map.m_lblk + map.m_len - 1 == last_blk && in f2fs_fiemap()
2015 unsigned int appended_blks = cluster_size - in f2fs_fiemap()
2044 ret = -EINTR; in f2fs_fiemap()
2081 pgoff_t index = folio->index; in f2fs_read_single_page()
2087 blocksize - 1); in f2fs_read_single_page()
2097 if ((map->m_flags & F2FS_MAP_MAPPED) && in f2fs_read_single_page()
2098 block_in_file > map->m_lblk && in f2fs_read_single_page()
2099 block_in_file < (map->m_lblk + map->m_len)) in f2fs_read_single_page()
2106 map->m_lblk = block_in_file; in f2fs_read_single_page()
2107 map->m_len = last_block - block_in_file; in f2fs_read_single_page()
2113 if ((map->m_flags & F2FS_MAP_MAPPED)) { in f2fs_read_single_page()
2114 block_nr = map->m_pblk + block_in_file - map->m_lblk; in f2fs_read_single_page()
2119 ret = -EFSCORRUPTED; in f2fs_read_single_page()
2127 ret = -EIO; in f2fs_read_single_page()
2144 f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA); in f2fs_read_single_page()
2160 * its completion to see the correct decrypted data. in f2fs_read_single_page()
2182 struct inode *inode = cc->inode; in f2fs_read_multi_pages()
2185 unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size; in f2fs_read_multi_pages()
2195 ret = -EIO; in f2fs_read_multi_pages()
2203 blocksize - 1); in f2fs_read_multi_pages()
2206 for (i = 0; i < cc->cluster_size; i++) { in f2fs_read_multi_pages()
2207 struct page *page = cc->rpages[i]; in f2fs_read_multi_pages()
2214 if ((sector_t)folio->index >= last_block_in_file) { in f2fs_read_multi_pages()
2224 cc->rpages[i] = NULL; in f2fs_read_multi_pages()
2225 cc->nr_rpages--; in f2fs_read_multi_pages()
2246 for (i = 1; i < cc->cluster_size; i++) { in f2fs_read_multi_pages()
2251 ei.blk + i - 1; in f2fs_read_multi_pages()
2257 ret = -EFAULT; in f2fs_read_multi_pages()
2260 cc->nr_cpages++; in f2fs_read_multi_pages()
2267 if (cc->nr_cpages == 0) { in f2fs_read_multi_pages()
2278 for (i = 0; i < cc->nr_cpages; i++) { in f2fs_read_multi_pages()
2279 struct folio *folio = page_folio(dic->cpages[i]); in f2fs_read_multi_pages()
2290 if (atomic_dec_and_test(&dic->remaining_pages)) { in f2fs_read_multi_pages()
2299 !f2fs_crypt_mergeable_bio(bio, inode, folio->index, NULL))) { in f2fs_read_multi_pages()
2301 f2fs_submit_read_bio(sbi, bio, DATA); in f2fs_read_multi_pages()
2308 folio->index, for_write); in f2fs_read_multi_pages()
2322 ctx->enabled_steps |= STEP_DECOMPRESS; in f2fs_read_multi_pages()
2323 refcount_inc(&dic->refcnt); in f2fs_read_multi_pages()
2340 for (i = 0; i < cc->cluster_size; i++) { in f2fs_read_multi_pages()
2341 if (cc->rpages[i]) { in f2fs_read_multi_pages()
2342 ClearPageUptodate(cc->rpages[i]); in f2fs_read_multi_pages()
2343 unlock_page(cc->rpages[i]); in f2fs_read_multi_pages()
2364 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size, in f2fs_mpage_readpages()
2365 .cluster_size = F2FS_I(inode)->i_cluster_size, in f2fs_mpage_readpages()
2388 for (; nr_pages; nr_pages--) { in f2fs_mpage_readpages()
2391 prefetchw(&folio->flags); in f2fs_mpage_readpages()
2395 index = folio->index; in f2fs_mpage_readpages()
2462 f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA); in f2fs_mpage_readpages()
2468 struct inode *inode = folio->mapping->host; in f2fs_read_data_folio()
2469 int ret = -EAGAIN; in f2fs_read_data_folio()
2471 trace_f2fs_readpage(folio, DATA); in f2fs_read_data_folio()
2475 return -EOPNOTSUPP; in f2fs_read_data_folio()
2478 /* If the file has inline data, try to read it directly */ in f2fs_read_data_folio()
2481 if (ret == -EAGAIN) in f2fs_read_data_folio()
2488 struct inode *inode = rac->mapping->host; in f2fs_readahead()
2495 /* If the file has inline data, skip readahead */ in f2fs_readahead()
2512 page = fio->compressed_page ? fio->compressed_page : fio->page; in f2fs_encrypt_one_page()
2518 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page_folio(page), in f2fs_encrypt_one_page()
2520 if (IS_ERR(fio->encrypted_page)) { in f2fs_encrypt_one_page()
2522 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) { in f2fs_encrypt_one_page()
2523 f2fs_flush_merged_writes(fio->sbi); in f2fs_encrypt_one_page()
2528 return PTR_ERR(fio->encrypted_page); in f2fs_encrypt_one_page()
2531 mfolio = filemap_lock_folio(META_MAPPING(fio->sbi), fio->old_blkaddr); in f2fs_encrypt_one_page()
2535 page_address(fio->encrypted_page), PAGE_SIZE); in f2fs_encrypt_one_page()
2553 if (IS_F2FS_IPU_UTIL(sbi) && utilization(sbi) > SM_I(sbi)->min_ipu_util) in check_inplace_update_policy()
2556 utilization(sbi) > SM_I(sbi)->min_ipu_util) in check_inplace_update_policy()
2562 if (IS_F2FS_IPU_ASYNC(sbi) && fio && fio->op == REQ_OP_WRITE && in check_inplace_update_policy()
2563 !(fio->op_flags & REQ_SYNC) && !IS_ENCRYPTED(inode)) in check_inplace_update_policy()
2571 !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr))) in check_inplace_update_policy()
2604 if (S_ISDIR(inode->i_mode)) in f2fs_should_update_outplace()
2610 /* rewrite low-ratio compressed data with OPU mode to avoid fragmentation */ in f2fs_should_update_outplace()
2624 if (page_private_gcing(fio->page)) in f2fs_should_update_outplace()
2627 f2fs_is_checkpointed_data(sbi, fio->old_blkaddr))) in f2fs_should_update_outplace()
2645 struct folio *folio = page_folio(fio->page); in f2fs_do_write_data_page()
2646 struct inode *inode = folio->mapping->host; in f2fs_do_write_data_page()
2657 set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0); in f2fs_do_write_data_page()
2662 f2fs_lookup_read_extent_cache_block(inode, folio->index, in f2fs_do_write_data_page()
2663 &fio->old_blkaddr)) { in f2fs_do_write_data_page()
2664 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, in f2fs_do_write_data_page()
2666 return -EFSCORRUPTED; in f2fs_do_write_data_page()
2669 fio->need_lock = LOCK_DONE; in f2fs_do_write_data_page()
2673 /* avoid deadlock between page->lock and f2fs_lock_op */ in f2fs_do_write_data_page()
2674 if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi)) in f2fs_do_write_data_page()
2675 return -EAGAIN; in f2fs_do_write_data_page()
2677 err = f2fs_get_dnode_of_data(&dn, folio->index, LOOKUP_NODE); in f2fs_do_write_data_page()
2681 fio->old_blkaddr = dn.data_blkaddr; in f2fs_do_write_data_page()
2684 if (fio->old_blkaddr == NULL_ADDR) { in f2fs_do_write_data_page()
2690 if (__is_valid_data_blkaddr(fio->old_blkaddr) && in f2fs_do_write_data_page()
2691 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, in f2fs_do_write_data_page()
2693 err = -EFSCORRUPTED; in f2fs_do_write_data_page()
2698 if (fio->meta_gc) in f2fs_do_write_data_page()
2699 f2fs_wait_on_block_writeback(inode, fio->old_blkaddr); in f2fs_do_write_data_page()
2703 * it had better use in-place writes for updated data. in f2fs_do_write_data_page()
2706 (__is_valid_data_blkaddr(fio->old_blkaddr) && in f2fs_do_write_data_page()
2714 if (fio->need_lock == LOCK_REQ) in f2fs_do_write_data_page()
2715 f2fs_unlock_op(fio->sbi); in f2fs_do_write_data_page()
2719 fscrypt_finalize_bounce_page(&fio->encrypted_page); in f2fs_do_write_data_page()
2728 if (fio->need_lock == LOCK_RETRY) { in f2fs_do_write_data_page()
2729 if (!f2fs_trylock_op(fio->sbi)) { in f2fs_do_write_data_page()
2730 err = -EAGAIN; in f2fs_do_write_data_page()
2733 fio->need_lock = LOCK_REQ; in f2fs_do_write_data_page()
2736 err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false); in f2fs_do_write_data_page()
2740 fio->version = ni.version; in f2fs_do_write_data_page()
2748 if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR) in f2fs_do_write_data_page()
2749 f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false); in f2fs_do_write_data_page()
2760 if (fio->need_lock == LOCK_REQ) in f2fs_do_write_data_page()
2761 f2fs_unlock_op(fio->sbi); in f2fs_do_write_data_page()
2773 struct inode *inode = folio->mapping->host; in f2fs_write_single_data_page()
2779 loff_t psize = (loff_t)(folio->index + 1) << PAGE_SHIFT; in f2fs_write_single_data_page()
2786 .ino = inode->i_ino, in f2fs_write_single_data_page()
2787 .type = DATA, in f2fs_write_single_data_page()
2803 trace_f2fs_writepage(folio, DATA); in f2fs_write_single_data_page()
2805 /* we should bypass data pages to let the kworker jobs proceed */ in f2fs_write_single_data_page()
2807 mapping_set_error(folio->mapping, -EIO); in f2fs_write_single_data_page()
2812 if (S_ISDIR(inode->i_mode) && in f2fs_write_single_data_page()
2816 /* keep data pages in remount-ro mode */ in f2fs_write_single_data_page()
2825 if (folio->index < end_index || in f2fs_write_single_data_page()
2831 * If the offset is beyond the end of the file, in f2fs_write_single_data_page()
2834 offset = i_size & (PAGE_SIZE - 1); in f2fs_write_single_data_page()
2835 if ((folio->index >= end_index + 1) || !offset) in f2fs_write_single_data_page()
2841 if (S_ISDIR(inode->i_mode) || quota_inode) { in f2fs_write_single_data_page()
2848 f2fs_down_read(&sbi->node_write); in f2fs_write_single_data_page()
2854 f2fs_up_read(&sbi->node_write); in f2fs_write_single_data_page()
2860 err = -EAGAIN; in f2fs_write_single_data_page()
2867 if (err == -EAGAIN) { in f2fs_write_single_data_page()
2869 if (err == -EAGAIN) { in f2fs_write_single_data_page()
2879 spin_lock(&F2FS_I(inode)->i_size_lock); in f2fs_write_single_data_page()
2880 if (F2FS_I(inode)->last_disk_size < psize) in f2fs_write_single_data_page()
2881 F2FS_I(inode)->last_disk_size = psize; in f2fs_write_single_data_page()
2882 spin_unlock(&F2FS_I(inode)->i_size_lock); in f2fs_write_single_data_page()
2886 if (err && err != -ENOENT) in f2fs_write_single_data_page()
2896 if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) && in f2fs_write_single_data_page()
2897 !F2FS_I(inode)->wb_task && allow_balance) in f2fs_write_single_data_page()
2901 f2fs_submit_merged_write(sbi, DATA); in f2fs_write_single_data_page()
2916 * -> mapping_set_error() -> set_bit(AS_EIO, ...). in f2fs_write_single_data_page()
2927 * This function was copied from write_cache_pages in mm/page-writeback.c.
2928 * The major change is that cold data pages are written in a separate step
2929 * from warm/hot data pages.
2931 static int f2fs_write_cache_pages(struct address_space *mapping, in f2fs_write_cache_pages() argument
2940 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); in f2fs_write_cache_pages()
2944 struct inode *inode = mapping->host; in f2fs_write_cache_pages()
2947 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size, in f2fs_write_cache_pages()
2948 .cluster_size = F2FS_I(inode)->i_cluster_size, in f2fs_write_cache_pages()
2956 .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size, in f2fs_write_cache_pages()
2983 if (get_dirty_pages(mapping->host) <= in f2fs_write_cache_pages()
2984 SM_I(F2FS_M_SB(mapping))->min_hot_blocks) in f2fs_write_cache_pages()
2985 set_inode_flag(mapping->host, FI_HOT_DATA); in f2fs_write_cache_pages()
2987 clear_inode_flag(mapping->host, FI_HOT_DATA); in f2fs_write_cache_pages()
2989 if (wbc->range_cyclic) { in f2fs_write_cache_pages()
2990 index = mapping->writeback_index; /* prev offset */ in f2fs_write_cache_pages()
2991 end = -1; in f2fs_write_cache_pages()
2993 index = wbc->range_start >> PAGE_SHIFT; in f2fs_write_cache_pages()
2994 end = wbc->range_end >> PAGE_SHIFT; in f2fs_write_cache_pages()
2995 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) in f2fs_write_cache_pages()
2998 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) in f2fs_write_cache_pages()
3004 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) in f2fs_write_cache_pages()
3005 tag_pages_for_writeback(mapping, index, end); in f2fs_write_cache_pages()
3010 nr_folios = filemap_get_folios_tag(mapping, &index, end, in f2fs_write_cache_pages()
3027 index = folio->index + idx + 1; in f2fs_write_cache_pages()
3056 folio->index)) { in f2fs_write_cache_pages()
3076 folio->index, &fsdata); in f2fs_write_cache_pages()
3083 fsdata, folio->index, 1) || in f2fs_write_cache_pages()
3093 if (atomic_read(&sbi->wb_sync_req[DATA]) && in f2fs_write_cache_pages()
3094 wbc->sync_mode == WB_SYNC_NONE) { in f2fs_write_cache_pages()
3101 done_index = folio->index; in f2fs_write_cache_pages()
3105 if (unlikely(folio->mapping != mapping)) { in f2fs_write_cache_pages()
3117 if (wbc->sync_mode == WB_SYNC_NONE) in f2fs_write_cache_pages()
3119 f2fs_folio_wait_writeback(folio, DATA, true, true); in f2fs_write_cache_pages()
3140 wbc->nr_to_write -= submitted; in f2fs_write_cache_pages()
3150 } else if (ret == -EAGAIN) { in f2fs_write_cache_pages()
3152 if (wbc->sync_mode == WB_SYNC_ALL) { in f2fs_write_cache_pages()
3164 if (wbc->nr_to_write <= 0 && in f2fs_write_cache_pages()
3165 wbc->sync_mode == WB_SYNC_NONE) { in f2fs_write_cache_pages()
3181 wbc->nr_to_write -= submitted; in f2fs_write_cache_pages()
3192 end = -1; in f2fs_write_cache_pages()
3195 if (wbc->range_cyclic && !done) in f2fs_write_cache_pages()
3197 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) in f2fs_write_cache_pages()
3198 mapping->writeback_index = done_index; in f2fs_write_cache_pages()
3201 f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host, in f2fs_write_cache_pages()
3202 NULL, 0, DATA); in f2fs_write_cache_pages()
3218 /* to avoid deadlock in the data flush path */ in __should_serialize_io()
3219 if (F2FS_I(inode)->wb_task) in __should_serialize_io()
3222 if (!S_ISREG(inode->i_mode)) in __should_serialize_io()
3229 if (wbc->sync_mode != WB_SYNC_ALL) in __should_serialize_io()
3231 if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks) in __should_serialize_io()
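The matched conditions assemble into __should_serialize_io() roughly as follows (reconstruction; f2fs_need_compress_data() is the compression check used in mainline):

static inline bool __should_serialize_io(struct inode *inode,
					struct writeback_control *wbc)
{
	/* to avoid deadlock in the data flush path */
	if (F2FS_I(inode)->wb_task)
		return false;

	if (!S_ISREG(inode->i_mode))
		return false;
	if (IS_NOQUOTA(inode))
		return false;

	if (f2fs_need_compress_data(inode))
		return true;
	if (wbc->sync_mode != WB_SYNC_ALL)
		return true;
	if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
		return true;
	return false;
}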
3236 static int __f2fs_write_data_pages(struct address_space *mapping, in __f2fs_write_data_pages() argument
3240 struct inode *inode = mapping->host; in __f2fs_write_data_pages()
3247 if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE) in __f2fs_write_data_pages()
3254 if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) && in __f2fs_write_data_pages()
3255 wbc->sync_mode == WB_SYNC_NONE && in __f2fs_write_data_pages()
3256 get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) && in __f2fs_write_data_pages()
3264 trace_f2fs_writepages(mapping->host, wbc, DATA); in __f2fs_write_data_pages()
3267 if (wbc->sync_mode == WB_SYNC_ALL) in __f2fs_write_data_pages()
3268 atomic_inc(&sbi->wb_sync_req[DATA]); in __f2fs_write_data_pages()
3269 else if (atomic_read(&sbi->wb_sync_req[DATA])) { in __f2fs_write_data_pages()
3271 if (current->plug) in __f2fs_write_data_pages()
3272 blk_finish_plug(current->plug); in __f2fs_write_data_pages()
3277 mutex_lock(&sbi->writepages); in __f2fs_write_data_pages()
3282 ret = f2fs_write_cache_pages(mapping, wbc, io_type); in __f2fs_write_data_pages()
3286 mutex_unlock(&sbi->writepages); in __f2fs_write_data_pages()
3288 if (wbc->sync_mode == WB_SYNC_ALL) in __f2fs_write_data_pages()
3289 atomic_dec(&sbi->wb_sync_req[DATA]); in __f2fs_write_data_pages()
3291 * if some pages were truncated, we cannot guarantee their mapping->host in __f2fs_write_data_pages()
3299 wbc->pages_skipped += get_dirty_pages(inode); in __f2fs_write_data_pages()
3300 trace_f2fs_writepages(mapping->host, wbc, DATA); in __f2fs_write_data_pages()
3304 static int f2fs_write_data_pages(struct address_space *mapping, in f2fs_write_data_pages() argument
3307 struct inode *inode = mapping->host; in f2fs_write_data_pages()
3309 return __f2fs_write_data_pages(mapping, wbc, in f2fs_write_data_pages()
3310 F2FS_I(inode)->cp_task == current ? in f2fs_write_data_pages()
3321 /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */ in f2fs_write_failed()
3323 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_write_failed()
3324 filemap_invalidate_lock(inode->i_mapping); in f2fs_write_failed()
3329 filemap_invalidate_unlock(inode->i_mapping); in f2fs_write_failed()
3330 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_write_failed()
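Reconstruction of f2fs_write_failed(), which the matched lock/unlock lines bracket: a failed write past EOF rolls back the page cache and any reserved blocks, except when fs-verity will do the truncate itself:

static void f2fs_write_failed(struct inode *inode, loff_t to)
{
	loff_t i_size = i_size_read(inode);

	if (IS_NOQUOTA(inode))
		return;

	/* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
	if (to > i_size && !f2fs_verity_in_progress(inode)) {
		f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		filemap_invalidate_lock(inode->i_mapping);

		truncate_pagecache(inode, i_size);
		f2fs_truncate_blocks(inode, i_size, true);

		filemap_invalidate_unlock(inode->i_mapping);
		f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	}
}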
3338 struct inode *inode = folio->mapping->host; in prepare_write_begin()
3339 pgoff_t index = folio->index; in prepare_write_begin()
3366 ifolio = f2fs_get_inode_folio(sbi, inode->i_ino); in prepare_write_begin()
3378 if (inode->i_nlink) in prepare_write_begin()
3379 set_page_private_inline(&ifolio->page); in prepare_write_begin()
3390 err = -ENODATA; in prepare_write_begin()
3429 ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino); in __find_data_block()
3459 ifolio = f2fs_get_inode_folio(sbi, inode->i_ino); in __reserve_data_block()
3483 struct inode *inode = folio->mapping->host; in prepare_atomic_write_begin()
3484 struct inode *cow_inode = F2FS_I(inode)->cow_inode; in prepare_atomic_write_begin()
3485 pgoff_t index = folio->index; in prepare_atomic_write_begin()
3522 static int f2fs_write_begin(struct file *file, struct address_space *mapping, in f2fs_write_begin() argument
3525 struct inode *inode = mapping->host; in f2fs_write_begin()
3537 err = -ENOSPC; in f2fs_write_begin()
3544 * folio_lock(folio #0) -> folio_lock(inode_page) in f2fs_write_begin()
3579 folio = __filemap_get_folio(mapping, index, in f2fs_write_begin()
3604 if (folio->mapping != mapping) { in f2fs_write_begin()
3612 f2fs_folio_wait_writeback(folio, DATA, false, true); in f2fs_write_begin()
3617 if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) && in f2fs_write_begin()
3629 err = -EFSCORRUPTED; in f2fs_write_begin()
3633 F2FS_I(inode)->cow_inode : inode, in f2fs_write_begin()
3639 if (unlikely(folio->mapping != mapping)) { in f2fs_write_begin()
3645 err = -EIO; in f2fs_write_begin()
3660 struct address_space *mapping, in f2fs_write_end() argument
3664 struct inode *inode = folio->mapping->host; in f2fs_write_end()
3671 * let generic_perform_write() try to copy data again through copied=0. in f2fs_write_end()
3683 f2fs_compress_write_end(inode, fsdata, folio->index, copied); in f2fs_write_end()
3705 f2fs_i_size_write(F2FS_I(inode)->cow_inode, in f2fs_write_end()
3717 struct inode *inode = folio->mapping->host; in f2fs_invalidate_folio()
3720 if (inode->i_ino >= F2FS_ROOT_INO(sbi) && in f2fs_invalidate_folio()
3725 if (inode->i_ino == F2FS_META_INO(sbi)) { in f2fs_invalidate_folio()
3727 } else if (inode->i_ino == F2FS_NODE_INO(sbi)) { in f2fs_invalidate_folio()
3734 clear_page_private_all(&folio->page); in f2fs_invalidate_folio()
3739 /* If this is a dirty folio, keep its private data */ in f2fs_release_folio()
3743 clear_page_private_all(&folio->page); in f2fs_release_folio()
3747 static bool f2fs_dirty_data_folio(struct address_space *mapping, in f2fs_dirty_data_folio() argument
3750 struct inode *inode = mapping->host; in f2fs_dirty_data_folio()
3752 trace_f2fs_set_page_dirty(folio, DATA); in f2fs_dirty_data_folio()
3758 if (filemap_dirty_folio(mapping, folio)) { in f2fs_dirty_data_folio()
3773 start_idx = round_down(block, F2FS_I(inode)->i_cluster_size); in f2fs_bmap_compress()
3781 dn.ofs_in_node += block - start_idx; in f2fs_bmap_compress()
3795 static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) in f2fs_bmap() argument
3797 struct inode *inode = mapping->host; in f2fs_bmap()
3804 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) in f2fs_bmap()
3805 filemap_write_and_wait(mapping); in f2fs_bmap()
3837 unsigned int end_blk = start_blk + blkcnt - 1; in f2fs_migrate_blocks()
3846 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_migrate_blocks()
3847 filemap_invalidate_lock(inode->i_mapping); in f2fs_migrate_blocks()
3854 end_blk % blk_per_sec : blk_per_sec - 1; in f2fs_migrate_blocks()
3856 f2fs_down_write(&sbi->pin_sem); in f2fs_migrate_blocks()
3860 f2fs_up_write(&sbi->pin_sem); in f2fs_migrate_blocks()
3872 f2fs_up_write(&sbi->pin_sem); in f2fs_migrate_blocks()
3883 ret = filemap_fdatawrite(inode->i_mapping); in f2fs_migrate_blocks()
3885 f2fs_up_write(&sbi->pin_sem); in f2fs_migrate_blocks()
3896 filemap_invalidate_unlock(inode->i_mapping); in f2fs_migrate_blocks()
3897 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_migrate_blocks()
3905 struct address_space *mapping = swap_file->f_mapping; in check_swap_activate() local
3906 struct inode *inode = mapping->host; in check_swap_activate()
3911 block_t lowest_pblock = -1; in check_swap_activate()
3926 while (cur_lblock < last_lblock && cur_lblock < sis->max) { in check_swap_activate()
3933 map.m_len = last_lblock - cur_lblock; in check_swap_activate()
3946 ret = -EINVAL; in check_swap_activate()
3953 if ((pblock - SM_I(sbi)->main_blkaddr) % blks_per_sec || in check_swap_activate()
3961 if (cur_lblock + nr_pblocks > sis->max) in check_swap_activate()
3962 nr_pblocks -= blks_per_sec; in check_swap_activate()
3966 nr_pblocks = last_lblock - cur_lblock; in check_swap_activate()
3973 if (ret == -ENOENT) in check_swap_activate()
3974 ret = -EINVAL; in check_swap_activate()
3982 if (cur_lblock + nr_pblocks >= sis->max) in check_swap_activate()
3983 nr_pblocks = sis->max - cur_lblock; in check_swap_activate()
3988 if (pblock + nr_pblocks - 1 > highest_pblock) in check_swap_activate()
3989 highest_pblock = pblock + nr_pblocks - 1; in check_swap_activate()
3993 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks in check_swap_activate()
4002 *span = 1 + highest_pblock - lowest_pblock; in check_swap_activate()
4005 sis->max = cur_lblock; in check_swap_activate()
4006 sis->pages = cur_lblock - 1; in check_swap_activate()
4021 if (!S_ISREG(inode->i_mode)) in f2fs_swap_activate()
4022 return -EINVAL; in f2fs_swap_activate()
4024 if (f2fs_readonly(sbi->sb)) in f2fs_swap_activate()
4025 return -EROFS; in f2fs_swap_activate()
4029 return -EINVAL; in f2fs_swap_activate()
4037 return -EINVAL; in f2fs_swap_activate()
4039 ret = filemap_fdatawrite(inode->i_mapping); in f2fs_swap_activate()
4066 return -EOPNOTSUPP; in f2fs_swap_activate()
4091 struct address_space *mapping = folio->mapping; in f2fs_clear_page_cache_dirty_tag() local
4094 xa_lock_irqsave(&mapping->i_pages, flags); in f2fs_clear_page_cache_dirty_tag()
4095 __xa_clear_mark(&mapping->i_pages, folio->index, in f2fs_clear_page_cache_dirty_tag()
4097 xa_unlock_irqrestore(&mapping->i_pages, flags); in f2fs_clear_page_cache_dirty_tag()
4117 return -ENOMEM; in f2fs_init_post_read_processing()
4133 sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq", in f2fs_init_post_read_wq()
4136 return sbi->post_read_wq ? 0 : -ENOMEM; in f2fs_init_post_read_wq()
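Full shape of f2fs_init_post_read_wq() for context (reconstruction): the workqueue is only allocated when a post-read step can actually occur:

int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
{
	if (!f2fs_sb_has_encrypt(sbi) &&
	    !f2fs_sb_has_verity(sbi) &&
	    !f2fs_sb_has_compression(sbi))
		return 0;

	sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
					    WQ_UNBOUND | WQ_HIGHPRI,
					    num_online_cpus());
	return sbi->post_read_wq ? 0 : -ENOMEM;
}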
4141 if (sbi->post_read_wq) in f2fs_destroy_post_read_wq()
4142 destroy_workqueue(sbi->post_read_wq); in f2fs_destroy_post_read_wq()
4149 return bio_entry_slab ? 0 : -ENOMEM; in f2fs_init_bio_entry_cache()
4166 map.m_len = F2FS_BYTES_TO_BLK(offset + length - 1) - map.m_lblk + 1; in f2fs_iomap_begin()
4169 inode->i_write_hint); in f2fs_iomap_begin()
4183 iomap->offset = F2FS_BLK_TO_BYTES(map.m_lblk); in f2fs_iomap_begin()
4188 * limiting the length of the mapping returned. in f2fs_iomap_begin()
4197 return -EINVAL; in f2fs_iomap_begin()
4201 return -EINVAL; in f2fs_iomap_begin()
4203 iomap->length = F2FS_BLK_TO_BYTES(map.m_len); in f2fs_iomap_begin()
4204 iomap->type = IOMAP_MAPPED; in f2fs_iomap_begin()
4205 iomap->flags |= IOMAP_F_MERGED; in f2fs_iomap_begin()
4206 iomap->bdev = map.m_bdev; in f2fs_iomap_begin()
4207 iomap->addr = F2FS_BLK_TO_BYTES(map.m_pblk); in f2fs_iomap_begin()
4210 return -ENOTBLK; in f2fs_iomap_begin()
4213 iomap->length = F2FS_BLK_TO_BYTES(next_pgofs) - in f2fs_iomap_begin()
4214 iomap->offset; in f2fs_iomap_begin()
4215 iomap->type = IOMAP_HOLE; in f2fs_iomap_begin()
4217 iomap->length = F2FS_BLK_TO_BYTES(map.m_len); in f2fs_iomap_begin()
4218 iomap->type = IOMAP_UNWRITTEN; in f2fs_iomap_begin()
4222 iomap->addr = IOMAP_NULL_ADDR; in f2fs_iomap_begin()
4226 iomap->flags |= IOMAP_F_NEW; in f2fs_iomap_begin()
4227 if ((inode->i_state & I_DIRTY_DATASYNC) || in f2fs_iomap_begin()
4229 iomap->flags |= IOMAP_F_DIRTY; in f2fs_iomap_begin()
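The f2fs_iomap_begin() matches above are wired up through the iomap_ops table that closes the file in mainline:

const struct iomap_ops f2fs_iomap_ops = {
	.iomap_begin	= f2fs_iomap_begin,
};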