xref: /linux/fs/f2fs/data.c (revision 73b0140bf0fe9df90fb267c00673c4b9bf285430)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/data.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/buffer_head.h>
11 #include <linux/mpage.h>
12 #include <linux/writeback.h>
13 #include <linux/backing-dev.h>
14 #include <linux/pagevec.h>
15 #include <linux/blkdev.h>
16 #include <linux/bio.h>
17 #include <linux/prefetch.h>
18 #include <linux/uio.h>
19 #include <linux/cleancache.h>
20 #include <linux/sched/signal.h>
21 
22 #include "f2fs.h"
23 #include "node.h"
24 #include "segment.h"
25 #include "trace.h"
26 #include <trace/events/f2fs.h>
27 
28 #define NUM_PREALLOC_POST_READ_CTXS	128
29 
30 static struct kmem_cache *bio_post_read_ctx_cache;
31 static mempool_t *bio_post_read_ctx_pool;
32 
33 static bool __is_cp_guaranteed(struct page *page)
34 {
35 	struct address_space *mapping = page->mapping;
36 	struct inode *inode;
37 	struct f2fs_sb_info *sbi;
38 
39 	if (!mapping)
40 		return false;
41 
42 	inode = mapping->host;
43 	sbi = F2FS_I_SB(inode);
44 
45 	if (inode->i_ino == F2FS_META_INO(sbi) ||
46 			inode->i_ino ==  F2FS_NODE_INO(sbi) ||
47 			S_ISDIR(inode->i_mode) ||
48 			(S_ISREG(inode->i_mode) &&
49 			(f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
50 			is_cold_data(page))
51 		return true;
52 	return false;
53 }
54 
55 static enum count_type __read_io_type(struct page *page)
56 {
57 	struct address_space *mapping = page->mapping;
58 
59 	if (mapping) {
60 		struct inode *inode = mapping->host;
61 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
62 
63 		if (inode->i_ino == F2FS_META_INO(sbi))
64 			return F2FS_RD_META;
65 
66 		if (inode->i_ino == F2FS_NODE_INO(sbi))
67 			return F2FS_RD_NODE;
68 	}
69 	return F2FS_RD_DATA;
70 }
71 
72 /* postprocessing steps for read bios */
73 enum bio_post_read_step {
74 	STEP_INITIAL = 0,
75 	STEP_DECRYPT,
76 };
77 
78 struct bio_post_read_ctx {
79 	struct bio *bio;
80 	struct work_struct work;
81 	unsigned int cur_step;
82 	unsigned int enabled_steps;
83 };
84 
85 static void __read_end_io(struct bio *bio)
86 {
87 	struct page *page;
88 	struct bio_vec *bv;
89 	struct bvec_iter_all iter_all;
90 
91 	bio_for_each_segment_all(bv, bio, iter_all) {
92 		page = bv->bv_page;
93 
94 		/* PG_error was set if any post_read step failed */
95 		if (bio->bi_status || PageError(page)) {
96 			ClearPageUptodate(page);
97 			/* will re-read again later */
98 			ClearPageError(page);
99 		} else {
100 			SetPageUptodate(page);
101 		}
102 		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
103 		unlock_page(page);
104 	}
105 	if (bio->bi_private)
106 		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
107 	bio_put(bio);
108 }
109 
110 static void bio_post_read_processing(struct bio_post_read_ctx *ctx);
111 
112 static void decrypt_work(struct work_struct *work)
113 {
114 	struct bio_post_read_ctx *ctx =
115 		container_of(work, struct bio_post_read_ctx, work);
116 
117 	fscrypt_decrypt_bio(ctx->bio);
118 
119 	bio_post_read_processing(ctx);
120 }
121 
122 static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
123 {
124 	switch (++ctx->cur_step) {
125 	case STEP_DECRYPT:
126 		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
127 			INIT_WORK(&ctx->work, decrypt_work);
128 			fscrypt_enqueue_decrypt_work(&ctx->work);
129 			return;
130 		}
131 		ctx->cur_step++;
132 		/* fall-through */
133 	default:
134 		__read_end_io(ctx->bio);
135 	}
136 }
137 
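/*
 * Walk-through (illustration only): how a completed read bio flows through
 * the post-read machinery above.  f2fs_read_end_io() sets
 * ctx->cur_step = STEP_INITIAL and calls bio_post_read_processing(); the
 * ++ advances to STEP_DECRYPT, and if that bit is set in
 * ctx->enabled_steps, decrypt_work() runs on the fscrypt workqueue and
 * re-enters bio_post_read_processing(); once past the last enabled step,
 * __read_end_io() completes the pages.  A sketch of enabling a step when
 * building the ctx (mirrors f2fs_grab_read_bio() below):
 *
 *	ctx->enabled_steps |= 1 << STEP_DECRYPT;
 *	bio->bi_private = ctx;
 */
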
138 static bool f2fs_bio_post_read_required(struct bio *bio)
139 {
140 	return bio->bi_private && !bio->bi_status;
141 }
142 
143 static void f2fs_read_end_io(struct bio *bio)
144 {
145 	if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)),
146 						FAULT_READ_IO)) {
147 		f2fs_show_injection_info(FAULT_READ_IO);
148 		bio->bi_status = BLK_STS_IOERR;
149 	}
150 
151 	if (f2fs_bio_post_read_required(bio)) {
152 		struct bio_post_read_ctx *ctx = bio->bi_private;
153 
154 		ctx->cur_step = STEP_INITIAL;
155 		bio_post_read_processing(ctx);
156 		return;
157 	}
158 
159 	__read_end_io(bio);
160 }
161 
162 static void f2fs_write_end_io(struct bio *bio)
163 {
164 	struct f2fs_sb_info *sbi = bio->bi_private;
165 	struct bio_vec *bvec;
166 	struct bvec_iter_all iter_all;
167 
168 	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
169 		f2fs_show_injection_info(FAULT_WRITE_IO);
170 		bio->bi_status = BLK_STS_IOERR;
171 	}
172 
173 	bio_for_each_segment_all(bvec, bio, iter_all) {
174 		struct page *page = bvec->bv_page;
175 		enum count_type type = WB_DATA_TYPE(page);
176 
177 		if (IS_DUMMY_WRITTEN_PAGE(page)) {
178 			set_page_private(page, (unsigned long)NULL);
179 			ClearPagePrivate(page);
180 			unlock_page(page);
181 			mempool_free(page, sbi->write_io_dummy);
182 
183 			if (unlikely(bio->bi_status))
184 				f2fs_stop_checkpoint(sbi, true);
185 			continue;
186 		}
187 
188 		fscrypt_pullback_bio_page(&page, true);
189 
190 		if (unlikely(bio->bi_status)) {
191 			mapping_set_error(page->mapping, -EIO);
192 			if (type == F2FS_WB_CP_DATA)
193 				f2fs_stop_checkpoint(sbi, true);
194 		}
195 
196 		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
197 					page->index != nid_of_node(page));
198 
199 		dec_page_count(sbi, type);
200 		if (f2fs_in_warm_node_list(sbi, page))
201 			f2fs_del_fsync_node_entry(sbi, page);
202 		clear_cold_data(page);
203 		end_page_writeback(page);
204 	}
205 	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
206 				wq_has_sleeper(&sbi->cp_wait))
207 		wake_up(&sbi->cp_wait);
208 
209 	bio_put(bio);
210 }
211 
212 /*
213  * Return the block device that owns @blk_addr; if @bio is given, also
214  * point @bio at that device and set its start sector.
215  */
215 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
216 				block_t blk_addr, struct bio *bio)
217 {
218 	struct block_device *bdev = sbi->sb->s_bdev;
219 	int i;
220 
221 	for (i = 0; i < sbi->s_ndevs; i++) {
222 		if (FDEV(i).start_blk <= blk_addr &&
223 					FDEV(i).end_blk >= blk_addr) {
224 			blk_addr -= FDEV(i).start_blk;
225 			bdev = FDEV(i).bdev;
226 			break;
227 		}
228 	}
229 	if (bio) {
230 		bio_set_dev(bio, bdev);
231 		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
232 	}
233 	return bdev;
234 }
235 
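/*
 * Usage sketch (hypothetical caller): remap a raw block address onto the
 * owning device before submission.  This mirrors __bio_alloc() below.
 *
 *	bio = f2fs_bio_alloc(sbi, npages, true);
 *	f2fs_target_device(sbi, blk_addr, bio);	// sets bdev + bi_sector
 *	bio->bi_end_io = f2fs_read_end_io;
 *	submit_bio(bio);
 */
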
236 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
237 {
238 	int i;
239 
240 	for (i = 0; i < sbi->s_ndevs; i++)
241 		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
242 			return i;
243 	return 0;
244 }
245 
246 static bool __same_bdev(struct f2fs_sb_info *sbi,
247 				block_t blk_addr, struct bio *bio)
248 {
249 	struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
250 	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
251 }
252 
253 /*
254  * Low-level block read/write IO operations.
255  */
256 static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
257 				struct writeback_control *wbc,
258 				int npages, bool is_read,
259 				enum page_type type, enum temp_type temp)
260 {
261 	struct bio *bio;
262 
263 	bio = f2fs_bio_alloc(sbi, npages, true);
264 
265 	f2fs_target_device(sbi, blk_addr, bio);
266 	if (is_read) {
267 		bio->bi_end_io = f2fs_read_end_io;
268 		bio->bi_private = NULL;
269 	} else {
270 		bio->bi_end_io = f2fs_write_end_io;
271 		bio->bi_private = sbi;
272 		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, type, temp);
273 	}
274 	if (wbc)
275 		wbc_init_bio(wbc, bio);
276 
277 	return bio;
278 }
279 
280 static inline void __submit_bio(struct f2fs_sb_info *sbi,
281 				struct bio *bio, enum page_type type)
282 {
283 	if (!is_read_io(bio_op(bio))) {
284 		unsigned int start;
285 
286 		if (type != DATA && type != NODE)
287 			goto submit_io;
288 
289 		if (test_opt(sbi, LFS) && current->plug)
290 			blk_finish_plug(current->plug);
291 
292 		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
293 		start %= F2FS_IO_SIZE(sbi);
294 
295 		if (start == 0)
296 			goto submit_io;
297 
298 		/* fill dummy pages */
299 		for (; start < F2FS_IO_SIZE(sbi); start++) {
300 			struct page *page =
301 				mempool_alloc(sbi->write_io_dummy,
302 					      GFP_NOIO | __GFP_NOFAIL);
303 			f2fs_bug_on(sbi, !page);
304 
305 			zero_user_segment(page, 0, PAGE_SIZE);
306 			SetPagePrivate(page);
307 			set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
308 			lock_page(page);
309 			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
310 				f2fs_bug_on(sbi, 1);
311 		}
312 		/*
313 		 * In the NODE case, we lose the next block address chain, so we
314 		 * need to do a checkpoint in f2fs_sync_file.
315 		 */
316 		if (type == NODE)
317 			set_sbi_flag(sbi, SBI_NEED_CP);
318 	}
319 submit_io:
320 	if (is_read_io(bio_op(bio)))
321 		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
322 	else
323 		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
324 	submit_bio(bio);
325 }
326 
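/*
 * Worked example for the dummy-page padding above: if F2FS_IO_SIZE(sbi)
 * is 8 blocks and the bio currently holds 5 blocks, start == 5, so three
 * zeroed dummy pages are appended to pad the write out to the 8-block IO
 * size required under LFS mode.
 */
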
327 static void __submit_merged_bio(struct f2fs_bio_info *io)
328 {
329 	struct f2fs_io_info *fio = &io->fio;
330 
331 	if (!io->bio)
332 		return;
333 
334 	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
335 
336 	if (is_read_io(fio->op))
337 		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
338 	else
339 		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
340 
341 	__submit_bio(io->sbi, io->bio, fio->type);
342 	io->bio = NULL;
343 }
344 
345 static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
346 						struct page *page, nid_t ino)
347 {
348 	struct bio_vec *bvec;
349 	struct page *target;
350 	struct bvec_iter_all iter_all;
351 
352 	if (!io->bio)
353 		return false;
354 
355 	if (!inode && !page && !ino)
356 		return true;
357 
358 	bio_for_each_segment_all(bvec, io->bio, iter_all) {
359 
360 		if (bvec->bv_page->mapping)
361 			target = bvec->bv_page;
362 		else
363 			target = fscrypt_control_page(bvec->bv_page);
364 
365 		if (inode && inode == target->mapping->host)
366 			return true;
367 		if (page && page == target)
368 			return true;
369 		if (ino && ino == ino_of_node(target))
370 			return true;
371 	}
372 
373 	return false;
374 }
375 
376 static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
377 				enum page_type type, enum temp_type temp)
378 {
379 	enum page_type btype = PAGE_TYPE_OF_BIO(type);
380 	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
381 
382 	down_write(&io->io_rwsem);
383 
384 	/* change META to META_FLUSH in the checkpoint procedure */
385 	if (type >= META_FLUSH) {
386 		io->fio.type = META_FLUSH;
387 		io->fio.op = REQ_OP_WRITE;
388 		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
389 		if (!test_opt(sbi, NOBARRIER))
390 			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
391 	}
392 	__submit_merged_bio(io);
393 	up_write(&io->io_rwsem);
394 }
395 
396 static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
397 				struct inode *inode, struct page *page,
398 				nid_t ino, enum page_type type, bool force)
399 {
400 	enum temp_type temp;
401 	bool ret = true;
402 
403 	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
404 		if (!force)	{
405 			enum page_type btype = PAGE_TYPE_OF_BIO(type);
406 			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
407 
408 			down_read(&io->io_rwsem);
409 			ret = __has_merged_page(io, inode, page, ino);
410 			up_read(&io->io_rwsem);
411 		}
412 		if (ret)
413 			__f2fs_submit_merged_write(sbi, type, temp);
414 
415 		/* TODO: use HOT temp only for meta pages now. */
416 		if (type >= META)
417 			break;
418 	}
419 }
420 
421 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
422 {
423 	__submit_merged_write_cond(sbi, NULL, 0, 0, type, true);
424 }
425 
426 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
427 				struct inode *inode, struct page *page,
428 				nid_t ino, enum page_type type)
429 {
430 	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
431 }
432 
433 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
434 {
435 	f2fs_submit_merged_write(sbi, DATA);
436 	f2fs_submit_merged_write(sbi, NODE);
437 	f2fs_submit_merged_write(sbi, META);
438 }
439 
440 /*
441  * Fill the locked page with data located at the given block address.
442  * A caller needs to unlock the page on failure.
443  */
444 int f2fs_submit_page_bio(struct f2fs_io_info *fio)
445 {
446 	struct bio *bio;
447 	struct page *page = fio->encrypted_page ?
448 			fio->encrypted_page : fio->page;
449 
450 	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
451 			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
452 		return -EFAULT;
453 
454 	trace_f2fs_submit_page_bio(page, fio);
455 	f2fs_trace_ios(fio, 0);
456 
457 	/* Allocate a new bio */
458 	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
459 				1, is_read_io(fio->op), fio->type, fio->temp);
460 
461 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
462 		bio_put(bio);
463 		return -EFAULT;
464 	}
465 
466 	if (fio->io_wbc && !is_read_io(fio->op))
467 		wbc_account_io(fio->io_wbc, page, PAGE_SIZE);
468 
469 	bio_set_op_attrs(bio, fio->op, fio->op_flags);
470 
471 	inc_page_count(fio->sbi, is_read_io(fio->op) ?
472 			__read_io_type(page): WB_DATA_TYPE(fio->page));
473 
474 	__submit_bio(fio->sbi, bio, fio->type);
475 	return 0;
476 }
477 
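/*
 * Minimal caller sketch (hypothetical; field names mirror the fio users
 * elsewhere in this file): issue a one-block read through
 * f2fs_submit_page_bio().
 *
 *	struct f2fs_io_info fio = {
 *		.sbi = sbi,
 *		.type = DATA,
 *		.op = REQ_OP_READ,
 *		.op_flags = 0,
 *		.page = page,
 *		.encrypted_page = NULL,
 *		.new_blkaddr = blkaddr,
 *	};
 *
 *	err = f2fs_submit_page_bio(&fio);	// caller unlocks page on failure
 */
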
478 void f2fs_submit_page_write(struct f2fs_io_info *fio)
479 {
480 	struct f2fs_sb_info *sbi = fio->sbi;
481 	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
482 	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
483 	struct page *bio_page;
484 
485 	f2fs_bug_on(sbi, is_read_io(fio->op));
486 
487 	down_write(&io->io_rwsem);
488 next:
489 	if (fio->in_list) {
490 		spin_lock(&io->io_lock);
491 		if (list_empty(&io->io_list)) {
492 			spin_unlock(&io->io_lock);
493 			goto out;
494 		}
495 		fio = list_first_entry(&io->io_list,
496 						struct f2fs_io_info, list);
497 		list_del(&fio->list);
498 		spin_unlock(&io->io_lock);
499 	}
500 
501 	if (__is_valid_data_blkaddr(fio->old_blkaddr))
502 		verify_block_addr(fio, fio->old_blkaddr);
503 	verify_block_addr(fio, fio->new_blkaddr);
504 
505 	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
506 
507 	/* set submitted = true as a return value */
508 	fio->submitted = true;
509 
510 	inc_page_count(sbi, WB_DATA_TYPE(bio_page));
511 
512 	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
513 	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
514 			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
515 		__submit_merged_bio(io);
516 alloc_new:
517 	if (io->bio == NULL) {
518 		if ((fio->type == DATA || fio->type == NODE) &&
519 				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
520 			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
521 			fio->retry = true;
522 			goto skip;
523 		}
524 		io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
525 						BIO_MAX_PAGES, false,
526 						fio->type, fio->temp);
527 		io->fio = *fio;
528 	}
529 
530 	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
531 		__submit_merged_bio(io);
532 		goto alloc_new;
533 	}
534 
535 	if (fio->io_wbc)
536 		wbc_account_io(fio->io_wbc, bio_page, PAGE_SIZE);
537 
538 	io->last_block_in_bio = fio->new_blkaddr;
539 	f2fs_trace_ios(fio, 0);
540 
541 	trace_f2fs_submit_page_write(fio->page, fio);
542 skip:
543 	if (fio->in_list)
544 		goto next;
545 out:
546 	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
547 				f2fs_is_checkpoint_ready(sbi))
548 		__submit_merged_bio(io);
549 	up_write(&io->io_rwsem);
550 }
551 
552 static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
553 					unsigned nr_pages, unsigned op_flag)
554 {
555 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
556 	struct bio *bio;
557 	struct bio_post_read_ctx *ctx;
558 	unsigned int post_read_steps = 0;
559 
560 	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
561 		return ERR_PTR(-EFAULT);
562 
563 	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
564 	if (!bio)
565 		return ERR_PTR(-ENOMEM);
566 	f2fs_target_device(sbi, blkaddr, bio);
567 	bio->bi_end_io = f2fs_read_end_io;
568 	bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
569 
570 	if (f2fs_encrypted_file(inode))
571 		post_read_steps |= 1 << STEP_DECRYPT;
572 	if (post_read_steps) {
573 		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
574 		if (!ctx) {
575 			bio_put(bio);
576 			return ERR_PTR(-ENOMEM);
577 		}
578 		ctx->bio = bio;
579 		ctx->enabled_steps = post_read_steps;
580 		bio->bi_private = ctx;
581 	}
582 
583 	return bio;
584 }
585 
586 /* This can handle encryption stuff */
587 static int f2fs_submit_page_read(struct inode *inode, struct page *page,
588 							block_t blkaddr)
589 {
590 	struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0);
591 
592 	if (IS_ERR(bio))
593 		return PTR_ERR(bio);
594 
595 	/* wait for GCed page writeback via META_MAPPING */
596 	f2fs_wait_on_block_writeback(inode, blkaddr);
597 
598 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
599 		bio_put(bio);
600 		return -EFAULT;
601 	}
602 	ClearPageError(page);
603 	inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
604 	__submit_bio(F2FS_I_SB(inode), bio, DATA);
605 	return 0;
606 }
607 
608 static void __set_data_blkaddr(struct dnode_of_data *dn)
609 {
610 	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
611 	__le32 *addr_array;
612 	int base = 0;
613 
614 	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
615 		base = get_extra_isize(dn->inode);
616 
617 	/* Get physical address of data block */
618 	addr_array = blkaddr_in_node(rn);
619 	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
620 }
621 
622 /*
623  * Lock ordering for the change of data block address:
624  * ->data_page
625  *  ->node_page
626  *    update block addresses in the node page
627  */
628 void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
629 {
630 	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
631 	__set_data_blkaddr(dn);
632 	if (set_page_dirty(dn->node_page))
633 		dn->node_changed = true;
634 }
635 
636 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
637 {
638 	dn->data_blkaddr = blkaddr;
639 	f2fs_set_data_blkaddr(dn);
640 	f2fs_update_extent_cache(dn);
641 }
642 
643 /* dn->ofs_in_node will be returned with up-to-date last block pointer */
644 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
645 {
646 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
647 	int err;
648 
649 	if (!count)
650 		return 0;
651 
652 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
653 		return -EPERM;
654 	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
655 		return err;
656 
657 	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
658 						dn->ofs_in_node, count);
659 
660 	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
661 
662 	for (; count > 0; dn->ofs_in_node++) {
663 		block_t blkaddr = datablock_addr(dn->inode,
664 					dn->node_page, dn->ofs_in_node);
665 		if (blkaddr == NULL_ADDR) {
666 			dn->data_blkaddr = NEW_ADDR;
667 			__set_data_blkaddr(dn);
668 			count--;
669 		}
670 	}
671 
672 	if (set_page_dirty(dn->node_page))
673 		dn->node_changed = true;
674 	return 0;
675 }
676 
677 /* Should keep dn->ofs_in_node unchanged */
678 int f2fs_reserve_new_block(struct dnode_of_data *dn)
679 {
680 	unsigned int ofs_in_node = dn->ofs_in_node;
681 	int ret;
682 
683 	ret = f2fs_reserve_new_blocks(dn, 1);
684 	dn->ofs_in_node = ofs_in_node;
685 	return ret;
686 }
687 
688 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
689 {
690 	bool need_put = dn->inode_page ? false : true;
691 	int err;
692 
693 	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
694 	if (err)
695 		return err;
696 
697 	if (dn->data_blkaddr == NULL_ADDR)
698 		err = f2fs_reserve_new_block(dn);
699 	if (err || need_put)
700 		f2fs_put_dnode(dn);
701 	return err;
702 }
703 
704 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
705 {
706 	struct extent_info ei  = {0,0,0};
707 	struct inode *inode = dn->inode;
708 
709 	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
710 		dn->data_blkaddr = ei.blk + index - ei.fofs;
711 		return 0;
712 	}
713 
714 	return f2fs_reserve_block(dn, index);
715 }
716 
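/*
 * Typical dnode lookup pattern (sketch; this is what the helpers above
 * wrap): resolve the block address backing @index.
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (!err) {
 *		blkaddr = dn.data_blkaddr;	// NULL_ADDR means a hole
 *		f2fs_put_dnode(&dn);
 *	}
 */
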
717 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
718 						int op_flags, bool for_write)
719 {
720 	struct address_space *mapping = inode->i_mapping;
721 	struct dnode_of_data dn;
722 	struct page *page;
723 	struct extent_info ei = {0,0,0};
724 	int err;
725 
726 	page = f2fs_grab_cache_page(mapping, index, for_write);
727 	if (!page)
728 		return ERR_PTR(-ENOMEM);
729 
730 	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
731 		dn.data_blkaddr = ei.blk + index - ei.fofs;
732 		goto got_it;
733 	}
734 
735 	set_new_dnode(&dn, inode, NULL, NULL, 0);
736 	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
737 	if (err)
738 		goto put_err;
739 	f2fs_put_dnode(&dn);
740 
741 	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
742 		err = -ENOENT;
743 		goto put_err;
744 	}
745 got_it:
746 	if (PageUptodate(page)) {
747 		unlock_page(page);
748 		return page;
749 	}
750 
751 	/*
752 	 * A new dentry page is allocated but cannot be written, since its
753 	 * new inode page couldn't be allocated due to -ENOSPC.
754 	 * In such a case, its blkaddr can remain NEW_ADDR.
755 	 * see, f2fs_add_link -> f2fs_get_new_data_page ->
756 	 * f2fs_init_inode_metadata.
757 	 */
758 	if (dn.data_blkaddr == NEW_ADDR) {
759 		zero_user_segment(page, 0, PAGE_SIZE);
760 		if (!PageUptodate(page))
761 			SetPageUptodate(page);
762 		unlock_page(page);
763 		return page;
764 	}
765 
766 	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
767 	if (err)
768 		goto put_err;
769 	return page;
770 
771 put_err:
772 	f2fs_put_page(page, 1);
773 	return ERR_PTR(err);
774 }
775 
776 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
777 {
778 	struct address_space *mapping = inode->i_mapping;
779 	struct page *page;
780 
781 	page = find_get_page(mapping, index);
782 	if (page && PageUptodate(page))
783 		return page;
784 	f2fs_put_page(page, 0);
785 
786 	page = f2fs_get_read_data_page(inode, index, 0, false);
787 	if (IS_ERR(page))
788 		return page;
789 
790 	if (PageUptodate(page))
791 		return page;
792 
793 	wait_on_page_locked(page);
794 	if (unlikely(!PageUptodate(page))) {
795 		f2fs_put_page(page, 0);
796 		return ERR_PTR(-EIO);
797 	}
798 	return page;
799 }
800 
801 /*
802  * If it tries to access a hole, return an error.
803  * The callers, i.e. functions in dir.c and GC, need to be able to tell
804  * whether this page exists or not.
805  */
806 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
807 							bool for_write)
808 {
809 	struct address_space *mapping = inode->i_mapping;
810 	struct page *page;
811 repeat:
812 	page = f2fs_get_read_data_page(inode, index, 0, for_write);
813 	if (IS_ERR(page))
814 		return page;
815 
816 	/* wait for read completion */
817 	lock_page(page);
818 	if (unlikely(page->mapping != mapping)) {
819 		f2fs_put_page(page, 1);
820 		goto repeat;
821 	}
822 	if (unlikely(!PageUptodate(page))) {
823 		f2fs_put_page(page, 1);
824 		return ERR_PTR(-EIO);
825 	}
826 	return page;
827 }
828 
829 /*
830  * Caller ensures that this data page is never allocated.
831  * A new zero-filled data page is allocated in the page cache.
832  *
833  * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
834  * f2fs_unlock_op().
835  * Note that ipage is set only by make_empty_dir, and if any error occurs,
836  * ipage should be released by this function.
837  */
838 struct page *f2fs_get_new_data_page(struct inode *inode,
839 		struct page *ipage, pgoff_t index, bool new_i_size)
840 {
841 	struct address_space *mapping = inode->i_mapping;
842 	struct page *page;
843 	struct dnode_of_data dn;
844 	int err;
845 
846 	page = f2fs_grab_cache_page(mapping, index, true);
847 	if (!page) {
848 		/*
849 		 * before exiting, we should make sure ipage will be released
850 		 * if any error occurs.
851 		 */
852 		f2fs_put_page(ipage, 1);
853 		return ERR_PTR(-ENOMEM);
854 	}
855 
856 	set_new_dnode(&dn, inode, ipage, NULL, 0);
857 	err = f2fs_reserve_block(&dn, index);
858 	if (err) {
859 		f2fs_put_page(page, 1);
860 		return ERR_PTR(err);
861 	}
862 	if (!ipage)
863 		f2fs_put_dnode(&dn);
864 
865 	if (PageUptodate(page))
866 		goto got_it;
867 
868 	if (dn.data_blkaddr == NEW_ADDR) {
869 		zero_user_segment(page, 0, PAGE_SIZE);
870 		if (!PageUptodate(page))
871 			SetPageUptodate(page);
872 	} else {
873 		f2fs_put_page(page, 1);
874 
875 		/* if ipage exists, blkaddr should be NEW_ADDR */
876 		f2fs_bug_on(F2FS_I_SB(inode), ipage);
877 		page = f2fs_get_lock_data_page(inode, index, true);
878 		if (IS_ERR(page))
879 			return page;
880 	}
881 got_it:
882 	if (new_i_size && i_size_read(inode) <
883 				((loff_t)(index + 1) << PAGE_SHIFT))
884 		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
885 	return page;
886 }
887 
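/*
 * Caller sketch (hypothetical, modeled on the make_empty_dir case noted
 * above): ipage ownership passes to f2fs_get_new_data_page() on error,
 * so the caller must not release it again.
 *
 *	page = f2fs_get_new_data_page(inode, ipage, 0, true);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);	// ipage already released
 */
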
888 static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
889 {
890 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
891 	struct f2fs_summary sum;
892 	struct node_info ni;
893 	block_t old_blkaddr;
894 	blkcnt_t count = 1;
895 	int err;
896 
897 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
898 		return -EPERM;
899 
900 	err = f2fs_get_node_info(sbi, dn->nid, &ni);
901 	if (err)
902 		return err;
903 
904 	dn->data_blkaddr = datablock_addr(dn->inode,
905 				dn->node_page, dn->ofs_in_node);
906 	if (dn->data_blkaddr != NULL_ADDR)
907 		goto alloc;
908 
909 	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
910 		return err;
911 
912 alloc:
913 	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
914 	old_blkaddr = dn->data_blkaddr;
915 	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
916 					&sum, seg_type, NULL, false);
917 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
918 		invalidate_mapping_pages(META_MAPPING(sbi),
919 					old_blkaddr, old_blkaddr);
920 	f2fs_set_data_blkaddr(dn);
921 
922 	/*
923 	 * i_size will be updated by direct_IO. Otherwise, we'll get stale
924 	 * data from an unwritten block via dio_read.
925 	 */
926 	return 0;
927 }
928 
929 int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
930 {
931 	struct inode *inode = file_inode(iocb->ki_filp);
932 	struct f2fs_map_blocks map;
933 	int flag;
934 	int err = 0;
935 	bool direct_io = iocb->ki_flags & IOCB_DIRECT;
936 
937 	/* convert inline data for Direct I/O */
938 	if (direct_io) {
939 		err = f2fs_convert_inline_inode(inode);
940 		if (err)
941 			return err;
942 	}
943 
944 	if (direct_io && allow_outplace_dio(inode, iocb, from))
945 		return 0;
946 
947 	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
948 		return 0;
949 
950 	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
951 	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
952 	if (map.m_len > map.m_lblk)
953 		map.m_len -= map.m_lblk;
954 	else
955 		map.m_len = 0;
956 
957 	map.m_next_pgofs = NULL;
958 	map.m_next_extent = NULL;
959 	map.m_seg_type = NO_CHECK_TYPE;
960 	map.m_may_create = true;
961 
962 	if (direct_io) {
963 		map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
964 		flag = f2fs_force_buffered_io(inode, iocb, from) ?
965 					F2FS_GET_BLOCK_PRE_AIO :
966 					F2FS_GET_BLOCK_PRE_DIO;
967 		goto map_blocks;
968 	}
969 	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
970 		err = f2fs_convert_inline_inode(inode);
971 		if (err)
972 			return err;
973 	}
974 	if (f2fs_has_inline_data(inode))
975 		return err;
976 
977 	flag = F2FS_GET_BLOCK_PRE_AIO;
978 
979 map_blocks:
980 	err = f2fs_map_blocks(inode, &map, 1, flag);
981 	if (map.m_len > 0 && err == -ENOSPC) {
982 		if (!direct_io)
983 			set_inode_flag(inode, FI_NO_PREALLOC);
984 		err = 0;
985 	}
986 	return err;
987 }
988 
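/*
 * Worked example (assuming 4KB blocks and that F2FS_BLK_ALIGN() rounds a
 * byte offset up to a block count, its usual definition): for
 * ki_pos = 5000 with 10000 bytes to write, m_lblk = F2FS_BLK_ALIGN(5000)
 * = 2 and F2FS_BYTES_TO_BLK(15000) = 3, so m_len = 3 - 2 = 1.  Only
 * block 2, the single block fully covered by the write, is preallocated;
 * the partially covered head and tail blocks are left to the write path.
 */
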
989 void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
990 {
991 	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
992 		if (lock)
993 			down_read(&sbi->node_change);
994 		else
995 			up_read(&sbi->node_change);
996 	} else {
997 		if (lock)
998 			f2fs_lock_op(sbi);
999 		else
1000 			f2fs_unlock_op(sbi);
1001 	}
1002 }
1003 
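/*
 * Pairing sketch: __do_map_lock() must be called with lock=true and later
 * with lock=false for the same @flag, as f2fs_map_blocks() below does.
 *
 *	__do_map_lock(sbi, flag, true);		// node_change or cp lock
 *	... allocate / update block addresses ...
 *	__do_map_lock(sbi, flag, false);	// release the same lock
 */
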
1004 /*
1005  * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
1006  * f2fs_map_blocks structure.
1007  * If original data blocks are allocated, then give them to blockdev.
1008  * Otherwise,
1009  *     a. preallocate requested block addresses
1010  *     b. do not use extent cache for better performance
1011  *     c. give the block addresses to blockdev
1012  */
1013 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
1014 						int create, int flag)
1015 {
1016 	unsigned int maxblocks = map->m_len;
1017 	struct dnode_of_data dn;
1018 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1019 	int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
1020 	pgoff_t pgofs, end_offset, end;
1021 	int err = 0, ofs = 1;
1022 	unsigned int ofs_in_node, last_ofs_in_node;
1023 	blkcnt_t prealloc;
1024 	struct extent_info ei = {0,0,0};
1025 	block_t blkaddr;
1026 	unsigned int start_pgofs;
1027 
1028 	if (!maxblocks)
1029 		return 0;
1030 
1031 	map->m_len = 0;
1032 	map->m_flags = 0;
1033 
1034 	/* it only supports block size == page size */
1035 	pgofs =	(pgoff_t)map->m_lblk;
1036 	end = pgofs + maxblocks;
1037 
1038 	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
1039 		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
1040 							map->m_may_create)
1041 			goto next_dnode;
1042 
1043 		map->m_pblk = ei.blk + pgofs - ei.fofs;
1044 		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
1045 		map->m_flags = F2FS_MAP_MAPPED;
1046 		if (map->m_next_extent)
1047 			*map->m_next_extent = pgofs + map->m_len;
1048 
1049 		/* for hardware encryption, and to avoid potential future issues */
1050 		if (flag == F2FS_GET_BLOCK_DIO)
1051 			f2fs_wait_on_block_writeback_range(inode,
1052 						map->m_pblk, map->m_len);
1053 		goto out;
1054 	}
1055 
1056 next_dnode:
1057 	if (map->m_may_create)
1058 		__do_map_lock(sbi, flag, true);
1059 
1060 	/* When reading holes, we need its node page */
1061 	set_new_dnode(&dn, inode, NULL, NULL, 0);
1062 	err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
1063 	if (err) {
1064 		if (flag == F2FS_GET_BLOCK_BMAP)
1065 			map->m_pblk = 0;
1066 		if (err == -ENOENT) {
1067 			err = 0;
1068 			if (map->m_next_pgofs)
1069 				*map->m_next_pgofs =
1070 					f2fs_get_next_page_offset(&dn, pgofs);
1071 			if (map->m_next_extent)
1072 				*map->m_next_extent =
1073 					f2fs_get_next_page_offset(&dn, pgofs);
1074 		}
1075 		goto unlock_out;
1076 	}
1077 
1078 	start_pgofs = pgofs;
1079 	prealloc = 0;
1080 	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
1081 	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1082 
1083 next_block:
1084 	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
1085 
1086 	if (__is_valid_data_blkaddr(blkaddr) &&
1087 		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
1088 		err = -EFAULT;
1089 		goto sync_out;
1090 	}
1091 
1092 	if (is_valid_data_blkaddr(sbi, blkaddr)) {
1093 		/* use out-of-place update for direct IO under LFS mode */
1094 		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
1095 							map->m_may_create) {
1096 			err = __allocate_data_block(&dn, map->m_seg_type);
1097 			if (!err) {
1098 				blkaddr = dn.data_blkaddr;
1099 				set_inode_flag(inode, FI_APPEND_WRITE);
1100 			}
1101 		}
1102 	} else {
1103 		if (create) {
1104 			if (unlikely(f2fs_cp_error(sbi))) {
1105 				err = -EIO;
1106 				goto sync_out;
1107 			}
1108 			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
1109 				if (blkaddr == NULL_ADDR) {
1110 					prealloc++;
1111 					last_ofs_in_node = dn.ofs_in_node;
1112 				}
1113 			} else {
1114 				WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
1115 					flag != F2FS_GET_BLOCK_DIO);
1116 				err = __allocate_data_block(&dn,
1117 							map->m_seg_type);
1118 				if (!err)
1119 					set_inode_flag(inode, FI_APPEND_WRITE);
1120 			}
1121 			if (err)
1122 				goto sync_out;
1123 			map->m_flags |= F2FS_MAP_NEW;
1124 			blkaddr = dn.data_blkaddr;
1125 		} else {
1126 			if (flag == F2FS_GET_BLOCK_BMAP) {
1127 				map->m_pblk = 0;
1128 				goto sync_out;
1129 			}
1130 			if (flag == F2FS_GET_BLOCK_PRECACHE)
1131 				goto sync_out;
1132 			if (flag == F2FS_GET_BLOCK_FIEMAP &&
1133 						blkaddr == NULL_ADDR) {
1134 				if (map->m_next_pgofs)
1135 					*map->m_next_pgofs = pgofs + 1;
1136 				goto sync_out;
1137 			}
1138 			if (flag != F2FS_GET_BLOCK_FIEMAP) {
1139 				/* for defragment case */
1140 				if (map->m_next_pgofs)
1141 					*map->m_next_pgofs = pgofs + 1;
1142 				goto sync_out;
1143 			}
1144 		}
1145 	}
1146 
1147 	if (flag == F2FS_GET_BLOCK_PRE_AIO)
1148 		goto skip;
1149 
1150 	if (map->m_len == 0) {
1151 		/* preallocated unwritten block should be mapped for fiemap. */
1152 		if (blkaddr == NEW_ADDR)
1153 			map->m_flags |= F2FS_MAP_UNWRITTEN;
1154 		map->m_flags |= F2FS_MAP_MAPPED;
1155 
1156 		map->m_pblk = blkaddr;
1157 		map->m_len = 1;
1158 	} else if ((map->m_pblk != NEW_ADDR &&
1159 			blkaddr == (map->m_pblk + ofs)) ||
1160 			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
1161 			flag == F2FS_GET_BLOCK_PRE_DIO) {
1162 		ofs++;
1163 		map->m_len++;
1164 	} else {
1165 		goto sync_out;
1166 	}
1167 
1168 skip:
1169 	dn.ofs_in_node++;
1170 	pgofs++;
1171 
1172 	/* preallocate blocks in batch for one dnode page */
1173 	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
1174 			(pgofs == end || dn.ofs_in_node == end_offset)) {
1175 
1176 		dn.ofs_in_node = ofs_in_node;
1177 		err = f2fs_reserve_new_blocks(&dn, prealloc);
1178 		if (err)
1179 			goto sync_out;
1180 
1181 		map->m_len += dn.ofs_in_node - ofs_in_node;
1182 		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
1183 			err = -ENOSPC;
1184 			goto sync_out;
1185 		}
1186 		dn.ofs_in_node = end_offset;
1187 	}
1188 
1189 	if (pgofs >= end)
1190 		goto sync_out;
1191 	else if (dn.ofs_in_node < end_offset)
1192 		goto next_block;
1193 
1194 	if (flag == F2FS_GET_BLOCK_PRECACHE) {
1195 		if (map->m_flags & F2FS_MAP_MAPPED) {
1196 			unsigned int ofs = start_pgofs - map->m_lblk;
1197 
1198 			f2fs_update_extent_cache_range(&dn,
1199 				start_pgofs, map->m_pblk + ofs,
1200 				map->m_len - ofs);
1201 		}
1202 	}
1203 
1204 	f2fs_put_dnode(&dn);
1205 
1206 	if (map->m_may_create) {
1207 		__do_map_lock(sbi, flag, false);
1208 		f2fs_balance_fs(sbi, dn.node_changed);
1209 	}
1210 	goto next_dnode;
1211 
1212 sync_out:
1213 
1214 	/* for hardware encryption, and to avoid potential future issues */
1215 	if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
1216 		f2fs_wait_on_block_writeback_range(inode,
1217 						map->m_pblk, map->m_len);
1218 
1219 	if (flag == F2FS_GET_BLOCK_PRECACHE) {
1220 		if (map->m_flags & F2FS_MAP_MAPPED) {
1221 			unsigned int ofs = start_pgofs - map->m_lblk;
1222 
1223 			f2fs_update_extent_cache_range(&dn,
1224 				start_pgofs, map->m_pblk + ofs,
1225 				map->m_len - ofs);
1226 		}
1227 		if (map->m_next_extent)
1228 			*map->m_next_extent = pgofs + 1;
1229 	}
1230 	f2fs_put_dnode(&dn);
1231 unlock_out:
1232 	if (map->m_may_create) {
1233 		__do_map_lock(sbi, flag, false);
1234 		f2fs_balance_fs(sbi, dn.node_changed);
1235 	}
1236 out:
1237 	trace_f2fs_map_blocks(inode, map, err);
1238 	return err;
1239 }
1240 
1241 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1242 {
1243 	struct f2fs_map_blocks map;
1244 	block_t last_lblk;
1245 	int err;
1246 
1247 	if (pos + len > i_size_read(inode))
1248 		return false;
1249 
1250 	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1251 	map.m_next_pgofs = NULL;
1252 	map.m_next_extent = NULL;
1253 	map.m_seg_type = NO_CHECK_TYPE;
1254 	map.m_may_create = false;
1255 	last_lblk = F2FS_BLK_ALIGN(pos + len);
1256 
1257 	while (map.m_lblk < last_lblk) {
1258 		map.m_len = last_lblk - map.m_lblk;
1259 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
1260 		if (err || map.m_len == 0)
1261 			return false;
1262 		map.m_lblk += map.m_len;
1263 	}
1264 	return true;
1265 }
1266 
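/*
 * Lookup-only sketch (hypothetical caller of f2fs_map_blocks() above):
 * resolve one logical block without allocating anything.
 *
 *	struct f2fs_map_blocks map = {
 *		.m_lblk = index,
 *		.m_len = 1,
 *		.m_next_pgofs = NULL,
 *		.m_next_extent = NULL,
 *		.m_seg_type = NO_CHECK_TYPE,
 *		.m_may_create = false,
 *	};
 *
 *	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP);
 *	// on success, map.m_pblk/map.m_len describe the mapped run
 */
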
1267 static int __get_data_block(struct inode *inode, sector_t iblock,
1268 			struct buffer_head *bh, int create, int flag,
1269 			pgoff_t *next_pgofs, int seg_type, bool may_write)
1270 {
1271 	struct f2fs_map_blocks map;
1272 	int err;
1273 
1274 	map.m_lblk = iblock;
1275 	map.m_len = bh->b_size >> inode->i_blkbits;
1276 	map.m_next_pgofs = next_pgofs;
1277 	map.m_next_extent = NULL;
1278 	map.m_seg_type = seg_type;
1279 	map.m_may_create = may_write;
1280 
1281 	err = f2fs_map_blocks(inode, &map, create, flag);
1282 	if (!err) {
1283 		map_bh(bh, inode->i_sb, map.m_pblk);
1284 		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
1285 		bh->b_size = (u64)map.m_len << inode->i_blkbits;
1286 	}
1287 	return err;
1288 }
1289 
1290 static int get_data_block(struct inode *inode, sector_t iblock,
1291 			struct buffer_head *bh_result, int create, int flag,
1292 			pgoff_t *next_pgofs)
1293 {
1294 	return __get_data_block(inode, iblock, bh_result, create,
1295 							flag, next_pgofs,
1296 							NO_CHECK_TYPE, create);
1297 }
1298 
1299 static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
1300 			struct buffer_head *bh_result, int create)
1301 {
1302 	return __get_data_block(inode, iblock, bh_result, create,
1303 				F2FS_GET_BLOCK_DIO, NULL,
1304 				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1305 				true);
1306 }
1307 
1308 static int get_data_block_dio(struct inode *inode, sector_t iblock,
1309 			struct buffer_head *bh_result, int create)
1310 {
1311 	return __get_data_block(inode, iblock, bh_result, create,
1312 				F2FS_GET_BLOCK_DIO, NULL,
1313 				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1314 				false);
1315 }
1316 
1317 static int get_data_block_bmap(struct inode *inode, sector_t iblock,
1318 			struct buffer_head *bh_result, int create)
1319 {
1320 	/* Block number less than F2FS MAX BLOCKS */
1321 	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
1322 		return -EFBIG;
1323 
1324 	return __get_data_block(inode, iblock, bh_result, create,
1325 						F2FS_GET_BLOCK_BMAP, NULL,
1326 						NO_CHECK_TYPE, create);
1327 }
1328 
1329 static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
1330 {
1331 	return (offset >> inode->i_blkbits);
1332 }
1333 
1334 static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
1335 {
1336 	return (blk << inode->i_blkbits);
1337 }
1338 
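/*
 * Example: with 4KB blocks (i_blkbits == 12),
 * logical_to_blk(inode, 8192) == 2 and blk_to_logical(inode, 2) == 8192.
 */
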
1339 static int f2fs_xattr_fiemap(struct inode *inode,
1340 				struct fiemap_extent_info *fieinfo)
1341 {
1342 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1343 	struct page *page;
1344 	struct node_info ni;
1345 	__u64 phys = 0, len;
1346 	__u32 flags;
1347 	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1348 	int err = 0;
1349 
1350 	if (f2fs_has_inline_xattr(inode)) {
1351 		int offset;
1352 
1353 		page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1354 						inode->i_ino, false);
1355 		if (!page)
1356 			return -ENOMEM;
1357 
1358 		err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
1359 		if (err) {
1360 			f2fs_put_page(page, 1);
1361 			return err;
1362 		}
1363 
1364 		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
1365 		offset = offsetof(struct f2fs_inode, i_addr) +
1366 					sizeof(__le32) * (DEF_ADDRS_PER_INODE -
1367 					get_inline_xattr_addrs(inode));
1368 
1369 		phys += offset;
1370 		len = inline_xattr_size(inode);
1371 
1372 		f2fs_put_page(page, 1);
1373 
1374 		flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1375 
1376 		if (!xnid)
1377 			flags |= FIEMAP_EXTENT_LAST;
1378 
1379 		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1380 		if (err || err == 1)
1381 			return err;
1382 	}
1383 
1384 	if (xnid) {
1385 		page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1386 		if (!page)
1387 			return -ENOMEM;
1388 
1389 		err = f2fs_get_node_info(sbi, xnid, &ni);
1390 		if (err) {
1391 			f2fs_put_page(page, 1);
1392 			return err;
1393 		}
1394 
1395 		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
1396 		len = inode->i_sb->s_blocksize;
1397 
1398 		f2fs_put_page(page, 1);
1399 
1400 		flags = FIEMAP_EXTENT_LAST;
1401 	}
1402 
1403 	if (phys)
1404 		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1405 
1406 	return (err < 0 ? err : 0);
1407 }
1408 
1409 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1410 		u64 start, u64 len)
1411 {
1412 	struct buffer_head map_bh;
1413 	sector_t start_blk, last_blk;
1414 	pgoff_t next_pgofs;
1415 	u64 logical = 0, phys = 0, size = 0;
1416 	u32 flags = 0;
1417 	int ret = 0;
1418 
1419 	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1420 		ret = f2fs_precache_extents(inode);
1421 		if (ret)
1422 			return ret;
1423 	}
1424 
1425 	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR);
1426 	if (ret)
1427 		return ret;
1428 
1429 	inode_lock(inode);
1430 
1431 	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1432 		ret = f2fs_xattr_fiemap(inode, fieinfo);
1433 		goto out;
1434 	}
1435 
1436 	if (f2fs_has_inline_data(inode)) {
1437 		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
1438 		if (ret != -EAGAIN)
1439 			goto out;
1440 	}
1441 
1442 	if (logical_to_blk(inode, len) == 0)
1443 		len = blk_to_logical(inode, 1);
1444 
1445 	start_blk = logical_to_blk(inode, start);
1446 	last_blk = logical_to_blk(inode, start + len - 1);
1447 
1448 next:
1449 	memset(&map_bh, 0, sizeof(struct buffer_head));
1450 	map_bh.b_size = len;
1451 
1452 	ret = get_data_block(inode, start_blk, &map_bh, 0,
1453 					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
1454 	if (ret)
1455 		goto out;
1456 
1457 	/* HOLE */
1458 	if (!buffer_mapped(&map_bh)) {
1459 		start_blk = next_pgofs;
1460 
1461 		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
1462 					F2FS_I_SB(inode)->max_file_blocks))
1463 			goto prep_next;
1464 
1465 		flags |= FIEMAP_EXTENT_LAST;
1466 	}
1467 
1468 	if (size) {
1469 		if (IS_ENCRYPTED(inode))
1470 			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
1471 
1472 		ret = fiemap_fill_next_extent(fieinfo, logical,
1473 				phys, size, flags);
1474 	}
1475 
1476 	if (start_blk > last_blk || ret)
1477 		goto out;
1478 
1479 	logical = blk_to_logical(inode, start_blk);
1480 	phys = blk_to_logical(inode, map_bh.b_blocknr);
1481 	size = map_bh.b_size;
1482 	flags = 0;
1483 	if (buffer_unwritten(&map_bh))
1484 		flags = FIEMAP_EXTENT_UNWRITTEN;
1485 
1486 	start_blk += logical_to_blk(inode, size);
1487 
1488 prep_next:
1489 	cond_resched();
1490 	if (fatal_signal_pending(current))
1491 		ret = -EINTR;
1492 	else
1493 		goto next;
1494 out:
1495 	if (ret == 1)
1496 		ret = 0;
1497 
1498 	inode_unlock(inode);
1499 	return ret;
1500 }
1501 
1502 /*
1503  * This function was originally taken from fs/mpage.c, and customized for f2fs.
1504  * The major change is that block_size == page_size in f2fs by default.
1505  *
1506  * Note that the aops->readpages() function is ONLY used for read-ahead. If
1507  * this function ever deviates from doing just read-ahead, it should either
1508  * use ->readpage() or do the necessary surgery to decouple ->readpages()
1509  * from read-ahead.
1510  */
1511 static int f2fs_mpage_readpages(struct address_space *mapping,
1512 			struct list_head *pages, struct page *page,
1513 			unsigned nr_pages, bool is_readahead)
1514 {
1515 	struct bio *bio = NULL;
1516 	sector_t last_block_in_bio = 0;
1517 	struct inode *inode = mapping->host;
1518 	const unsigned blkbits = inode->i_blkbits;
1519 	const unsigned blocksize = 1 << blkbits;
1520 	sector_t block_in_file;
1521 	sector_t last_block;
1522 	sector_t last_block_in_file;
1523 	sector_t block_nr;
1524 	struct f2fs_map_blocks map;
1525 
1526 	map.m_pblk = 0;
1527 	map.m_lblk = 0;
1528 	map.m_len = 0;
1529 	map.m_flags = 0;
1530 	map.m_next_pgofs = NULL;
1531 	map.m_next_extent = NULL;
1532 	map.m_seg_type = NO_CHECK_TYPE;
1533 	map.m_may_create = false;
1534 
1535 	for (; nr_pages; nr_pages--) {
1536 		if (pages) {
1537 			page = list_last_entry(pages, struct page, lru);
1538 
1539 			prefetchw(&page->flags);
1540 			list_del(&page->lru);
1541 			if (add_to_page_cache_lru(page, mapping,
1542 						  page->index,
1543 						  readahead_gfp_mask(mapping)))
1544 				goto next_page;
1545 		}
1546 
1547 		block_in_file = (sector_t)page->index;
1548 		last_block = block_in_file + nr_pages;
1549 		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
1550 								blkbits;
1551 		if (last_block > last_block_in_file)
1552 			last_block = last_block_in_file;
1553 
1554 		/* just zeroing out page which is beyond EOF */
1555 		/* just zero out the page, which is beyond EOF */
1556 			goto zero_out;
1557 		/*
1558 		 * Map blocks using the previous result first.
1559 		 */
1560 		if ((map.m_flags & F2FS_MAP_MAPPED) &&
1561 				block_in_file > map.m_lblk &&
1562 				block_in_file < (map.m_lblk + map.m_len))
1563 			goto got_it;
1564 
1565 		/*
1566 		 * Then do more f2fs_map_blocks() calls until we are
1567 		 * done with this page.
1568 		 */
1569 		map.m_lblk = block_in_file;
1570 		map.m_len = last_block - block_in_file;
1571 
1572 		if (f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT))
1573 			goto set_error_page;
1574 got_it:
1575 		if ((map.m_flags & F2FS_MAP_MAPPED)) {
1576 			block_nr = map.m_pblk + block_in_file - map.m_lblk;
1577 			SetPageMappedToDisk(page);
1578 
1579 			if (!PageUptodate(page) && !cleancache_get_page(page)) {
1580 				SetPageUptodate(page);
1581 				goto confused;
1582 			}
1583 
1584 			if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
1585 								DATA_GENERIC))
1586 				goto set_error_page;
1587 		} else {
1588 zero_out:
1589 			zero_user_segment(page, 0, PAGE_SIZE);
1590 			if (!PageUptodate(page))
1591 				SetPageUptodate(page);
1592 			unlock_page(page);
1593 			goto next_page;
1594 		}
1595 
1596 		/*
1597 		 * This page will go to BIO.  Do we need to send this
1598 		 * BIO off first?
1599 		 */
1600 		if (bio && (last_block_in_bio != block_nr - 1 ||
1601 			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
1602 submit_and_realloc:
1603 			__submit_bio(F2FS_I_SB(inode), bio, DATA);
1604 			bio = NULL;
1605 		}
1606 		if (bio == NULL) {
1607 			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
1608 					is_readahead ? REQ_RAHEAD : 0);
1609 			if (IS_ERR(bio)) {
1610 				bio = NULL;
1611 				goto set_error_page;
1612 			}
1613 		}
1614 
1615 		/*
1616 		 * If the page is under writeback, we need to wait for
1617 		 * its completion to see the correct decrypted data.
1618 		 */
1619 		f2fs_wait_on_block_writeback(inode, block_nr);
1620 
1621 		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
1622 			goto submit_and_realloc;
1623 
1624 		inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
1625 		ClearPageError(page);
1626 		last_block_in_bio = block_nr;
1627 		goto next_page;
1628 set_error_page:
1629 		SetPageError(page);
1630 		zero_user_segment(page, 0, PAGE_SIZE);
1631 		unlock_page(page);
1632 		goto next_page;
1633 confused:
1634 		if (bio) {
1635 			__submit_bio(F2FS_I_SB(inode), bio, DATA);
1636 			bio = NULL;
1637 		}
1638 		unlock_page(page);
1639 next_page:
1640 		if (pages)
1641 			put_page(page);
1642 	}
1643 	BUG_ON(pages && !list_empty(pages));
1644 	if (bio)
1645 		__submit_bio(F2FS_I_SB(inode), bio, DATA);
1646 	return 0;
1647 }
1648 
1649 static int f2fs_read_data_page(struct file *file, struct page *page)
1650 {
1651 	struct inode *inode = page->mapping->host;
1652 	int ret = -EAGAIN;
1653 
1654 	trace_f2fs_readpage(page, DATA);
1655 
1656 	/* If the file has inline data, try to read it directly */
1657 	if (f2fs_has_inline_data(inode))
1658 		ret = f2fs_read_inline_data(inode, page);
1659 	if (ret == -EAGAIN)
1660 		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1, false);
1661 	return ret;
1662 }
1663 
1664 static int f2fs_read_data_pages(struct file *file,
1665 			struct address_space *mapping,
1666 			struct list_head *pages, unsigned nr_pages)
1667 {
1668 	struct inode *inode = mapping->host;
1669 	struct page *page = list_last_entry(pages, struct page, lru);
1670 
1671 	trace_f2fs_readpages(inode, page, nr_pages);
1672 
1673 	/* If the file has inline data, skip readpages */
1674 	if (f2fs_has_inline_data(inode))
1675 		return 0;
1676 
1677 	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true);
1678 }
1679 
1680 static int encrypt_one_page(struct f2fs_io_info *fio)
1681 {
1682 	struct inode *inode = fio->page->mapping->host;
1683 	struct page *mpage;
1684 	gfp_t gfp_flags = GFP_NOFS;
1685 
1686 	if (!f2fs_encrypted_file(inode))
1687 		return 0;
1688 
1689 	/* wait for GCed page writeback via META_MAPPING */
1690 	f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
1691 
1692 retry_encrypt:
1693 	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
1694 			PAGE_SIZE, 0, fio->page->index, gfp_flags);
1695 	if (IS_ERR(fio->encrypted_page)) {
1696 		/* flush pending IOs and wait for a while in the ENOMEM case */
1697 		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
1698 			f2fs_flush_merged_writes(fio->sbi);
1699 			congestion_wait(BLK_RW_ASYNC, HZ/50);
1700 			gfp_flags |= __GFP_NOFAIL;
1701 			goto retry_encrypt;
1702 		}
1703 		return PTR_ERR(fio->encrypted_page);
1704 	}
1705 
1706 	mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
1707 	if (mpage) {
1708 		if (PageUptodate(mpage))
1709 			memcpy(page_address(mpage),
1710 				page_address(fio->encrypted_page), PAGE_SIZE);
1711 		f2fs_put_page(mpage, 1);
1712 	}
1713 	return 0;
1714 }
1715 
1716 static inline bool check_inplace_update_policy(struct inode *inode,
1717 				struct f2fs_io_info *fio)
1718 {
1719 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1720 	unsigned int policy = SM_I(sbi)->ipu_policy;
1721 
1722 	if (policy & (0x1 << F2FS_IPU_FORCE))
1723 		return true;
1724 	if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
1725 		return true;
1726 	if (policy & (0x1 << F2FS_IPU_UTIL) &&
1727 			utilization(sbi) > SM_I(sbi)->min_ipu_util)
1728 		return true;
1729 	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
1730 			utilization(sbi) > SM_I(sbi)->min_ipu_util)
1731 		return true;
1732 
1733 	/*
1734 	 * IPU for rewriting async pages
1735 	 */
1736 	if (policy & (0x1 << F2FS_IPU_ASYNC) &&
1737 			fio && fio->op == REQ_OP_WRITE &&
1738 			!(fio->op_flags & REQ_SYNC) &&
1739 			!IS_ENCRYPTED(inode))
1740 		return true;
1741 
1742 	/* this is only set during fdatasync */
1743 	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
1744 			is_inode_flag_set(inode, FI_NEED_IPU))
1745 		return true;
1746 
1747 	if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
1748 			!f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
1749 		return true;
1750 
1751 	return false;
1752 }
1753 
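/*
 * Example: with ipu_policy = (1 << F2FS_IPU_SSR) | (1 << F2FS_IPU_FSYNC),
 * in-place update is chosen either when SSR allocation is needed or when
 * FI_NEED_IPU was set by fdatasync; otherwise data goes out-of-place.
 */
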
1754 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
1755 {
1756 	if (f2fs_is_pinned_file(inode))
1757 		return true;
1758 
1759 	/* if this is a cold file, we should overwrite to avoid fragmentation */
1760 	if (file_is_cold(inode))
1761 		return true;
1762 
1763 	return check_inplace_update_policy(inode, fio);
1764 }
1765 
1766 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
1767 {
1768 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1769 
1770 	if (test_opt(sbi, LFS))
1771 		return true;
1772 	if (S_ISDIR(inode->i_mode))
1773 		return true;
1774 	if (IS_NOQUOTA(inode))
1775 		return true;
1776 	if (f2fs_is_atomic_file(inode))
1777 		return true;
1778 	if (fio) {
1779 		if (is_cold_data(fio->page))
1780 			return true;
1781 		if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
1782 			return true;
1783 		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
1784 			f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
1785 			return true;
1786 	}
1787 	return false;
1788 }
1789 
1790 static inline bool need_inplace_update(struct f2fs_io_info *fio)
1791 {
1792 	struct inode *inode = fio->page->mapping->host;
1793 
1794 	if (f2fs_should_update_outplace(inode, fio))
1795 		return false;
1796 
1797 	return f2fs_should_update_inplace(inode, fio);
1798 }
1799 
1800 int f2fs_do_write_data_page(struct f2fs_io_info *fio)
1801 {
1802 	struct page *page = fio->page;
1803 	struct inode *inode = page->mapping->host;
1804 	struct dnode_of_data dn;
1805 	struct extent_info ei = {0,0,0};
1806 	struct node_info ni;
1807 	bool ipu_force = false;
1808 	int err = 0;
1809 
1810 	set_new_dnode(&dn, inode, NULL, NULL, 0);
1811 	if (need_inplace_update(fio) &&
1812 			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
1813 		fio->old_blkaddr = ei.blk + page->index - ei.fofs;
1814 
1815 		if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
1816 							DATA_GENERIC))
1817 			return -EFAULT;
1818 
1819 		ipu_force = true;
1820 		fio->need_lock = LOCK_DONE;
1821 		goto got_it;
1822 	}
1823 
1824 	/* Avoid deadlock between page->lock and f2fs_lock_op */
1825 	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
1826 		return -EAGAIN;
1827 
1828 	err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
1829 	if (err)
1830 		goto out;
1831 
1832 	fio->old_blkaddr = dn.data_blkaddr;
1833 
1834 	/* This page is already truncated */
1835 	if (fio->old_blkaddr == NULL_ADDR) {
1836 		ClearPageUptodate(page);
1837 		clear_cold_data(page);
1838 		goto out_writepage;
1839 	}
1840 got_it:
1841 	if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
1842 		!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
1843 							DATA_GENERIC)) {
1844 		err = -EFAULT;
1845 		goto out_writepage;
1846 	}
1847 	/*
1848 	 * If the current allocation needs SSR,
1849 	 * in-place writes are preferable for updated data.
1850 	 */
1851 	if (ipu_force || (is_valid_data_blkaddr(fio->sbi, fio->old_blkaddr) &&
1852 					need_inplace_update(fio))) {
1853 		err = encrypt_one_page(fio);
1854 		if (err)
1855 			goto out_writepage;
1856 
1857 		set_page_writeback(page);
1858 		ClearPageError(page);
1859 		f2fs_put_dnode(&dn);
1860 		if (fio->need_lock == LOCK_REQ)
1861 			f2fs_unlock_op(fio->sbi);
1862 		err = f2fs_inplace_write_data(fio);
1863 		if (err) {
1864 			if (f2fs_encrypted_file(inode))
1865 				fscrypt_pullback_bio_page(&fio->encrypted_page,
1866 									true);
1867 			if (PageWriteback(page))
1868 				end_page_writeback(page);
1869 		}
1870 		trace_f2fs_do_write_data_page(fio->page, IPU);
1871 		set_inode_flag(inode, FI_UPDATE_WRITE);
1872 		return err;
1873 	}
1874 
1875 	if (fio->need_lock == LOCK_RETRY) {
1876 		if (!f2fs_trylock_op(fio->sbi)) {
1877 			err = -EAGAIN;
1878 			goto out_writepage;
1879 		}
1880 		fio->need_lock = LOCK_REQ;
1881 	}
1882 
1883 	err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
1884 	if (err)
1885 		goto out_writepage;
1886 
1887 	fio->version = ni.version;
1888 
1889 	err = encrypt_one_page(fio);
1890 	if (err)
1891 		goto out_writepage;
1892 
1893 	set_page_writeback(page);
1894 	ClearPageError(page);
1895 
1896 	/* LFS mode write path */
1897 	f2fs_outplace_write_data(&dn, fio);
1898 	trace_f2fs_do_write_data_page(page, OPU);
1899 	set_inode_flag(inode, FI_APPEND_WRITE);
1900 	if (page->index == 0)
1901 		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
1902 out_writepage:
1903 	f2fs_put_dnode(&dn);
1904 out:
1905 	if (fio->need_lock == LOCK_REQ)
1906 		f2fs_unlock_op(fio->sbi);
1907 	return err;
1908 }
1909 
1910 static int __write_data_page(struct page *page, bool *submitted,
1911 				struct writeback_control *wbc,
1912 				enum iostat_type io_type)
1913 {
1914 	struct inode *inode = page->mapping->host;
1915 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1916 	loff_t i_size = i_size_read(inode);
1917 	const pgoff_t end_index = ((unsigned long long) i_size)
1918 							>> PAGE_SHIFT;
1919 	loff_t psize = (page->index + 1) << PAGE_SHIFT;
1920 	unsigned offset = 0;
1921 	bool need_balance_fs = false;
1922 	int err = 0;
1923 	struct f2fs_io_info fio = {
1924 		.sbi = sbi,
1925 		.ino = inode->i_ino,
1926 		.type = DATA,
1927 		.op = REQ_OP_WRITE,
1928 		.op_flags = wbc_to_write_flags(wbc),
1929 		.old_blkaddr = NULL_ADDR,
1930 		.page = page,
1931 		.encrypted_page = NULL,
1932 		.submitted = false,
1933 		.need_lock = LOCK_RETRY,
1934 		.io_type = io_type,
1935 		.io_wbc = wbc,
1936 	};
1937 
1938 	trace_f2fs_writepage(page, DATA);
1939 
1940 	/* we should bypass data pages to let the kworker jobs proceed */
1941 	if (unlikely(f2fs_cp_error(sbi))) {
1942 		mapping_set_error(page->mapping, -EIO);
1943 		/*
1944 		 * don't drop any dirty dentry pages, to keep the latest
1945 		 * directory structure.
1946 		 */
1947 		if (S_ISDIR(inode->i_mode))
1948 			goto redirty_out;
1949 		goto out;
1950 	}
1951 
1952 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1953 		goto redirty_out;
1954 
1955 	if (page->index < end_index)
1956 		goto write;
1957 
1958 	/*
1959 	 * If the offset is beyond the end of the file, this page does not
1960 	 * need to be written to disk.
1961 	 */
1962 	offset = i_size & (PAGE_SIZE - 1);
1963 	if ((page->index >= end_index + 1) || !offset)
1964 		goto out;
1965 
1966 	zero_user_segment(page, offset, PAGE_SIZE);
1967 write:
1968 	if (f2fs_is_drop_cache(inode))
1969 		goto out;
1970 	/* we should not write the 0'th page, which holds the journal header */
1971 	if (f2fs_is_volatile_file(inode) && (!page->index ||
1972 			(!wbc->for_reclaim &&
1973 			f2fs_available_free_memory(sbi, BASE_CHECK))))
1974 		goto redirty_out;
1975 
1976 	/* Dentry blocks are controlled by checkpoint */
1977 	if (S_ISDIR(inode->i_mode)) {
1978 		fio.need_lock = LOCK_DONE;
1979 		err = f2fs_do_write_data_page(&fio);
1980 		goto done;
1981 	}
1982 
1983 	if (!wbc->for_reclaim)
1984 		need_balance_fs = true;
1985 	else if (has_not_enough_free_secs(sbi, 0, 0))
1986 		goto redirty_out;
1987 	else
1988 		set_inode_flag(inode, FI_HOT_DATA);
1989 
1990 	err = -EAGAIN;
1991 	if (f2fs_has_inline_data(inode)) {
1992 		err = f2fs_write_inline_data(inode, page);
1993 		if (!err)
1994 			goto out;
1995 	}
1996 
1997 	if (err == -EAGAIN) {
1998 		err = f2fs_do_write_data_page(&fio);
1999 		if (err == -EAGAIN) {
2000 			fio.need_lock = LOCK_REQ;
2001 			err = f2fs_do_write_data_page(&fio);
2002 		}
2003 	}
2004 
2005 	if (err) {
2006 		file_set_keep_isize(inode);
2007 	} else {
2008 		down_write(&F2FS_I(inode)->i_sem);
2009 		if (F2FS_I(inode)->last_disk_size < psize)
2010 			F2FS_I(inode)->last_disk_size = psize;
2011 		up_write(&F2FS_I(inode)->i_sem);
2012 	}
2013 
2014 done:
2015 	if (err && err != -ENOENT)
2016 		goto redirty_out;
2017 
2018 out:
2019 	inode_dec_dirty_pages(inode);
2020 	if (err) {
2021 		ClearPageUptodate(page);
2022 		clear_cold_data(page);
2023 	}
2024 
2025 	if (wbc->for_reclaim) {
2026 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
2027 		clear_inode_flag(inode, FI_HOT_DATA);
2028 		f2fs_remove_dirty_inode(inode);
2029 		submitted = NULL;
2030 	}
2031 
2032 	unlock_page(page);
2033 	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode))
2034 		f2fs_balance_fs(sbi, need_balance_fs);
2035 
2036 	if (unlikely(f2fs_cp_error(sbi))) {
2037 		f2fs_submit_merged_write(sbi, DATA);
2038 		submitted = NULL;
2039 	}
2040 
2041 	if (submitted)
2042 		*submitted = fio.submitted;
2043 
2044 	return 0;
2045 
2046 redirty_out:
2047 	redirty_page_for_writepage(wbc, page);
2048 	/*
2049 	 * pageout() in MM translates EAGAIN and calls handle_write_error()
2050 	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
2051 	 * file_write_and_wait_range() will then see the EIO error, which is
2052 	 * critical for fsync() to report the atomic_write failure to the user.
2053 	 */
2054 	if (!err || wbc->for_reclaim)
2055 		return AOP_WRITEPAGE_ACTIVATE;
2056 	unlock_page(page);
2057 	return err;
2058 }
2059 
2060 static int f2fs_write_data_page(struct page *page,
2061 					struct writeback_control *wbc)
2062 {
2063 	return __write_data_page(page, NULL, wbc, FS_DATA_IO);
2064 }
2065 
2066 /*
2067  * This function was copied from write_cache_pages in mm/page-writeback.c.
2068  * The major change is that it writes cold data pages separately from
2069  * warm/hot data pages.
2070  */
2071 static int f2fs_write_cache_pages(struct address_space *mapping,
2072 					struct writeback_control *wbc,
2073 					enum iostat_type io_type)
2074 {
2075 	int ret = 0;
2076 	int done = 0;
2077 	struct pagevec pvec;
2078 	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2079 	int nr_pages;
2080 	pgoff_t uninitialized_var(writeback_index);
2081 	pgoff_t index;
2082 	pgoff_t end;		/* Inclusive */
2083 	pgoff_t done_index;
2084 	int cycled;
2085 	int range_whole = 0;
2086 	xa_mark_t tag;
2087 	int nwritten = 0;
2088 
2089 	pagevec_init(&pvec);
2090 
2091 	if (get_dirty_pages(mapping->host) <=
2092 				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
2093 		set_inode_flag(mapping->host, FI_HOT_DATA);
2094 	else
2095 		clear_inode_flag(mapping->host, FI_HOT_DATA);
2096 
2097 	if (wbc->range_cyclic) {
2098 		writeback_index = mapping->writeback_index; /* prev offset */
2099 		index = writeback_index;
2100 		if (index == 0)
2101 			cycled = 1;
2102 		else
2103 			cycled = 0;
2104 		end = -1;
2105 	} else {
2106 		index = wbc->range_start >> PAGE_SHIFT;
2107 		end = wbc->range_end >> PAGE_SHIFT;
2108 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2109 			range_whole = 1;
2110 		cycled = 1; /* ignore range_cyclic tests */
2111 	}
2112 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2113 		tag = PAGECACHE_TAG_TOWRITE;
2114 	else
2115 		tag = PAGECACHE_TAG_DIRTY;
2116 retry:
2117 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2118 		tag_pages_for_writeback(mapping, index, end);
2119 	done_index = index;
2120 	while (!done && (index <= end)) {
2121 		int i;
2122 
2123 		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
2124 				tag);
2125 		if (nr_pages == 0)
2126 			break;
2127 
2128 		for (i = 0; i < nr_pages; i++) {
2129 			struct page *page = pvec.pages[i];
2130 			bool submitted = false;
2131 
2132 			/* give priority to WB_SYNC threads */
2133 			if (atomic_read(&sbi->wb_sync_req[DATA]) &&
2134 					wbc->sync_mode == WB_SYNC_NONE) {
2135 				done = 1;
2136 				break;
2137 			}
2138 
2139 			done_index = page->index;
2140 retry_write:
2141 			lock_page(page);
2142 
2143 			if (unlikely(page->mapping != mapping)) {
2144 continue_unlock:
2145 				unlock_page(page);
2146 				continue;
2147 			}
2148 
2149 			if (!PageDirty(page)) {
2150 				/* someone wrote it for us */
2151 				goto continue_unlock;
2152 			}
2153 
2154 			if (PageWriteback(page)) {
2155 				if (wbc->sync_mode != WB_SYNC_NONE)
2156 					f2fs_wait_on_page_writeback(page,
2157 							DATA, true, true);
2158 				else
2159 					goto continue_unlock;
2160 			}
2161 
2162 			if (!clear_page_dirty_for_io(page))
2163 				goto continue_unlock;
2164 
2165 			ret = __write_data_page(page, &submitted, wbc, io_type);
2166 			if (unlikely(ret)) {
2167 				/*
2168 				 * keep nr_to_write, since vfs uses this to
2169 				 * get # of written pages.
2170 				 */
2171 				if (ret == AOP_WRITEPAGE_ACTIVATE) {
2172 					unlock_page(page);
2173 					ret = 0;
2174 					continue;
2175 				} else if (ret == -EAGAIN) {
2176 					ret = 0;
2177 					if (wbc->sync_mode == WB_SYNC_ALL) {
2178 						cond_resched();
2179 						congestion_wait(BLK_RW_ASYNC,
2180 									HZ/50);
2181 						goto retry_write;
2182 					}
2183 					continue;
2184 				}
2185 				done_index = page->index + 1;
2186 				done = 1;
2187 				break;
2188 			} else if (submitted) {
2189 				nwritten++;
2190 			}
2191 
2192 			if (--wbc->nr_to_write <= 0 &&
2193 					wbc->sync_mode == WB_SYNC_NONE) {
2194 				done = 1;
2195 				break;
2196 			}
2197 		}
2198 		pagevec_release(&pvec);
2199 		cond_resched();
2200 	}
2201 
2202 	if (!cycled && !done) {
2203 		cycled = 1;
2204 		index = 0;
2205 		end = writeback_index - 1;
2206 		goto retry;
2207 	}
2208 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2209 		mapping->writeback_index = done_index;
2210 
2211 	if (nwritten)
2212 		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
2213 								NULL, 0, DATA);
2214 
2215 	return ret;
2216 }
2217 
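/*
 * Heuristic for taking sbi->writepages: serialize writeback of regular,
 * non-quota files either when the writeback is asynchronous or when the
 * inode has at least min_seq_blocks dirty pages, presumably so that their
 * blocks are more likely to be allocated sequentially.
 */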
2218 static inline bool __should_serialize_io(struct inode *inode,
2219 					struct writeback_control *wbc)
2220 {
2221 	if (!S_ISREG(inode->i_mode))
2222 		return false;
2223 	if (IS_NOQUOTA(inode))
2224 		return false;
2225 	if (wbc->sync_mode != WB_SYNC_ALL)
2226 		return true;
2227 	if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
2228 		return true;
2229 	return false;
2230 }
2231 
2232 static int __f2fs_write_data_pages(struct address_space *mapping,
2233 						struct writeback_control *wbc,
2234 						enum iostat_type io_type)
2235 {
2236 	struct inode *inode = mapping->host;
2237 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2238 	struct blk_plug plug;
2239 	int ret;
2240 	bool locked = false;
2241 
2242 	/* deal with chardevs and other special files */
2243 	if (!mapping->a_ops->writepage)
2244 		return 0;
2245 
2246 	/* skip writing if there is no dirty page in this inode */
2247 	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
2248 		return 0;
2249 
2250 	/* during POR, we don't need to trigger writepage at all. */
2251 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2252 		goto skip_write;
2253 
2254 	if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
2255 			wbc->sync_mode == WB_SYNC_NONE &&
2256 			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
2257 			f2fs_available_free_memory(sbi, DIRTY_DENTS))
2258 		goto skip_write;
2259 
2260 	/* skip writing during file defragment */
2261 	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
2262 		goto skip_write;
2263 
2264 	trace_f2fs_writepages(mapping->host, wbc, DATA);
2265 
2266 	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
2267 	if (wbc->sync_mode == WB_SYNC_ALL)
2268 		atomic_inc(&sbi->wb_sync_req[DATA]);
2269 	else if (atomic_read(&sbi->wb_sync_req[DATA]))
2270 		goto skip_write;
2271 
2272 	if (__should_serialize_io(inode, wbc)) {
2273 		mutex_lock(&sbi->writepages);
2274 		locked = true;
2275 	}
2276 
2277 	blk_start_plug(&plug);
2278 	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
2279 	blk_finish_plug(&plug);
2280 
2281 	if (locked)
2282 		mutex_unlock(&sbi->writepages);
2283 
2284 	if (wbc->sync_mode == WB_SYNC_ALL)
2285 		atomic_dec(&sbi->wb_sync_req[DATA]);
2286 	/*
2287 	 * if some pages were truncated, we cannot count on mapping->host
2288 	 * to detect pending bios.
2289 	 */
2290 
2291 	f2fs_remove_dirty_inode(inode);
2292 	return ret;
2293 
2294 skip_write:
2295 	wbc->pages_skipped += get_dirty_pages(inode);
2296 	trace_f2fs_writepages(mapping->host, wbc, DATA);
2297 	return 0;
2298 }
2299 
2300 static int f2fs_write_data_pages(struct address_space *mapping,
2301 			    struct writeback_control *wbc)
2302 {
2303 	struct inode *inode = mapping->host;
2304 
2305 	return __f2fs_write_data_pages(mapping, wbc,
2306 			F2FS_I(inode)->cp_task == current ?
2307 			FS_CP_DATA_IO : FS_DATA_IO);
2308 }
2309 
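/*
 * A failed or short write may have left page cache and preallocated blocks
 * beyond i_size; trim both back so that no stale blocks remain past EOF.
 */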
2310 static void f2fs_write_failed(struct address_space *mapping, loff_t to)
2311 {
2312 	struct inode *inode = mapping->host;
2313 	loff_t i_size = i_size_read(inode);
2314 
2315 	if (to > i_size) {
2316 		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2317 		down_write(&F2FS_I(inode)->i_mmap_sem);
2318 
2319 		truncate_pagecache(inode, i_size);
2320 		if (!IS_NOQUOTA(inode))
2321 			f2fs_truncate_blocks(inode, i_size, true);
2322 
2323 		up_write(&F2FS_I(inode)->i_mmap_sem);
2324 		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2325 	}
2326 }
2327 
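/*
 * Resolve the on-disk block backing the page being written: convert inline
 * data when the write no longer fits inline, consult the extent cache, or
 * allocate a new block for the hole case.  *node_changed tells the caller
 * whether node pages were dirtied, i.e. whether f2fs_balance_fs() is needed.
 */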
2328 static int prepare_write_begin(struct f2fs_sb_info *sbi,
2329 			struct page *page, loff_t pos, unsigned len,
2330 			block_t *blk_addr, bool *node_changed)
2331 {
2332 	struct inode *inode = page->mapping->host;
2333 	pgoff_t index = page->index;
2334 	struct dnode_of_data dn;
2335 	struct page *ipage;
2336 	bool locked = false;
2337 	struct extent_info ei = {0,0,0};
2338 	int err = 0;
2339 	int flag;
2340 
2341 	/*
2342 	 * We already allocated all the blocks, so we don't need to look up
2343 	 * the block addresses when there is no need to fill the page.
2344 	 */
2345 	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
2346 			!is_inode_flag_set(inode, FI_NO_PREALLOC))
2347 		return 0;
2348 
2349 	/* f2fs_lock_op avoids race between write CP and convert_inline_page */
2350 	if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
2351 		flag = F2FS_GET_BLOCK_DEFAULT;
2352 	else
2353 		flag = F2FS_GET_BLOCK_PRE_AIO;
2354 
2355 	if (f2fs_has_inline_data(inode) ||
2356 			(pos & PAGE_MASK) >= i_size_read(inode)) {
2357 		__do_map_lock(sbi, flag, true);
2358 		locked = true;
2359 	}
2360 restart:
2361 	/* check inline_data */
2362 	ipage = f2fs_get_node_page(sbi, inode->i_ino);
2363 	if (IS_ERR(ipage)) {
2364 		err = PTR_ERR(ipage);
2365 		goto unlock_out;
2366 	}
2367 
2368 	set_new_dnode(&dn, inode, ipage, ipage, 0);
2369 
2370 	if (f2fs_has_inline_data(inode)) {
2371 		if (pos + len <= MAX_INLINE_DATA(inode)) {
2372 			f2fs_do_read_inline_data(page, ipage);
2373 			set_inode_flag(inode, FI_DATA_EXIST);
2374 			if (inode->i_nlink)
2375 				set_inline_node(ipage);
2376 		} else {
2377 			err = f2fs_convert_inline_page(&dn, page);
2378 			if (err)
2379 				goto out;
2380 			if (dn.data_blkaddr == NULL_ADDR)
2381 				err = f2fs_get_block(&dn, index);
2382 		}
2383 	} else if (locked) {
2384 		err = f2fs_get_block(&dn, index);
2385 	} else {
2386 		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
2387 			dn.data_blkaddr = ei.blk + index - ei.fofs;
2388 		} else {
2389 			/* hole case */
2390 			err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
2391 			if (err || dn.data_blkaddr == NULL_ADDR) {
2392 				f2fs_put_dnode(&dn);
2393 				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
2394 								true);
2395 				WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
2396 				locked = true;
2397 				goto restart;
2398 			}
2399 		}
2400 	}
2401 
2402 	/* f2fs_convert_inline_page may have set node_changed */
2403 	*blk_addr = dn.data_blkaddr;
2404 	*node_changed = dn.node_changed;
2405 out:
2406 	f2fs_put_dnode(&dn);
2407 unlock_out:
2408 	if (locked)
2409 		__do_map_lock(sbi, flag, false);
2410 	return err;
2411 }
2412 
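/*
 * ->write_begin() is driven by the generic buffered-write path, roughly:
 *
 *	generic_perform_write()
 *	  ->write_begin()		- grab and prepare the locked page
 *	  (copy user data into the page)
 *	  ->write_end()			- dirty the page, update i_size
 *
 * On success the locked page is returned through *pagep.
 */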
2413 static int f2fs_write_begin(struct file *file, struct address_space *mapping,
2414 		loff_t pos, unsigned len, unsigned flags,
2415 		struct page **pagep, void **fsdata)
2416 {
2417 	struct inode *inode = mapping->host;
2418 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2419 	struct page *page = NULL;
2420 	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
2421 	bool need_balance = false, drop_atomic = false;
2422 	block_t blkaddr = NULL_ADDR;
2423 	int err = 0;
2424 
2425 	trace_f2fs_write_begin(inode, pos, len, flags);
2426 
2427 	err = f2fs_is_checkpoint_ready(sbi);
2428 	if (err)
2429 		goto fail;
2430 
2431 	if ((f2fs_is_atomic_file(inode) &&
2432 			!f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
2433 			is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
2434 		err = -ENOMEM;
2435 		drop_atomic = true;
2436 		goto fail;
2437 	}
2438 
2439 	/*
2440 	 * We should check this here to avoid a deadlock between the inode
2441 	 * page and page #0. The locking rule for inline_data conversion is:
2442 	 * lock_page(page #0) -> lock_page(inode_page)
2443 	 */
2444 	if (index != 0) {
2445 		err = f2fs_convert_inline_inode(inode);
2446 		if (err)
2447 			goto fail;
2448 	}
2449 repeat:
2450 	/*
2451 	 * Do not use grab_cache_page_write_begin(), to avoid deadlock due to
2452 	 * wait_for_stable_page. We will wait on it below with our IO control.
2453 	 */
2454 	page = f2fs_pagecache_get_page(mapping, index,
2455 				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
2456 	if (!page) {
2457 		err = -ENOMEM;
2458 		goto fail;
2459 	}
2460 
2461 	*pagep = page;
2462 
2463 	err = prepare_write_begin(sbi, page, pos, len,
2464 					&blkaddr, &need_balance);
2465 	if (err)
2466 		goto fail;
2467 
2468 	if (need_balance && !IS_NOQUOTA(inode) &&
2469 			has_not_enough_free_secs(sbi, 0, 0)) {
2470 		unlock_page(page);
2471 		f2fs_balance_fs(sbi, true);
2472 		lock_page(page);
2473 		if (page->mapping != mapping) {
2474 			/* The page got truncated from under us */
2475 			f2fs_put_page(page, 1);
2476 			goto repeat;
2477 		}
2478 	}
2479 
2480 	f2fs_wait_on_page_writeback(page, DATA, false, true);
2481 
2482 	if (len == PAGE_SIZE || PageUptodate(page))
2483 		return 0;
2484 
2485 	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
2486 		zero_user_segment(page, len, PAGE_SIZE);
2487 		return 0;
2488 	}
2489 
2490 	if (blkaddr == NEW_ADDR) {
2491 		zero_user_segment(page, 0, PAGE_SIZE);
2492 		SetPageUptodate(page);
2493 	} else {
2494 		err = f2fs_submit_page_read(inode, page, blkaddr);
2495 		if (err)
2496 			goto fail;
2497 
2498 		lock_page(page);
2499 		if (unlikely(page->mapping != mapping)) {
2500 			f2fs_put_page(page, 1);
2501 			goto repeat;
2502 		}
2503 		if (unlikely(!PageUptodate(page))) {
2504 			err = -EIO;
2505 			goto fail;
2506 		}
2507 	}
2508 	return 0;
2509 
2510 fail:
2511 	f2fs_put_page(page, 1);
2512 	f2fs_write_failed(mapping, pos + len);
2513 	if (drop_atomic)
2514 		f2fs_drop_inmem_pages_all(sbi, false);
2515 	return err;
2516 }
2517 
2518 static int f2fs_write_end(struct file *file,
2519 			struct address_space *mapping,
2520 			loff_t pos, unsigned len, unsigned copied,
2521 			struct page *page, void *fsdata)
2522 {
2523 	struct inode *inode = page->mapping->host;
2524 
2525 	trace_f2fs_write_end(inode, pos, len, copied);
2526 
2527 	/*
2528 	 * This should only happen when len == PAGE_SIZE, and we expect
2529 	 * copied to be PAGE_SIZE as well. Otherwise, treat it as zero bytes
2530 	 * copied and let generic_perform_write() retry the copy via copied=0.
2531 	 */
2532 	if (!PageUptodate(page)) {
2533 		if (unlikely(copied != len))
2534 			copied = 0;
2535 		else
2536 			SetPageUptodate(page);
2537 	}
2538 	if (!copied)
2539 		goto unlock_out;
2540 
2541 	set_page_dirty(page);
2542 
2543 	if (pos + copied > i_size_read(inode))
2544 		f2fs_i_size_write(inode, pos + copied);
2545 unlock_out:
2546 	f2fs_put_page(page, 1);
2547 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2548 	return copied;
2549 }
2550 
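/*
 * Check alignment for direct I/O.  Returns 0 if the request is aligned to
 * the filesystem block size, 1 if it is misaligned w.r.t. the fs block size
 * but still aligned to the device's logical block size (the caller then
 * falls back to buffered I/O), and -EINVAL otherwise.
 */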
2551 static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
2552 			   loff_t offset)
2553 {
2554 	unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
2555 	unsigned blkbits = i_blkbits;
2556 	unsigned blocksize_mask = (1 << blkbits) - 1;
2557 	unsigned long align = offset | iov_iter_alignment(iter);
2558 	struct block_device *bdev = inode->i_sb->s_bdev;
2559 
2560 	if (align & blocksize_mask) {
2561 		if (bdev)
2562 			blkbits = blksize_bits(bdev_logical_block_size(bdev));
2563 		blocksize_mask = (1 << blkbits) - 1;
2564 		if (align & blocksize_mask)
2565 			return -EINVAL;
2566 		return 1;
2567 	}
2568 	return 0;
2569 }
2570 
2571 static void f2fs_dio_end_io(struct bio *bio)
2572 {
2573 	struct f2fs_private_dio *dio = bio->bi_private;
2574 
2575 	dec_page_count(F2FS_I_SB(dio->inode),
2576 			dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
2577 
2578 	bio->bi_private = dio->orig_private;
2579 	bio->bi_end_io = dio->orig_end_io;
2580 
2581 	kvfree(dio);
2582 
2583 	bio_endio(bio);
2584 }
2585 
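/*
 * Wrap the bio's completion so that in-flight direct I/O pages can be
 * counted: f2fs_dio_submit_bio() bumps F2FS_DIO_READ/F2FS_DIO_WRITE and
 * redirects bi_end_io to f2fs_dio_end_io(), which drops the count and
 * restores the original completion before calling bio_endio().
 */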
2586 static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
2587 							loff_t file_offset)
2588 {
2589 	struct f2fs_private_dio *dio;
2590 	bool write = (bio_op(bio) == REQ_OP_WRITE);
2591 
2592 	dio = f2fs_kzalloc(F2FS_I_SB(inode),
2593 			sizeof(struct f2fs_private_dio), GFP_NOFS);
2594 	if (!dio)
2595 		goto out;
2596 
2597 	dio->inode = inode;
2598 	dio->orig_end_io = bio->bi_end_io;
2599 	dio->orig_private = bio->bi_private;
2600 	dio->write = write;
2601 
2602 	bio->bi_end_io = f2fs_dio_end_io;
2603 	bio->bi_private = dio;
2604 
2605 	inc_page_count(F2FS_I_SB(inode),
2606 			write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
2607 
2608 	submit_bio(bio);
2609 	return;
2610 out:
2611 	bio->bi_status = BLK_STS_IOERR;
2612 	bio_endio(bio);
2613 }
2614 
2615 static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
2616 {
2617 	struct address_space *mapping = iocb->ki_filp->f_mapping;
2618 	struct inode *inode = mapping->host;
2619 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2620 	struct f2fs_inode_info *fi = F2FS_I(inode);
2621 	size_t count = iov_iter_count(iter);
2622 	loff_t offset = iocb->ki_pos;
2623 	int rw = iov_iter_rw(iter);
2624 	int err;
2625 	enum rw_hint hint = iocb->ki_hint;
2626 	int whint_mode = F2FS_OPTION(sbi).whint_mode;
2627 	bool do_opu;
2628 
2629 	err = check_direct_IO(inode, iter, offset);
2630 	if (err)
2631 		return err < 0 ? err : 0;
2632 
2633 	if (f2fs_force_buffered_io(inode, iocb, iter))
2634 		return 0;
2635 
2636 	do_opu = allow_outplace_dio(inode, iocb, iter);
2637 
2638 	trace_f2fs_direct_IO_enter(inode, offset, count, rw);
2639 
2640 	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
2641 		iocb->ki_hint = WRITE_LIFE_NOT_SET;
2642 
2643 	if (iocb->ki_flags & IOCB_NOWAIT) {
2644 		if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
2645 			iocb->ki_hint = hint;
2646 			err = -EAGAIN;
2647 			goto out;
2648 		}
2649 		if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
2650 			up_read(&fi->i_gc_rwsem[rw]);
2651 			iocb->ki_hint = hint;
2652 			err = -EAGAIN;
2653 			goto out;
2654 		}
2655 	} else {
2656 		down_read(&fi->i_gc_rwsem[rw]);
2657 		if (do_opu)
2658 			down_read(&fi->i_gc_rwsem[READ]);
2659 	}
2660 
2661 	err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
2662 			iter, rw == WRITE ? get_data_block_dio_write :
2663 			get_data_block_dio, NULL, f2fs_dio_submit_bio,
2664 			DIO_LOCKING | DIO_SKIP_HOLES);
2665 
2666 	if (do_opu)
2667 		up_read(&fi->i_gc_rwsem[READ]);
2668 
2669 	up_read(&fi->i_gc_rwsem[rw]);
2670 
2671 	if (rw == WRITE) {
2672 		if (whint_mode == WHINT_MODE_OFF)
2673 			iocb->ki_hint = hint;
2674 		if (err > 0) {
2675 			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
2676 									err);
2677 			if (!do_opu)
2678 				set_inode_flag(inode, FI_UPDATE_WRITE);
2679 		} else if (err < 0) {
2680 			f2fs_write_failed(mapping, offset + count);
2681 		}
2682 	}
2683 
2684 out:
2685 	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
2686 
2687 	return err;
2688 }
2689 
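/*
 * ->invalidatepage(): keep the per-type dirty-page counters in sync when a
 * page is thrown away.  For non-meta/node inodes, a partial-page
 * invalidation returns early, since the page itself stays in the cache.
 */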
2690 void f2fs_invalidate_page(struct page *page, unsigned int offset,
2691 							unsigned int length)
2692 {
2693 	struct inode *inode = page->mapping->host;
2694 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2695 
2696 	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
2697 		(offset % PAGE_SIZE || length != PAGE_SIZE))
2698 		return;
2699 
2700 	if (PageDirty(page)) {
2701 		if (inode->i_ino == F2FS_META_INO(sbi)) {
2702 			dec_page_count(sbi, F2FS_DIRTY_META);
2703 		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
2704 			dec_page_count(sbi, F2FS_DIRTY_NODES);
2705 		} else {
2706 			inode_dec_dirty_pages(inode);
2707 			f2fs_remove_dirty_inode(inode);
2708 		}
2709 	}
2710 
2711 	clear_cold_data(page);
2712 
2713 	if (IS_ATOMIC_WRITTEN_PAGE(page))
2714 		return f2fs_drop_inmem_page(inode, page);
2715 
2716 	f2fs_clear_page_private(page);
2717 }
2718 
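/*
 * ->releasepage(): return 1 only when the page's private state can be
 * dropped; dirty and atomic-written pages must keep it.
 */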
2719 int f2fs_release_page(struct page *page, gfp_t wait)
2720 {
2721 	/* If this is a dirty page, keep PagePrivate */
2722 	if (PageDirty(page))
2723 		return 0;
2724 
2725 	/* This is an atomic-written page, keep Private */
2726 	if (IS_ATOMIC_WRITTEN_PAGE(page))
2727 		return 0;
2728 
2729 	clear_cold_data(page);
2730 	f2fs_clear_page_private(page);
2731 	return 1;
2732 }
2733 
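/*
 * ->set_page_dirty(): returns 1 if the page was newly dirtied.  Pages of an
 * atomic file in progress are registered on the inmem list instead of being
 * tagged dirty in the mapping.
 */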
2734 static int f2fs_set_data_page_dirty(struct page *page)
2735 {
2736 	struct address_space *mapping = page->mapping;
2737 	struct inode *inode = mapping->host;
2738 
2739 	trace_f2fs_set_page_dirty(page, DATA);
2740 
2741 	if (!PageUptodate(page))
2742 		SetPageUptodate(page);
2743 
2744 	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
2745 		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
2746 			f2fs_register_inmem_page(inode, page);
2747 			return 1;
2748 		}
2749 		/*
2750 		 * This page has already been registered;
2751 		 * just return here.
2752 		 */
2753 		return 0;
2754 	}
2755 
2756 	if (!PageDirty(page)) {
2757 		__set_page_dirty_nobuffers(page);
2758 		f2fs_update_dirty_page(inode, page);
2759 		return 1;
2760 	}
2761 	return 0;
2762 }
2763 
2764 static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
2765 {
2766 	struct inode *inode = mapping->host;
2767 
2768 	if (f2fs_has_inline_data(inode))
2769 		return 0;
2770 
2771 	/* make sure all blocks are allocated by writing out dirty pages */
2772 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2773 		filemap_write_and_wait(mapping);
2774 
2775 	return generic_block_bmap(mapping, block, get_data_block_bmap);
2776 }
2777 
2778 #ifdef CONFIG_MIGRATION
2779 #include <linux/migrate.h>
2780 
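/*
 * Page migration: an atomic-written page carries one extra reference and
 * lives on fi->inmem_pages, so the list entry must be repointed to the new
 * page and the extra reference transferred along with the private data.
 */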
2781 int f2fs_migrate_page(struct address_space *mapping,
2782 		struct page *newpage, struct page *page, enum migrate_mode mode)
2783 {
2784 	int rc, extra_count;
2785 	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
2786 	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
2787 
2788 	BUG_ON(PageWriteback(page));
2789 
2790 	/* migrating an atomic-written page is safe with the inmem_lock held */
2791 	if (atomic_written) {
2792 		if (mode != MIGRATE_SYNC)
2793 			return -EBUSY;
2794 		if (!mutex_trylock(&fi->inmem_lock))
2795 			return -EAGAIN;
2796 	}
2797 
2798 	/* one extra reference is held for an atomic_write page */
2799 	extra_count = atomic_written ? 1 : 0;
2800 	rc = migrate_page_move_mapping(mapping, newpage,
2801 				page, mode, extra_count);
2802 	if (rc != MIGRATEPAGE_SUCCESS) {
2803 		if (atomic_written)
2804 			mutex_unlock(&fi->inmem_lock);
2805 		return rc;
2806 	}
2807 
2808 	if (atomic_written) {
2809 		struct inmem_pages *cur;
2810 		list_for_each_entry(cur, &fi->inmem_pages, list)
2811 			if (cur->page == page) {
2812 				cur->page = newpage;
2813 				break;
2814 			}
2815 		mutex_unlock(&fi->inmem_lock);
2816 		put_page(page);
2817 		get_page(newpage);
2818 	}
2819 
2820 	if (PagePrivate(page)) {
2821 		f2fs_set_page_private(newpage, page_private(page));
2822 		f2fs_clear_page_private(page);
2823 	}
2824 
2825 	if (mode != MIGRATE_SYNC_NO_COPY)
2826 		migrate_page_copy(newpage, page);
2827 	else
2828 		migrate_page_states(newpage, page);
2829 
2830 	return MIGRATEPAGE_SUCCESS;
2831 }
2832 #endif
2833 
2834 const struct address_space_operations f2fs_dblock_aops = {
2835 	.readpage	= f2fs_read_data_page,
2836 	.readpages	= f2fs_read_data_pages,
2837 	.writepage	= f2fs_write_data_page,
2838 	.writepages	= f2fs_write_data_pages,
2839 	.write_begin	= f2fs_write_begin,
2840 	.write_end	= f2fs_write_end,
2841 	.set_page_dirty	= f2fs_set_data_page_dirty,
2842 	.invalidatepage	= f2fs_invalidate_page,
2843 	.releasepage	= f2fs_release_page,
2844 	.direct_IO	= f2fs_direct_IO,
2845 	.bmap		= f2fs_bmap,
2846 #ifdef CONFIG_MIGRATION
2847 	.migratepage    = f2fs_migrate_page,
2848 #endif
2849 };
2850 
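/*
 * Clear only the PAGECACHE_TAG_DIRTY mark in the mapping's xarray; the
 * page's own dirty flag is left untouched.
 */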
2851 void f2fs_clear_page_cache_dirty_tag(struct page *page)
2852 {
2853 	struct address_space *mapping = page_mapping(page);
2854 	unsigned long flags;
2855 
2856 	xa_lock_irqsave(&mapping->i_pages, flags);
2857 	__xa_clear_mark(&mapping->i_pages, page_index(page),
2858 						PAGECACHE_TAG_DIRTY);
2859 	xa_unlock_irqrestore(&mapping->i_pages, flags);
2860 }
2861 
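/*
 * The mempool below guarantees that a bio_post_read_ctx can always be
 * allocated on the read path, even under memory pressure, by keeping
 * NUM_PREALLOC_POST_READ_CTXS contexts preallocated.
 */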
2862 int __init f2fs_init_post_read_processing(void)
2863 {
2864 	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, 0);
2865 	if (!bio_post_read_ctx_cache)
2866 		goto fail;
2867 	bio_post_read_ctx_pool =
2868 		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
2869 					 bio_post_read_ctx_cache);
2870 	if (!bio_post_read_ctx_pool)
2871 		goto fail_free_cache;
2872 	return 0;
2873 
2874 fail_free_cache:
2875 	kmem_cache_destroy(bio_post_read_ctx_cache);
2876 fail:
2877 	return -ENOMEM;
2878 }
2879 
2880 void __exit f2fs_destroy_post_read_processing(void)
2881 {
2882 	mempool_destroy(bio_post_read_ctx_pool);
2883 	kmem_cache_destroy(bio_post_read_ctx_cache);
2884 }
2885