xref: /linux/fs/f2fs/data.c (revision ba6d10ab8014ac10d25ca513352b6665e73b5785)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/data.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/buffer_head.h>
11 #include <linux/mpage.h>
12 #include <linux/writeback.h>
13 #include <linux/backing-dev.h>
14 #include <linux/pagevec.h>
15 #include <linux/blkdev.h>
16 #include <linux/bio.h>
17 #include <linux/prefetch.h>
18 #include <linux/uio.h>
19 #include <linux/cleancache.h>
20 #include <linux/sched/signal.h>
21 
22 #include "f2fs.h"
23 #include "node.h"
24 #include "segment.h"
25 #include "trace.h"
26 #include <trace/events/f2fs.h>
27 
28 #define NUM_PREALLOC_POST_READ_CTXS	128
29 
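/*
 * The slab cache below is backed by a mempool with
 * NUM_PREALLOC_POST_READ_CTXS preallocated contexts, so read
 * post-processing (e.g. decryption) can make progress even under
 * memory pressure.
 */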
30 static struct kmem_cache *bio_post_read_ctx_cache;
31 static mempool_t *bio_post_read_ctx_pool;
32 
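/*
 * Return true if writeback of @page must complete before the next
 * checkpoint: meta/node pages, directory data, atomic- or quota-file
 * data, and cold data.
 */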
33 static bool __is_cp_guaranteed(struct page *page)
34 {
35 	struct address_space *mapping = page->mapping;
36 	struct inode *inode;
37 	struct f2fs_sb_info *sbi;
38 
39 	if (!mapping)
40 		return false;
41 
42 	inode = mapping->host;
43 	sbi = F2FS_I_SB(inode);
44 
45 	if (inode->i_ino == F2FS_META_INO(sbi) ||
46 			inode->i_ino == F2FS_NODE_INO(sbi) ||
47 			S_ISDIR(inode->i_mode) ||
48 			(S_ISREG(inode->i_mode) &&
49 			(f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
50 			is_cold_data(page))
51 		return true;
52 	return false;
53 }
54 
55 static enum count_type __read_io_type(struct page *page)
56 {
57 	struct address_space *mapping = page->mapping;
58 
59 	if (mapping) {
60 		struct inode *inode = mapping->host;
61 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
62 
63 		if (inode->i_ino == F2FS_META_INO(sbi))
64 			return F2FS_RD_META;
65 
66 		if (inode->i_ino == F2FS_NODE_INO(sbi))
67 			return F2FS_RD_NODE;
68 	}
69 	return F2FS_RD_DATA;
70 }
71 
72 /* postprocessing steps for read bios */
73 enum bio_post_read_step {
74 	STEP_INITIAL = 0,
75 	STEP_DECRYPT,
76 };
77 
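/*
 * Per-bio context for reads that need post-processing; cur_step walks
 * through the bits enabled in enabled_steps until all steps are done.
 */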
78 struct bio_post_read_ctx {
79 	struct bio *bio;
80 	struct work_struct work;
81 	unsigned int cur_step;
82 	unsigned int enabled_steps;
83 };
84 
85 static void __read_end_io(struct bio *bio)
86 {
87 	struct page *page;
88 	struct bio_vec *bv;
89 	struct bvec_iter_all iter_all;
90 
91 	bio_for_each_segment_all(bv, bio, iter_all) {
92 		page = bv->bv_page;
93 
94 		/* PG_error was set if any post_read step failed */
95 		if (bio->bi_status || PageError(page)) {
96 			ClearPageUptodate(page);
97 			/* will re-read again later */
98 			ClearPageError(page);
99 		} else {
100 			SetPageUptodate(page);
101 		}
102 		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
103 		unlock_page(page);
104 	}
105 	if (bio->bi_private)
106 		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
107 	bio_put(bio);
108 }
109 
110 static void bio_post_read_processing(struct bio_post_read_ctx *ctx);
111 
112 static void decrypt_work(struct work_struct *work)
113 {
114 	struct bio_post_read_ctx *ctx =
115 		container_of(work, struct bio_post_read_ctx, work);
116 
117 	fscrypt_decrypt_bio(ctx->bio);
118 
119 	bio_post_read_processing(ctx);
120 }
121 
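/*
 * Run the next enabled post-read step; decryption is deferred to a
 * workqueue, and __read_end_io() completes the bio once no steps remain.
 */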
122 static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
123 {
124 	switch (++ctx->cur_step) {
125 	case STEP_DECRYPT:
126 		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
127 			INIT_WORK(&ctx->work, decrypt_work);
128 			fscrypt_enqueue_decrypt_work(&ctx->work);
129 			return;
130 		}
131 		ctx->cur_step++;
132 		/* fall-through */
133 	default:
134 		__read_end_io(ctx->bio);
135 	}
136 }
137 
138 static bool f2fs_bio_post_read_required(struct bio *bio)
139 {
140 	return bio->bi_private && !bio->bi_status;
141 }
142 
143 static void f2fs_read_end_io(struct bio *bio)
144 {
145 	if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)),
146 						FAULT_READ_IO)) {
147 		f2fs_show_injection_info(FAULT_READ_IO);
148 		bio->bi_status = BLK_STS_IOERR;
149 	}
150 
151 	if (f2fs_bio_post_read_required(bio)) {
152 		struct bio_post_read_ctx *ctx = bio->bi_private;
153 
154 		ctx->cur_step = STEP_INITIAL;
155 		bio_post_read_processing(ctx);
156 		return;
157 	}
158 
159 	__read_end_io(bio);
160 }
161 
162 static void f2fs_write_end_io(struct bio *bio)
163 {
164 	struct f2fs_sb_info *sbi = bio->bi_private;
165 	struct bio_vec *bvec;
166 	struct bvec_iter_all iter_all;
167 
168 	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
169 		f2fs_show_injection_info(FAULT_WRITE_IO);
170 		bio->bi_status = BLK_STS_IOERR;
171 	}
172 
173 	bio_for_each_segment_all(bvec, bio, iter_all) {
174 		struct page *page = bvec->bv_page;
175 		enum count_type type = WB_DATA_TYPE(page);
176 
177 		if (IS_DUMMY_WRITTEN_PAGE(page)) {
178 			set_page_private(page, (unsigned long)NULL);
179 			ClearPagePrivate(page);
180 			unlock_page(page);
181 			mempool_free(page, sbi->write_io_dummy);
182 
183 			if (unlikely(bio->bi_status))
184 				f2fs_stop_checkpoint(sbi, true);
185 			continue;
186 		}
187 
188 		fscrypt_finalize_bounce_page(&page);
189 
190 		if (unlikely(bio->bi_status)) {
191 			mapping_set_error(page->mapping, -EIO);
192 			if (type == F2FS_WB_CP_DATA)
193 				f2fs_stop_checkpoint(sbi, true);
194 		}
195 
196 		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
197 					page->index != nid_of_node(page));
198 
199 		dec_page_count(sbi, type);
200 		if (f2fs_in_warm_node_list(sbi, page))
201 			f2fs_del_fsync_node_entry(sbi, page);
202 		clear_cold_data(page);
203 		end_page_writeback(page);
204 	}
205 	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
206 				wq_has_sleeper(&sbi->cp_wait))
207 		wake_up(&sbi->cp_wait);
208 
209 	bio_put(bio);
210 }
211 
212 /* Return the block device that blk_addr belongs to; if bio is given,
213  * also set the bio's device and start sector accordingly.
214  */
215 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
216 				block_t blk_addr, struct bio *bio)
217 {
218 	struct block_device *bdev = sbi->sb->s_bdev;
219 	int i;
220 
221 	if (f2fs_is_multi_device(sbi)) {
222 		for (i = 0; i < sbi->s_ndevs; i++) {
223 			if (FDEV(i).start_blk <= blk_addr &&
224 			    FDEV(i).end_blk >= blk_addr) {
225 				blk_addr -= FDEV(i).start_blk;
226 				bdev = FDEV(i).bdev;
227 				break;
228 			}
229 		}
230 	}
231 	if (bio) {
232 		bio_set_dev(bio, bdev);
233 		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
234 	}
235 	return bdev;
236 }
237 
238 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
239 {
240 	int i;
241 
242 	if (!f2fs_is_multi_device(sbi))
243 		return 0;
244 
245 	for (i = 0; i < sbi->s_ndevs; i++)
246 		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
247 			return i;
248 	return 0;
249 }
250 
251 static bool __same_bdev(struct f2fs_sb_info *sbi,
252 				block_t blk_addr, struct bio *bio)
253 {
254 	struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
255 	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
256 }
257 
258 /*
259  * Low-level block read/write IO operations.
260  */
261 static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
262 				struct writeback_control *wbc,
263 				int npages, bool is_read,
264 				enum page_type type, enum temp_type temp)
265 {
266 	struct bio *bio;
267 
268 	bio = f2fs_bio_alloc(sbi, npages, true);
269 
270 	f2fs_target_device(sbi, blk_addr, bio);
271 	if (is_read) {
272 		bio->bi_end_io = f2fs_read_end_io;
273 		bio->bi_private = NULL;
274 	} else {
275 		bio->bi_end_io = f2fs_write_end_io;
276 		bio->bi_private = sbi;
277 		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, type, temp);
278 	}
279 	if (wbc)
280 		wbc_init_bio(wbc, bio);
281 
282 	return bio;
283 }
284 
285 static inline void __submit_bio(struct f2fs_sb_info *sbi,
286 				struct bio *bio, enum page_type type)
287 {
288 	if (!is_read_io(bio_op(bio))) {
289 		unsigned int start;
290 
291 		if (type != DATA && type != NODE)
292 			goto submit_io;
293 
294 		if (test_opt(sbi, LFS) && current->plug)
295 			blk_finish_plug(current->plug);
296 
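		/*
		 * With the io_bits mount option, write bios must end on an
		 * F2FS_IO_SIZE(sbi) boundary; compute how many blocks this
		 * bio extends past the last boundary so the tail can be
		 * padded with dummy pages below.
		 */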
297 		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
298 		start %= F2FS_IO_SIZE(sbi);
299 
300 		if (start == 0)
301 			goto submit_io;
302 
303 		/* fill dummy pages */
304 		for (; start < F2FS_IO_SIZE(sbi); start++) {
305 			struct page *page =
306 				mempool_alloc(sbi->write_io_dummy,
307 					      GFP_NOIO | __GFP_NOFAIL);
308 			f2fs_bug_on(sbi, !page);
309 
310 			zero_user_segment(page, 0, PAGE_SIZE);
311 			SetPagePrivate(page);
312 			set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
313 			lock_page(page);
314 			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
315 				f2fs_bug_on(sbi, 1);
316 		}
317 		/*
318 		 * In the NODE case, we lose next block address chain. So, we
319 		 * need to do checkpoint in f2fs_sync_file.
320 		 */
321 		if (type == NODE)
322 			set_sbi_flag(sbi, SBI_NEED_CP);
323 	}
324 submit_io:
325 	if (is_read_io(bio_op(bio)))
326 		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
327 	else
328 		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
329 	submit_bio(bio);
330 }
331 
332 static void __submit_merged_bio(struct f2fs_bio_info *io)
333 {
334 	struct f2fs_io_info *fio = &io->fio;
335 
336 	if (!io->bio)
337 		return;
338 
339 	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
340 
341 	if (is_read_io(fio->op))
342 		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
343 	else
344 		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
345 
346 	__submit_bio(io->sbi, io->bio, fio->type);
347 	io->bio = NULL;
348 }
349 
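/*
 * Check whether the pending write bio already carries a page that
 * matches @inode, @page, or @ino; with no filter given, any pending
 * bio counts as a match.
 */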
350 static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
351 						struct page *page, nid_t ino)
352 {
353 	struct bio_vec *bvec;
354 	struct page *target;
355 	struct bvec_iter_all iter_all;
356 
357 	if (!io->bio)
358 		return false;
359 
360 	if (!inode && !page && !ino)
361 		return true;
362 
363 	bio_for_each_segment_all(bvec, io->bio, iter_all) {
364 
365 		target = bvec->bv_page;
366 		if (fscrypt_is_bounce_page(target))
367 			target = fscrypt_pagecache_page(target);
368 
369 		if (inode && inode == target->mapping->host)
370 			return true;
371 		if (page && page == target)
372 			return true;
373 		if (ino && ino == ino_of_node(target))
374 			return true;
375 	}
376 
377 	return false;
378 }
379 
380 static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
381 				enum page_type type, enum temp_type temp)
382 {
383 	enum page_type btype = PAGE_TYPE_OF_BIO(type);
384 	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
385 
386 	down_write(&io->io_rwsem);
387 
388 	/* change META to META_FLUSH in the checkpoint procedure */
389 	if (type >= META_FLUSH) {
390 		io->fio.type = META_FLUSH;
391 		io->fio.op = REQ_OP_WRITE;
392 		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
393 		if (!test_opt(sbi, NOBARRIER))
394 			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
395 	}
396 	__submit_merged_bio(io);
397 	up_write(&io->io_rwsem);
398 }
399 
400 static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
401 				struct inode *inode, struct page *page,
402 				nid_t ino, enum page_type type, bool force)
403 {
404 	enum temp_type temp;
405 	bool ret = true;
406 
407 	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
408 		if (!force)	{
409 			enum page_type btype = PAGE_TYPE_OF_BIO(type);
410 			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
411 
412 			down_read(&io->io_rwsem);
413 			ret = __has_merged_page(io, inode, page, ino);
414 			up_read(&io->io_rwsem);
415 		}
416 		if (ret)
417 			__f2fs_submit_merged_write(sbi, type, temp);
418 
419 		/* TODO: meta pages use only the HOT temp for now */
420 		if (type >= META)
421 			break;
422 	}
423 }
424 
425 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
426 {
427 	__submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
428 }
429 
430 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
431 				struct inode *inode, struct page *page,
432 				nid_t ino, enum page_type type)
433 {
434 	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
435 }
436 
437 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
438 {
439 	f2fs_submit_merged_write(sbi, DATA);
440 	f2fs_submit_merged_write(sbi, NODE);
441 	f2fs_submit_merged_write(sbi, META);
442 }
443 
444 /*
445  * Fill the locked page with data located at the given block address.
446  * The caller needs to unlock the page on failure.
447  */
448 int f2fs_submit_page_bio(struct f2fs_io_info *fio)
449 {
450 	struct bio *bio;
451 	struct page *page = fio->encrypted_page ?
452 			fio->encrypted_page : fio->page;
453 
454 	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
455 			fio->is_por ? META_POR : (__is_meta_io(fio) ?
456 			META_GENERIC : DATA_GENERIC_ENHANCE)))
457 		return -EFAULT;
458 
459 	trace_f2fs_submit_page_bio(page, fio);
460 	f2fs_trace_ios(fio, 0);
461 
462 	/* Allocate a new bio */
463 	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
464 				1, is_read_io(fio->op), fio->type, fio->temp);
465 
466 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
467 		bio_put(bio);
468 		return -EFAULT;
469 	}
470 
471 	if (fio->io_wbc && !is_read_io(fio->op))
472 		wbc_account_io(fio->io_wbc, page, PAGE_SIZE);
473 
474 	bio_set_op_attrs(bio, fio->op, fio->op_flags);
475 
476 	inc_page_count(fio->sbi, is_read_io(fio->op) ?
477 			__read_io_type(page) : WB_DATA_TYPE(fio->page));
478 
479 	__submit_bio(fio->sbi, bio, fio->type);
480 	return 0;
481 }
482 
483 void f2fs_submit_page_write(struct f2fs_io_info *fio)
484 {
485 	struct f2fs_sb_info *sbi = fio->sbi;
486 	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
487 	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
488 	struct page *bio_page;
489 
490 	f2fs_bug_on(sbi, is_read_io(fio->op));
491 
492 	down_write(&io->io_rwsem);
493 next:
494 	if (fio->in_list) {
495 		spin_lock(&io->io_lock);
496 		if (list_empty(&io->io_list)) {
497 			spin_unlock(&io->io_lock);
498 			goto out;
499 		}
500 		fio = list_first_entry(&io->io_list,
501 						struct f2fs_io_info, list);
502 		list_del(&fio->list);
503 		spin_unlock(&io->io_lock);
504 	}
505 
506 	verify_fio_blkaddr(fio);
507 
508 	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
509 
510 	/* set submitted = true as a return value */
511 	fio->submitted = true;
512 
513 	inc_page_count(sbi, WB_DATA_TYPE(bio_page));
514 
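	/*
	 * Merge into the cached bio only if the new block is physically
	 * contiguous with the last one and the op, flags, and target
	 * device all match; otherwise submit the pending bio first.
	 */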
515 	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
516 	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
517 			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
518 		__submit_merged_bio(io);
519 alloc_new:
520 	if (io->bio == NULL) {
521 		if ((fio->type == DATA || fio->type == NODE) &&
522 				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
523 			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
524 			fio->retry = true;
525 			goto skip;
526 		}
527 		io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
528 						BIO_MAX_PAGES, false,
529 						fio->type, fio->temp);
530 		io->fio = *fio;
531 	}
532 
533 	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
534 		__submit_merged_bio(io);
535 		goto alloc_new;
536 	}
537 
538 	if (fio->io_wbc)
539 		wbc_account_io(fio->io_wbc, bio_page, PAGE_SIZE);
540 
541 	io->last_block_in_bio = fio->new_blkaddr;
542 	f2fs_trace_ios(fio, 0);
543 
544 	trace_f2fs_submit_page_write(fio->page, fio);
545 skip:
546 	if (fio->in_list)
547 		goto next;
548 out:
549 	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
550 				f2fs_is_checkpoint_ready(sbi))
551 		__submit_merged_bio(io);
552 	up_write(&io->io_rwsem);
553 }
554 
555 static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
556 					unsigned nr_pages, unsigned op_flag)
557 {
558 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
559 	struct bio *bio;
560 	struct bio_post_read_ctx *ctx;
561 	unsigned int post_read_steps = 0;
562 
563 	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
564 	if (!bio)
565 		return ERR_PTR(-ENOMEM);
566 	f2fs_target_device(sbi, blkaddr, bio);
567 	bio->bi_end_io = f2fs_read_end_io;
568 	bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
569 
570 	if (f2fs_encrypted_file(inode))
571 		post_read_steps |= 1 << STEP_DECRYPT;
572 	if (post_read_steps) {
573 		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
574 		if (!ctx) {
575 			bio_put(bio);
576 			return ERR_PTR(-ENOMEM);
577 		}
578 		ctx->bio = bio;
579 		ctx->enabled_steps = post_read_steps;
580 		bio->bi_private = ctx;
581 	}
582 
583 	return bio;
584 }
585 
586 /* This can handle encryption stuff */
587 static int f2fs_submit_page_read(struct inode *inode, struct page *page,
588 							block_t blkaddr)
589 {
590 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
591 	struct bio *bio;
592 
593 	bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0);
594 	if (IS_ERR(bio))
595 		return PTR_ERR(bio);
596 
597 	/* wait for GCed page writeback via META_MAPPING */
598 	f2fs_wait_on_block_writeback(inode, blkaddr);
599 
600 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
601 		bio_put(bio);
602 		return -EFAULT;
603 	}
604 	ClearPageError(page);
605 	inc_page_count(sbi, F2FS_RD_DATA);
606 	__submit_bio(sbi, bio, DATA);
607 	return 0;
608 }
609 
610 static void __set_data_blkaddr(struct dnode_of_data *dn)
611 {
612 	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
613 	__le32 *addr_array;
614 	int base = 0;
615 
616 	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
617 		base = get_extra_isize(dn->inode);
618 
619 	/* Get physical address of data block */
620 	addr_array = blkaddr_in_node(rn);
621 	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
622 }
623 
624 /*
625  * Lock ordering for the change of data block address:
626  * ->data_page
627  *  ->node_page
628  *    update block addresses in the node page
629  */
630 void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
631 {
632 	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
633 	__set_data_blkaddr(dn);
634 	if (set_page_dirty(dn->node_page))
635 		dn->node_changed = true;
636 }
637 
638 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
639 {
640 	dn->data_blkaddr = blkaddr;
641 	f2fs_set_data_blkaddr(dn);
642 	f2fs_update_extent_cache(dn);
643 }
644 
645 /* dn->ofs_in_node will be returned with up-to-date last block pointer */
646 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
647 {
648 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
649 	int err;
650 
651 	if (!count)
652 		return 0;
653 
654 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
655 		return -EPERM;
656 	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
657 		return err;
658 
659 	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
660 						dn->ofs_in_node, count);
661 
662 	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
663 
664 	for (; count > 0; dn->ofs_in_node++) {
665 		block_t blkaddr = datablock_addr(dn->inode,
666 					dn->node_page, dn->ofs_in_node);
667 		if (blkaddr == NULL_ADDR) {
668 			dn->data_blkaddr = NEW_ADDR;
669 			__set_data_blkaddr(dn);
670 			count--;
671 		}
672 	}
673 
674 	if (set_page_dirty(dn->node_page))
675 		dn->node_changed = true;
676 	return 0;
677 }
678 
679 /* Should keep dn->ofs_in_node unchanged */
680 int f2fs_reserve_new_block(struct dnode_of_data *dn)
681 {
682 	unsigned int ofs_in_node = dn->ofs_in_node;
683 	int ret;
684 
685 	ret = f2fs_reserve_new_blocks(dn, 1);
686 	dn->ofs_in_node = ofs_in_node;
687 	return ret;
688 }
689 
690 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
691 {
692 	bool need_put = dn->inode_page ? false : true;
693 	int err;
694 
695 	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
696 	if (err)
697 		return err;
698 
699 	if (dn->data_blkaddr == NULL_ADDR)
700 		err = f2fs_reserve_new_block(dn);
701 	if (err || need_put)
702 		f2fs_put_dnode(dn);
703 	return err;
704 }
705 
706 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
707 {
708 	struct extent_info ei = {0,0,0};
709 	struct inode *inode = dn->inode;
710 
711 	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
712 		dn->data_blkaddr = ei.blk + index - ei.fofs;
713 		return 0;
714 	}
715 
716 	return f2fs_reserve_block(dn, index);
717 }
718 
719 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
720 						int op_flags, bool for_write)
721 {
722 	struct address_space *mapping = inode->i_mapping;
723 	struct dnode_of_data dn;
724 	struct page *page;
725 	struct extent_info ei = {0,0,0};
726 	int err;
727 
728 	page = f2fs_grab_cache_page(mapping, index, for_write);
729 	if (!page)
730 		return ERR_PTR(-ENOMEM);
731 
732 	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
733 		dn.data_blkaddr = ei.blk + index - ei.fofs;
734 		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
735 						DATA_GENERIC_ENHANCE_READ)) {
736 			err = -EFAULT;
737 			goto put_err;
738 		}
739 		goto got_it;
740 	}
741 
742 	set_new_dnode(&dn, inode, NULL, NULL, 0);
743 	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
744 	if (err)
745 		goto put_err;
746 	f2fs_put_dnode(&dn);
747 
748 	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
749 		err = -ENOENT;
750 		goto put_err;
751 	}
752 	if (dn.data_blkaddr != NEW_ADDR &&
753 			!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
754 						dn.data_blkaddr,
755 						DATA_GENERIC_ENHANCE)) {
756 		err = -EFAULT;
757 		goto put_err;
758 	}
759 got_it:
760 	if (PageUptodate(page)) {
761 		unlock_page(page);
762 		return page;
763 	}
764 
765 	/*
766 	 * A new dentry page is allocated but cannot be written, since its
767 	 * new inode page couldn't be allocated due to -ENOSPC.
768 	 * In such a case, its blkaddr can remain NEW_ADDR.
769 	 * See f2fs_add_link -> f2fs_get_new_data_page ->
770 	 * f2fs_init_inode_metadata.
771 	 */
772 	if (dn.data_blkaddr == NEW_ADDR) {
773 		zero_user_segment(page, 0, PAGE_SIZE);
774 		if (!PageUptodate(page))
775 			SetPageUptodate(page);
776 		unlock_page(page);
777 		return page;
778 	}
779 
780 	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
781 	if (err)
782 		goto put_err;
783 	return page;
784 
785 put_err:
786 	f2fs_put_page(page, 1);
787 	return ERR_PTR(err);
788 }
789 
790 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
791 {
792 	struct address_space *mapping = inode->i_mapping;
793 	struct page *page;
794 
795 	page = find_get_page(mapping, index);
796 	if (page && PageUptodate(page))
797 		return page;
798 	f2fs_put_page(page, 0);
799 
800 	page = f2fs_get_read_data_page(inode, index, 0, false);
801 	if (IS_ERR(page))
802 		return page;
803 
804 	if (PageUptodate(page))
805 		return page;
806 
807 	wait_on_page_locked(page);
808 	if (unlikely(!PageUptodate(page))) {
809 		f2fs_put_page(page, 0);
810 		return ERR_PTR(-EIO);
811 	}
812 	return page;
813 }
814 
815 /*
816  * If it tries to access a hole, return an error.
817  * The callers (functions in dir.c and GC) need to be able to tell
818  * whether this page exists or not.
819  */
820 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
821 							bool for_write)
822 {
823 	struct address_space *mapping = inode->i_mapping;
824 	struct page *page;
825 repeat:
826 	page = f2fs_get_read_data_page(inode, index, 0, for_write);
827 	if (IS_ERR(page))
828 		return page;
829 
830 	/* wait for read completion */
831 	lock_page(page);
832 	if (unlikely(page->mapping != mapping)) {
833 		f2fs_put_page(page, 1);
834 		goto repeat;
835 	}
836 	if (unlikely(!PageUptodate(page))) {
837 		f2fs_put_page(page, 1);
838 		return ERR_PTR(-EIO);
839 	}
840 	return page;
841 }
842 
843 /*
844  * Caller ensures that this data page is never allocated.
845  * A new zero-filled data page is allocated in the page cache.
846  *
847  * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
848  * f2fs_unlock_op().
849  * Note that ipage is set only by make_empty_dir, and if any error occurs,
850  * ipage should be released by this function.
851  */
852 struct page *f2fs_get_new_data_page(struct inode *inode,
853 		struct page *ipage, pgoff_t index, bool new_i_size)
854 {
855 	struct address_space *mapping = inode->i_mapping;
856 	struct page *page;
857 	struct dnode_of_data dn;
858 	int err;
859 
860 	page = f2fs_grab_cache_page(mapping, index, true);
861 	if (!page) {
862 		/*
863 		 * before exiting, we should make sure ipage will be released
864 		 * if any error occurs.
865 		 */
866 		f2fs_put_page(ipage, 1);
867 		return ERR_PTR(-ENOMEM);
868 	}
869 
870 	set_new_dnode(&dn, inode, ipage, NULL, 0);
871 	err = f2fs_reserve_block(&dn, index);
872 	if (err) {
873 		f2fs_put_page(page, 1);
874 		return ERR_PTR(err);
875 	}
876 	if (!ipage)
877 		f2fs_put_dnode(&dn);
878 
879 	if (PageUptodate(page))
880 		goto got_it;
881 
882 	if (dn.data_blkaddr == NEW_ADDR) {
883 		zero_user_segment(page, 0, PAGE_SIZE);
884 		if (!PageUptodate(page))
885 			SetPageUptodate(page);
886 	} else {
887 		f2fs_put_page(page, 1);
888 
889 		/* if ipage exists, blkaddr should be NEW_ADDR */
890 		f2fs_bug_on(F2FS_I_SB(inode), ipage);
891 		page = f2fs_get_lock_data_page(inode, index, true);
892 		if (IS_ERR(page))
893 			return page;
894 	}
895 got_it:
896 	if (new_i_size && i_size_read(inode) <
897 				((loff_t)(index + 1) << PAGE_SHIFT))
898 		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
899 	return page;
900 }
901 
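/*
 * Allocate one data block for @dn with the out-of-place (LFS) allocator
 * and record the new address in the node page.
 */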
902 static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
903 {
904 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
905 	struct f2fs_summary sum;
906 	struct node_info ni;
907 	block_t old_blkaddr;
908 	blkcnt_t count = 1;
909 	int err;
910 
911 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
912 		return -EPERM;
913 
914 	err = f2fs_get_node_info(sbi, dn->nid, &ni);
915 	if (err)
916 		return err;
917 
918 	dn->data_blkaddr = datablock_addr(dn->inode,
919 				dn->node_page, dn->ofs_in_node);
920 	if (dn->data_blkaddr != NULL_ADDR)
921 		goto alloc;
922 
923 	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
924 		return err;
925 
926 alloc:
927 	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
928 	old_blkaddr = dn->data_blkaddr;
929 	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
930 					&sum, seg_type, NULL, false);
931 	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
932 		invalidate_mapping_pages(META_MAPPING(sbi),
933 					old_blkaddr, old_blkaddr);
934 	f2fs_set_data_blkaddr(dn);
935 
936 	/*
937 	 * i_size will be updated by direct_IO. Otherwise, we'll get stale
938 	 * data from the unwritten block via dio_read.
939 	 */
940 	return 0;
941 }
942 
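/*
 * Called ahead of buffered/direct writes to reserve the needed blocks up
 * front; inline inodes are converted first when direct I/O is involved.
 */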
943 int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
944 {
945 	struct inode *inode = file_inode(iocb->ki_filp);
946 	struct f2fs_map_blocks map;
947 	int flag;
948 	int err = 0;
949 	bool direct_io = iocb->ki_flags & IOCB_DIRECT;
950 
951 	/* convert inline data for Direct I/O */
952 	if (direct_io) {
953 		err = f2fs_convert_inline_inode(inode);
954 		if (err)
955 			return err;
956 	}
957 
958 	if (direct_io && allow_outplace_dio(inode, iocb, from))
959 		return 0;
960 
961 	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
962 		return 0;
963 
964 	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
965 	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
966 	if (map.m_len > map.m_lblk)
967 		map.m_len -= map.m_lblk;
968 	else
969 		map.m_len = 0;
970 
971 	map.m_next_pgofs = NULL;
972 	map.m_next_extent = NULL;
973 	map.m_seg_type = NO_CHECK_TYPE;
974 	map.m_may_create = true;
975 
976 	if (direct_io) {
977 		map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
978 		flag = f2fs_force_buffered_io(inode, iocb, from) ?
979 					F2FS_GET_BLOCK_PRE_AIO :
980 					F2FS_GET_BLOCK_PRE_DIO;
981 		goto map_blocks;
982 	}
983 	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
984 		err = f2fs_convert_inline_inode(inode);
985 		if (err)
986 			return err;
987 	}
988 	if (f2fs_has_inline_data(inode))
989 		return err;
990 
991 	flag = F2FS_GET_BLOCK_PRE_AIO;
992 
993 map_blocks:
994 	err = f2fs_map_blocks(inode, &map, 1, flag);
995 	if (map.m_len > 0 && err == -ENOSPC) {
996 		if (!direct_io)
997 			set_inode_flag(inode, FI_NO_PREALLOC);
998 		err = 0;
999 	}
1000 	return err;
1001 }
1002 
1003 void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
1004 {
1005 	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
1006 		if (lock)
1007 			down_read(&sbi->node_change);
1008 		else
1009 			up_read(&sbi->node_change);
1010 	} else {
1011 		if (lock)
1012 			f2fs_lock_op(sbi);
1013 		else
1014 			f2fs_unlock_op(sbi);
1015 	}
1016 }
1017 
1018 /*
1019  * f2fs_map_blocks() supports readahead/bmap/rw direct_IO via the
1020  * f2fs_map_blocks structure.
1021  * If original data blocks are allocated, then give them to blockdev.
1022  * Otherwise,
1023  *     a. preallocate requested block addresses
1024  *     b. do not use extent cache for better performance
1025  *     c. give the block addresses to blockdev
1026  */
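/*
 * A minimal lookup-only sketch (fields set as in f2fs_overwrite_io()
 * below):
 *
 *	struct f2fs_map_blocks map;
 *
 *	map.m_lblk = index;
 *	map.m_len = 1;
 *	map.m_next_pgofs = NULL;
 *	map.m_next_extent = NULL;
 *	map.m_seg_type = NO_CHECK_TYPE;
 *	map.m_may_create = false;
 *	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
 *
 * On success, map.m_pblk and map.m_len describe the mapped extent.
 */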
1027 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
1028 						int create, int flag)
1029 {
1030 	unsigned int maxblocks = map->m_len;
1031 	struct dnode_of_data dn;
1032 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1033 	int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
1034 	pgoff_t pgofs, end_offset, end;
1035 	int err = 0, ofs = 1;
1036 	unsigned int ofs_in_node, last_ofs_in_node;
1037 	blkcnt_t prealloc;
1038 	struct extent_info ei = {0,0,0};
1039 	block_t blkaddr;
1040 	unsigned int start_pgofs;
1041 
1042 	if (!maxblocks)
1043 		return 0;
1044 
1045 	map->m_len = 0;
1046 	map->m_flags = 0;
1047 
1048 	/* it only supports block size == page size */
1049 	pgofs = (pgoff_t)map->m_lblk;
1050 	end = pgofs + maxblocks;
1051 
1052 	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
1053 		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
1054 							map->m_may_create)
1055 			goto next_dnode;
1056 
1057 		map->m_pblk = ei.blk + pgofs - ei.fofs;
1058 		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
1059 		map->m_flags = F2FS_MAP_MAPPED;
1060 		if (map->m_next_extent)
1061 			*map->m_next_extent = pgofs + map->m_len;
1062 
1063 		/* for hardware encryption, and to avoid potential issues in the future */
1064 		if (flag == F2FS_GET_BLOCK_DIO)
1065 			f2fs_wait_on_block_writeback_range(inode,
1066 						map->m_pblk, map->m_len);
1067 		goto out;
1068 	}
1069 
1070 next_dnode:
1071 	if (map->m_may_create)
1072 		__do_map_lock(sbi, flag, true);
1073 
1074 	/* When reading holes, we need its node page */
1075 	set_new_dnode(&dn, inode, NULL, NULL, 0);
1076 	err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
1077 	if (err) {
1078 		if (flag == F2FS_GET_BLOCK_BMAP)
1079 			map->m_pblk = 0;
1080 		if (err == -ENOENT) {
1081 			err = 0;
1082 			if (map->m_next_pgofs)
1083 				*map->m_next_pgofs =
1084 					f2fs_get_next_page_offset(&dn, pgofs);
1085 			if (map->m_next_extent)
1086 				*map->m_next_extent =
1087 					f2fs_get_next_page_offset(&dn, pgofs);
1088 		}
1089 		goto unlock_out;
1090 	}
1091 
1092 	start_pgofs = pgofs;
1093 	prealloc = 0;
1094 	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
1095 	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1096 
1097 next_block:
1098 	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
1099 
1100 	if (__is_valid_data_blkaddr(blkaddr) &&
1101 		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
1102 		err = -EFAULT;
1103 		goto sync_out;
1104 	}
1105 
1106 	if (__is_valid_data_blkaddr(blkaddr)) {
1107 		/* use out-of-place update for direct IO under LFS mode */
1108 		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
1109 							map->m_may_create) {
1110 			err = __allocate_data_block(&dn, map->m_seg_type);
1111 			if (!err) {
1112 				blkaddr = dn.data_blkaddr;
1113 				set_inode_flag(inode, FI_APPEND_WRITE);
1114 			}
1115 		}
1116 	} else {
1117 		if (create) {
1118 			if (unlikely(f2fs_cp_error(sbi))) {
1119 				err = -EIO;
1120 				goto sync_out;
1121 			}
1122 			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
1123 				if (blkaddr == NULL_ADDR) {
1124 					prealloc++;
1125 					last_ofs_in_node = dn.ofs_in_node;
1126 				}
1127 			} else {
1128 				WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
1129 					flag != F2FS_GET_BLOCK_DIO);
1130 				err = __allocate_data_block(&dn,
1131 							map->m_seg_type);
1132 				if (!err)
1133 					set_inode_flag(inode, FI_APPEND_WRITE);
1134 			}
1135 			if (err)
1136 				goto sync_out;
1137 			map->m_flags |= F2FS_MAP_NEW;
1138 			blkaddr = dn.data_blkaddr;
1139 		} else {
1140 			if (flag == F2FS_GET_BLOCK_BMAP) {
1141 				map->m_pblk = 0;
1142 				goto sync_out;
1143 			}
1144 			if (flag == F2FS_GET_BLOCK_PRECACHE)
1145 				goto sync_out;
1146 			if (flag == F2FS_GET_BLOCK_FIEMAP &&
1147 						blkaddr == NULL_ADDR) {
1148 				if (map->m_next_pgofs)
1149 					*map->m_next_pgofs = pgofs + 1;
1150 				goto sync_out;
1151 			}
1152 			if (flag != F2FS_GET_BLOCK_FIEMAP) {
1153 				/* for defragment case */
1154 				if (map->m_next_pgofs)
1155 					*map->m_next_pgofs = pgofs + 1;
1156 				goto sync_out;
1157 			}
1158 		}
1159 	}
1160 
1161 	if (flag == F2FS_GET_BLOCK_PRE_AIO)
1162 		goto skip;
1163 
1164 	if (map->m_len == 0) {
1165 		/* preallocated unwritten block should be mapped for fiemap. */
1166 		if (blkaddr == NEW_ADDR)
1167 			map->m_flags |= F2FS_MAP_UNWRITTEN;
1168 		map->m_flags |= F2FS_MAP_MAPPED;
1169 
1170 		map->m_pblk = blkaddr;
1171 		map->m_len = 1;
1172 	} else if ((map->m_pblk != NEW_ADDR &&
1173 			blkaddr == (map->m_pblk + ofs)) ||
1174 			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
1175 			flag == F2FS_GET_BLOCK_PRE_DIO) {
1176 		ofs++;
1177 		map->m_len++;
1178 	} else {
1179 		goto sync_out;
1180 	}
1181 
1182 skip:
1183 	dn.ofs_in_node++;
1184 	pgofs++;
1185 
1186 	/* preallocate blocks in batch for one dnode page */
1187 	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
1188 			(pgofs == end || dn.ofs_in_node == end_offset)) {
1189 
1190 		dn.ofs_in_node = ofs_in_node;
1191 		err = f2fs_reserve_new_blocks(&dn, prealloc);
1192 		if (err)
1193 			goto sync_out;
1194 
1195 		map->m_len += dn.ofs_in_node - ofs_in_node;
1196 		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
1197 			err = -ENOSPC;
1198 			goto sync_out;
1199 		}
1200 		dn.ofs_in_node = end_offset;
1201 	}
1202 
1203 	if (pgofs >= end)
1204 		goto sync_out;
1205 	else if (dn.ofs_in_node < end_offset)
1206 		goto next_block;
1207 
1208 	if (flag == F2FS_GET_BLOCK_PRECACHE) {
1209 		if (map->m_flags & F2FS_MAP_MAPPED) {
1210 			unsigned int ofs = start_pgofs - map->m_lblk;
1211 
1212 			f2fs_update_extent_cache_range(&dn,
1213 				start_pgofs, map->m_pblk + ofs,
1214 				map->m_len - ofs);
1215 		}
1216 	}
1217 
1218 	f2fs_put_dnode(&dn);
1219 
1220 	if (map->m_may_create) {
1221 		__do_map_lock(sbi, flag, false);
1222 		f2fs_balance_fs(sbi, dn.node_changed);
1223 	}
1224 	goto next_dnode;
1225 
1226 sync_out:
1227 
1228 	/* for hardware encryption, and to avoid potential issues in the future */
1229 	if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
1230 		f2fs_wait_on_block_writeback_range(inode,
1231 						map->m_pblk, map->m_len);
1232 
1233 	if (flag == F2FS_GET_BLOCK_PRECACHE) {
1234 		if (map->m_flags & F2FS_MAP_MAPPED) {
1235 			unsigned int ofs = start_pgofs - map->m_lblk;
1236 
1237 			f2fs_update_extent_cache_range(&dn,
1238 				start_pgofs, map->m_pblk + ofs,
1239 				map->m_len - ofs);
1240 		}
1241 		if (map->m_next_extent)
1242 			*map->m_next_extent = pgofs + 1;
1243 	}
1244 	f2fs_put_dnode(&dn);
1245 unlock_out:
1246 	if (map->m_may_create) {
1247 		__do_map_lock(sbi, flag, false);
1248 		f2fs_balance_fs(sbi, dn.node_changed);
1249 	}
1250 out:
1251 	trace_f2fs_map_blocks(inode, map, err);
1252 	return err;
1253 }
1254 
1255 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1256 {
1257 	struct f2fs_map_blocks map;
1258 	block_t last_lblk;
1259 	int err;
1260 
1261 	if (pos + len > i_size_read(inode))
1262 		return false;
1263 
1264 	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1265 	map.m_next_pgofs = NULL;
1266 	map.m_next_extent = NULL;
1267 	map.m_seg_type = NO_CHECK_TYPE;
1268 	map.m_may_create = false;
1269 	last_lblk = F2FS_BLK_ALIGN(pos + len);
1270 
1271 	while (map.m_lblk < last_lblk) {
1272 		map.m_len = last_lblk - map.m_lblk;
1273 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
1274 		if (err || map.m_len == 0)
1275 			return false;
1276 		map.m_lblk += map.m_len;
1277 	}
1278 	return true;
1279 }
1280 
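/*
 * Adapter between the buffer_head-based callers (bmap, fiemap, direct
 * I/O) and f2fs_map_blocks(); the result is translated back into
 * bh->b_state and bh->b_size.
 */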
1281 static int __get_data_block(struct inode *inode, sector_t iblock,
1282 			struct buffer_head *bh, int create, int flag,
1283 			pgoff_t *next_pgofs, int seg_type, bool may_write)
1284 {
1285 	struct f2fs_map_blocks map;
1286 	int err;
1287 
1288 	map.m_lblk = iblock;
1289 	map.m_len = bh->b_size >> inode->i_blkbits;
1290 	map.m_next_pgofs = next_pgofs;
1291 	map.m_next_extent = NULL;
1292 	map.m_seg_type = seg_type;
1293 	map.m_may_create = may_write;
1294 
1295 	err = f2fs_map_blocks(inode, &map, create, flag);
1296 	if (!err) {
1297 		map_bh(bh, inode->i_sb, map.m_pblk);
1298 		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
1299 		bh->b_size = (u64)map.m_len << inode->i_blkbits;
1300 	}
1301 	return err;
1302 }
1303 
1304 static int get_data_block(struct inode *inode, sector_t iblock,
1305 			struct buffer_head *bh_result, int create, int flag,
1306 			pgoff_t *next_pgofs)
1307 {
1308 	return __get_data_block(inode, iblock, bh_result, create,
1309 							flag, next_pgofs,
1310 							NO_CHECK_TYPE, create);
1311 }
1312 
1313 static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
1314 			struct buffer_head *bh_result, int create)
1315 {
1316 	return __get_data_block(inode, iblock, bh_result, create,
1317 				F2FS_GET_BLOCK_DIO, NULL,
1318 				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1319 				true);
1320 }
1321 
1322 static int get_data_block_dio(struct inode *inode, sector_t iblock,
1323 			struct buffer_head *bh_result, int create)
1324 {
1325 	return __get_data_block(inode, iblock, bh_result, create,
1326 				F2FS_GET_BLOCK_DIO, NULL,
1327 				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1328 				false);
1329 }
1330 
1331 static int get_data_block_bmap(struct inode *inode, sector_t iblock,
1332 			struct buffer_head *bh_result, int create)
1333 {
1334 	/* Block number must be less than the max file block count */
1335 	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
1336 		return -EFBIG;
1337 
1338 	return __get_data_block(inode, iblock, bh_result, create,
1339 						F2FS_GET_BLOCK_BMAP, NULL,
1340 						NO_CHECK_TYPE, create);
1341 }
1342 
1343 static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
1344 {
1345 	return (offset >> inode->i_blkbits);
1346 }
1347 
1348 static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
1349 {
1350 	return (blk << inode->i_blkbits);
1351 }
1352 
1353 static int f2fs_xattr_fiemap(struct inode *inode,
1354 				struct fiemap_extent_info *fieinfo)
1355 {
1356 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1357 	struct page *page;
1358 	struct node_info ni;
1359 	__u64 phys = 0, len;
1360 	__u32 flags;
1361 	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1362 	int err = 0;
1363 
1364 	if (f2fs_has_inline_xattr(inode)) {
1365 		int offset;
1366 
1367 		page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1368 						inode->i_ino, false);
1369 		if (!page)
1370 			return -ENOMEM;
1371 
1372 		err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
1373 		if (err) {
1374 			f2fs_put_page(page, 1);
1375 			return err;
1376 		}
1377 
1378 		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
1379 		offset = offsetof(struct f2fs_inode, i_addr) +
1380 					sizeof(__le32) * (DEF_ADDRS_PER_INODE -
1381 					get_inline_xattr_addrs(inode));
1382 
1383 		phys += offset;
1384 		len = inline_xattr_size(inode);
1385 
1386 		f2fs_put_page(page, 1);
1387 
1388 		flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1389 
1390 		if (!xnid)
1391 			flags |= FIEMAP_EXTENT_LAST;
1392 
1393 		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1394 		if (err || err == 1)
1395 			return err;
1396 	}
1397 
1398 	if (xnid) {
1399 		page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1400 		if (!page)
1401 			return -ENOMEM;
1402 
1403 		err = f2fs_get_node_info(sbi, xnid, &ni);
1404 		if (err) {
1405 			f2fs_put_page(page, 1);
1406 			return err;
1407 		}
1408 
1409 		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
1410 		len = inode->i_sb->s_blocksize;
1411 
1412 		f2fs_put_page(page, 1);
1413 
1414 		flags = FIEMAP_EXTENT_LAST;
1415 	}
1416 
1417 	if (phys)
1418 		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1419 
1420 	return (err < 0 ? err : 0);
1421 }
1422 
1423 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1424 		u64 start, u64 len)
1425 {
1426 	struct buffer_head map_bh;
1427 	sector_t start_blk, last_blk;
1428 	pgoff_t next_pgofs;
1429 	u64 logical = 0, phys = 0, size = 0;
1430 	u32 flags = 0;
1431 	int ret = 0;
1432 
1433 	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1434 		ret = f2fs_precache_extents(inode);
1435 		if (ret)
1436 			return ret;
1437 	}
1438 
1439 	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR);
1440 	if (ret)
1441 		return ret;
1442 
1443 	inode_lock(inode);
1444 
1445 	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1446 		ret = f2fs_xattr_fiemap(inode, fieinfo);
1447 		goto out;
1448 	}
1449 
1450 	if (f2fs_has_inline_data(inode)) {
1451 		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
1452 		if (ret != -EAGAIN)
1453 			goto out;
1454 	}
1455 
1456 	if (logical_to_blk(inode, len) == 0)
1457 		len = blk_to_logical(inode, 1);
1458 
1459 	start_blk = logical_to_blk(inode, start);
1460 	last_blk = logical_to_blk(inode, start + len - 1);
1461 
1462 next:
1463 	memset(&map_bh, 0, sizeof(struct buffer_head));
1464 	map_bh.b_size = len;
1465 
1466 	ret = get_data_block(inode, start_blk, &map_bh, 0,
1467 					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
1468 	if (ret)
1469 		goto out;
1470 
1471 	/* HOLE */
1472 	if (!buffer_mapped(&map_bh)) {
1473 		start_blk = next_pgofs;
1474 
1475 		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
1476 					F2FS_I_SB(inode)->max_file_blocks))
1477 			goto prep_next;
1478 
1479 		flags |= FIEMAP_EXTENT_LAST;
1480 	}
1481 
1482 	if (size) {
1483 		if (IS_ENCRYPTED(inode))
1484 			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
1485 
1486 		ret = fiemap_fill_next_extent(fieinfo, logical,
1487 				phys, size, flags);
1488 	}
1489 
1490 	if (start_blk > last_blk || ret)
1491 		goto out;
1492 
1493 	logical = blk_to_logical(inode, start_blk);
1494 	phys = blk_to_logical(inode, map_bh.b_blocknr);
1495 	size = map_bh.b_size;
1496 	flags = 0;
1497 	if (buffer_unwritten(&map_bh))
1498 		flags = FIEMAP_EXTENT_UNWRITTEN;
1499 
1500 	start_blk += logical_to_blk(inode, size);
1501 
1502 prep_next:
1503 	cond_resched();
1504 	if (fatal_signal_pending(current))
1505 		ret = -EINTR;
1506 	else
1507 		goto next;
1508 out:
1509 	if (ret == 1)
1510 		ret = 0;
1511 
1512 	inode_unlock(inode);
1513 	return ret;
1514 }
1515 
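/*
 * Read one page: reuse the previous f2fs_map_blocks() result when the
 * page falls inside it, zero out holes and pages beyond EOF, and batch
 * physically contiguous blocks into *bio_ret across calls.
 */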
1516 static int f2fs_read_single_page(struct inode *inode, struct page *page,
1517 					unsigned nr_pages,
1518 					struct f2fs_map_blocks *map,
1519 					struct bio **bio_ret,
1520 					sector_t *last_block_in_bio,
1521 					bool is_readahead)
1522 {
1523 	struct bio *bio = *bio_ret;
1524 	const unsigned blkbits = inode->i_blkbits;
1525 	const unsigned blocksize = 1 << blkbits;
1526 	sector_t block_in_file;
1527 	sector_t last_block;
1528 	sector_t last_block_in_file;
1529 	sector_t block_nr;
1530 	int ret = 0;
1531 
1532 	block_in_file = (sector_t)page->index;
1533 	last_block = block_in_file + nr_pages;
1534 	last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
1535 							blkbits;
1536 	if (last_block > last_block_in_file)
1537 		last_block = last_block_in_file;
1538 
1539 	/* just zero out the page which is beyond EOF */
1540 	if (block_in_file >= last_block)
1541 		goto zero_out;
1542 	/*
1543 	 * Map blocks using the previous result first.
1544 	 */
1545 	if ((map->m_flags & F2FS_MAP_MAPPED) &&
1546 			block_in_file > map->m_lblk &&
1547 			block_in_file < (map->m_lblk + map->m_len))
1548 		goto got_it;
1549 
1550 	/*
1551 	 * Then do more f2fs_map_blocks() calls until we are
1552 	 * done with this page.
1553 	 */
1554 	map->m_lblk = block_in_file;
1555 	map->m_len = last_block - block_in_file;
1556 
1557 	ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
1558 	if (ret)
1559 		goto out;
1560 got_it:
1561 	if ((map->m_flags & F2FS_MAP_MAPPED)) {
1562 		block_nr = map->m_pblk + block_in_file - map->m_lblk;
1563 		SetPageMappedToDisk(page);
1564 
1565 		if (!PageUptodate(page) && !cleancache_get_page(page)) {
1566 			SetPageUptodate(page);
1567 			goto confused;
1568 		}
1569 
1570 		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
1571 						DATA_GENERIC_ENHANCE_READ)) {
1572 			ret = -EFAULT;
1573 			goto out;
1574 		}
1575 	} else {
1576 zero_out:
1577 		zero_user_segment(page, 0, PAGE_SIZE);
1578 		if (!PageUptodate(page))
1579 			SetPageUptodate(page);
1580 		unlock_page(page);
1581 		goto out;
1582 	}
1583 
1584 	/*
1585 	 * This page will go to BIO.  Do we need to send this
1586 	 * BIO off first?
1587 	 */
1588 	if (bio && (*last_block_in_bio != block_nr - 1 ||
1589 		!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
1590 submit_and_realloc:
1591 		__submit_bio(F2FS_I_SB(inode), bio, DATA);
1592 		bio = NULL;
1593 	}
1594 	if (bio == NULL) {
1595 		bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
1596 				is_readahead ? REQ_RAHEAD : 0);
1597 		if (IS_ERR(bio)) {
1598 			ret = PTR_ERR(bio);
1599 			bio = NULL;
1600 			goto out;
1601 		}
1602 	}
1603 
1604 	/*
1605 	 * If the page is under writeback, we need to wait for
1606 	 * its completion to see the correct decrypted data.
1607 	 */
1608 	f2fs_wait_on_block_writeback(inode, block_nr);
1609 
1610 	if (bio_add_page(bio, page, blocksize, 0) < blocksize)
1611 		goto submit_and_realloc;
1612 
1613 	inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
1614 	ClearPageError(page);
1615 	*last_block_in_bio = block_nr;
1616 	goto out;
1617 confused:
1618 	if (bio) {
1619 		__submit_bio(F2FS_I_SB(inode), bio, DATA);
1620 		bio = NULL;
1621 	}
1622 	unlock_page(page);
1623 out:
1624 	*bio_ret = bio;
1625 	return ret;
1626 }
1627 
1628 /*
1629  * This function was originally taken from fs/mpage.c, and customized for f2fs.
1630  * The major change is that f2fs uses block_size == page_size by default.
1631  *
1632  * Note that the aops->readpages() function is ONLY used for read-ahead. If
1633  * this function ever deviates from doing just read-ahead, it should either
1634  * use ->readpage() or do the necessary surgery to decouple ->readpages()
1635  * from read-ahead.
1636  */
1637 static int f2fs_mpage_readpages(struct address_space *mapping,
1638 			struct list_head *pages, struct page *page,
1639 			unsigned nr_pages, bool is_readahead)
1640 {
1641 	struct bio *bio = NULL;
1642 	sector_t last_block_in_bio = 0;
1643 	struct inode *inode = mapping->host;
1644 	struct f2fs_map_blocks map;
1645 	int ret = 0;
1646 
1647 	map.m_pblk = 0;
1648 	map.m_lblk = 0;
1649 	map.m_len = 0;
1650 	map.m_flags = 0;
1651 	map.m_next_pgofs = NULL;
1652 	map.m_next_extent = NULL;
1653 	map.m_seg_type = NO_CHECK_TYPE;
1654 	map.m_may_create = false;
1655 
1656 	for (; nr_pages; nr_pages--) {
1657 		if (pages) {
1658 			page = list_last_entry(pages, struct page, lru);
1659 
1660 			prefetchw(&page->flags);
1661 			list_del(&page->lru);
1662 			if (add_to_page_cache_lru(page, mapping,
1663 						  page->index,
1664 						  readahead_gfp_mask(mapping)))
1665 				goto next_page;
1666 		}
1667 
1668 		ret = f2fs_read_single_page(inode, page, nr_pages, &map, &bio,
1669 					&last_block_in_bio, is_readahead);
1670 		if (ret) {
1671 			SetPageError(page);
1672 			zero_user_segment(page, 0, PAGE_SIZE);
1673 			unlock_page(page);
1674 		}
1675 next_page:
1676 		if (pages)
1677 			put_page(page);
1678 	}
1679 	BUG_ON(pages && !list_empty(pages));
1680 	if (bio)
1681 		__submit_bio(F2FS_I_SB(inode), bio, DATA);
1682 	return pages ? 0 : ret;
1683 }
1684 
1685 static int f2fs_read_data_page(struct file *file, struct page *page)
1686 {
1687 	struct inode *inode = page->mapping->host;
1688 	int ret = -EAGAIN;
1689 
1690 	trace_f2fs_readpage(page, DATA);
1691 
1692 	/* If the file has inline data, try to read it directly */
1693 	if (f2fs_has_inline_data(inode))
1694 		ret = f2fs_read_inline_data(inode, page);
1695 	if (ret == -EAGAIN)
1696 		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1, false);
1697 	return ret;
1698 }
1699 
1700 static int f2fs_read_data_pages(struct file *file,
1701 			struct address_space *mapping,
1702 			struct list_head *pages, unsigned nr_pages)
1703 {
1704 	struct inode *inode = mapping->host;
1705 	struct page *page = list_last_entry(pages, struct page, lru);
1706 
1707 	trace_f2fs_readpages(inode, page, nr_pages);
1708 
1709 	/* If the file has inline data, skip readpages */
1710 	if (f2fs_has_inline_data(inode))
1711 		return 0;
1712 
1713 	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true);
1714 }
1715 
1716 static int encrypt_one_page(struct f2fs_io_info *fio)
1717 {
1718 	struct inode *inode = fio->page->mapping->host;
1719 	struct page *mpage;
1720 	gfp_t gfp_flags = GFP_NOFS;
1721 
1722 	if (!f2fs_encrypted_file(inode))
1723 		return 0;
1724 
1725 	/* wait for GCed page writeback via META_MAPPING */
1726 	f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
1727 
1728 retry_encrypt:
1729 	fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(fio->page,
1730 							       PAGE_SIZE, 0,
1731 							       gfp_flags);
1732 	if (IS_ERR(fio->encrypted_page)) {
1733 		/* flush pending IOs and wait for a while in the ENOMEM case */
1734 		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
1735 			f2fs_flush_merged_writes(fio->sbi);
1736 			congestion_wait(BLK_RW_ASYNC, HZ/50);
1737 			gfp_flags |= __GFP_NOFAIL;
1738 			goto retry_encrypt;
1739 		}
1740 		return PTR_ERR(fio->encrypted_page);
1741 	}
1742 
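	/*
	 * If GC kept an encrypted copy of this block in META_MAPPING,
	 * refresh that copy so it matches the newly encrypted data.
	 */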
1743 	mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
1744 	if (mpage) {
1745 		if (PageUptodate(mpage))
1746 			memcpy(page_address(mpage),
1747 				page_address(fio->encrypted_page), PAGE_SIZE);
1748 		f2fs_put_page(mpage, 1);
1749 	}
1750 	return 0;
1751 }
1752 
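/*
 * Evaluate the in-place-update policy bits in SM_I(sbi)->ipu_policy;
 * any matching F2FS_IPU_* trigger permits rewriting the block in place.
 */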
1753 static inline bool check_inplace_update_policy(struct inode *inode,
1754 				struct f2fs_io_info *fio)
1755 {
1756 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1757 	unsigned int policy = SM_I(sbi)->ipu_policy;
1758 
1759 	if (policy & (0x1 << F2FS_IPU_FORCE))
1760 		return true;
1761 	if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
1762 		return true;
1763 	if (policy & (0x1 << F2FS_IPU_UTIL) &&
1764 			utilization(sbi) > SM_I(sbi)->min_ipu_util)
1765 		return true;
1766 	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
1767 			utilization(sbi) > SM_I(sbi)->min_ipu_util)
1768 		return true;
1769 
1770 	/*
1771 	 * IPU for rewriting async pages
1772 	 */
1773 	if (policy & (0x1 << F2FS_IPU_ASYNC) &&
1774 			fio && fio->op == REQ_OP_WRITE &&
1775 			!(fio->op_flags & REQ_SYNC) &&
1776 			!IS_ENCRYPTED(inode))
1777 		return true;
1778 
1779 	/* this is only set during fdatasync */
1780 	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
1781 			is_inode_flag_set(inode, FI_NEED_IPU))
1782 		return true;
1783 
1784 	if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
1785 			!f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
1786 		return true;
1787 
1788 	return false;
1789 }
1790 
1791 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
1792 {
1793 	if (f2fs_is_pinned_file(inode))
1794 		return true;
1795 
1796 	/* if this is a cold file, we should overwrite to avoid fragmentation */
1797 	if (file_is_cold(inode))
1798 		return true;
1799 
1800 	return check_inplace_update_policy(inode, fio);
1801 }
1802 
1803 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
1804 {
1805 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1806 
1807 	if (test_opt(sbi, LFS))
1808 		return true;
1809 	if (S_ISDIR(inode->i_mode))
1810 		return true;
1811 	if (IS_NOQUOTA(inode))
1812 		return true;
1813 	if (f2fs_is_atomic_file(inode))
1814 		return true;
1815 	if (fio) {
1816 		if (is_cold_data(fio->page))
1817 			return true;
1818 		if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
1819 			return true;
1820 		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
1821 			f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
1822 			return true;
1823 	}
1824 	return false;
1825 }
1826 
1827 static inline bool need_inplace_update(struct f2fs_io_info *fio)
1828 {
1829 	struct inode *inode = fio->page->mapping->host;
1830 
1831 	if (f2fs_should_update_outplace(inode, fio))
1832 		return false;
1833 
1834 	return f2fs_should_update_inplace(inode, fio);
1835 }
1836 
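/*
 * Write one data page either in place (IPU) or out of place (OPU, the
 * default LFS-style path), taking f2fs_lock_op() as dictated by
 * fio->need_lock.
 */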
1837 int f2fs_do_write_data_page(struct f2fs_io_info *fio)
1838 {
1839 	struct page *page = fio->page;
1840 	struct inode *inode = page->mapping->host;
1841 	struct dnode_of_data dn;
1842 	struct extent_info ei = {0,0,0};
1843 	struct node_info ni;
1844 	bool ipu_force = false;
1845 	int err = 0;
1846 
1847 	set_new_dnode(&dn, inode, NULL, NULL, 0);
1848 	if (need_inplace_update(fio) &&
1849 			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
1850 		fio->old_blkaddr = ei.blk + page->index - ei.fofs;
1851 
1852 		if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
1853 						DATA_GENERIC_ENHANCE))
1854 			return -EFAULT;
1855 
1856 		ipu_force = true;
1857 		fio->need_lock = LOCK_DONE;
1858 		goto got_it;
1859 	}
1860 
1861 	/* avoid deadlock between page->lock and f2fs_lock_op */
1862 	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
1863 		return -EAGAIN;
1864 
1865 	err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
1866 	if (err)
1867 		goto out;
1868 
1869 	fio->old_blkaddr = dn.data_blkaddr;
1870 
1871 	/* This page is already truncated */
1872 	if (fio->old_blkaddr == NULL_ADDR) {
1873 		ClearPageUptodate(page);
1874 		clear_cold_data(page);
1875 		goto out_writepage;
1876 	}
1877 got_it:
1878 	if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
1879 		!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
1880 						DATA_GENERIC_ENHANCE)) {
1881 		err = -EFAULT;
1882 		goto out_writepage;
1883 	}
1884 	/*
1885 	 * If the current allocation needs SSR,
1886 	 * it is better to do in-place writes for the updated data.
1887 	 */
1888 	if (ipu_force ||
1889 		(__is_valid_data_blkaddr(fio->old_blkaddr) &&
1890 					need_inplace_update(fio))) {
1891 		err = encrypt_one_page(fio);
1892 		if (err)
1893 			goto out_writepage;
1894 
1895 		set_page_writeback(page);
1896 		ClearPageError(page);
1897 		f2fs_put_dnode(&dn);
1898 		if (fio->need_lock == LOCK_REQ)
1899 			f2fs_unlock_op(fio->sbi);
1900 		err = f2fs_inplace_write_data(fio);
1901 		if (err) {
1902 			if (f2fs_encrypted_file(inode))
1903 				fscrypt_finalize_bounce_page(&fio->encrypted_page);
1904 			if (PageWriteback(page))
1905 				end_page_writeback(page);
1906 		} else {
1907 			set_inode_flag(inode, FI_UPDATE_WRITE);
1908 		}
1909 		trace_f2fs_do_write_data_page(fio->page, IPU);
1910 		return err;
1911 	}
1912 
1913 	if (fio->need_lock == LOCK_RETRY) {
1914 		if (!f2fs_trylock_op(fio->sbi)) {
1915 			err = -EAGAIN;
1916 			goto out_writepage;
1917 		}
1918 		fio->need_lock = LOCK_REQ;
1919 	}
1920 
1921 	err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
1922 	if (err)
1923 		goto out_writepage;
1924 
1925 	fio->version = ni.version;
1926 
1927 	err = encrypt_one_page(fio);
1928 	if (err)
1929 		goto out_writepage;
1930 
1931 	set_page_writeback(page);
1932 	ClearPageError(page);
1933 
1934 	/* LFS mode write path */
1935 	f2fs_outplace_write_data(&dn, fio);
1936 	trace_f2fs_do_write_data_page(page, OPU);
1937 	set_inode_flag(inode, FI_APPEND_WRITE);
1938 	if (page->index == 0)
1939 		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
1940 out_writepage:
1941 	f2fs_put_dnode(&dn);
1942 out:
1943 	if (fio->need_lock == LOCK_REQ)
1944 		f2fs_unlock_op(fio->sbi);
1945 	return err;
1946 }
1947 
1948 static int __write_data_page(struct page *page, bool *submitted,
1949 				struct writeback_control *wbc,
1950 				enum iostat_type io_type)
1951 {
1952 	struct inode *inode = page->mapping->host;
1953 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1954 	loff_t i_size = i_size_read(inode);
1955 	const pgoff_t end_index = ((unsigned long long) i_size)
1956 							>> PAGE_SHIFT;
1957 	loff_t psize = ((loff_t)(page->index + 1)) << PAGE_SHIFT;
1958 	unsigned offset = 0;
1959 	bool need_balance_fs = false;
1960 	int err = 0;
1961 	struct f2fs_io_info fio = {
1962 		.sbi = sbi,
1963 		.ino = inode->i_ino,
1964 		.type = DATA,
1965 		.op = REQ_OP_WRITE,
1966 		.op_flags = wbc_to_write_flags(wbc),
1967 		.old_blkaddr = NULL_ADDR,
1968 		.page = page,
1969 		.encrypted_page = NULL,
1970 		.submitted = false,
1971 		.need_lock = LOCK_RETRY,
1972 		.io_type = io_type,
1973 		.io_wbc = wbc,
1974 	};
1975 
1976 	trace_f2fs_writepage(page, DATA);
1977 
1978 	/* we should bypass data pages to let the kworker jobs proceed */
1979 	if (unlikely(f2fs_cp_error(sbi))) {
1980 		mapping_set_error(page->mapping, -EIO);
1981 		/*
1982 		 * don't drop any dirty dentry pages, to keep the latest
1983 		 * directory structure.
1984 		 */
1985 		if (S_ISDIR(inode->i_mode))
1986 			goto redirty_out;
1987 		goto out;
1988 	}
1989 
1990 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1991 		goto redirty_out;
1992 
1993 	if (page->index < end_index)
1994 		goto write;
1995 
1996 	/*
1997 	 * If the offset is beyond the end of the file,
1998 	 * this page does not have to be written to disk.
1999 	 */
2000 	offset = i_size & (PAGE_SIZE - 1);
2001 	if ((page->index >= end_index + 1) || !offset)
2002 		goto out;
2003 
2004 	zero_user_segment(page, offset, PAGE_SIZE);
2005 write:
2006 	if (f2fs_is_drop_cache(inode))
2007 		goto out;
2008 	/* we should not write the 0'th page, which holds the journal header */
2009 	if (f2fs_is_volatile_file(inode) && (!page->index ||
2010 			(!wbc->for_reclaim &&
2011 			f2fs_available_free_memory(sbi, BASE_CHECK))))
2012 		goto redirty_out;
2013 
2014 	/* Dentry blocks are controlled by checkpoint */
2015 	if (S_ISDIR(inode->i_mode)) {
2016 		fio.need_lock = LOCK_DONE;
2017 		err = f2fs_do_write_data_page(&fio);
2018 		goto done;
2019 	}
2020 
2021 	if (!wbc->for_reclaim)
2022 		need_balance_fs = true;
2023 	else if (has_not_enough_free_secs(sbi, 0, 0))
2024 		goto redirty_out;
2025 	else
2026 		set_inode_flag(inode, FI_HOT_DATA);
2027 
2028 	err = -EAGAIN;
2029 	if (f2fs_has_inline_data(inode)) {
2030 		err = f2fs_write_inline_data(inode, page);
2031 		if (!err)
2032 			goto out;
2033 	}
2034 
2035 	if (err == -EAGAIN) {
2036 		err = f2fs_do_write_data_page(&fio);
2037 		if (err == -EAGAIN) {
2038 			fio.need_lock = LOCK_REQ;
2039 			err = f2fs_do_write_data_page(&fio);
2040 		}
2041 	}
2042 
2043 	if (err) {
2044 		file_set_keep_isize(inode);
2045 	} else {
2046 		down_write(&F2FS_I(inode)->i_sem);
2047 		if (F2FS_I(inode)->last_disk_size < psize)
2048 			F2FS_I(inode)->last_disk_size = psize;
2049 		up_write(&F2FS_I(inode)->i_sem);
2050 	}
2051 
2052 done:
2053 	if (err && err != -ENOENT)
2054 		goto redirty_out;
2055 
2056 out:
2057 	inode_dec_dirty_pages(inode);
2058 	if (err) {
2059 		ClearPageUptodate(page);
2060 		clear_cold_data(page);
2061 	}
2062 
2063 	if (wbc->for_reclaim) {
2064 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
2065 		clear_inode_flag(inode, FI_HOT_DATA);
2066 		f2fs_remove_dirty_inode(inode);
2067 		submitted = NULL;
2068 	}
2069 
2070 	unlock_page(page);
2071 	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
2072 					!F2FS_I(inode)->cp_task)
2073 		f2fs_balance_fs(sbi, need_balance_fs);
2074 
2075 	if (unlikely(f2fs_cp_error(sbi))) {
2076 		f2fs_submit_merged_write(sbi, DATA);
2077 		submitted = NULL;
2078 	}
2079 
2080 	if (submitted)
2081 		*submitted = fio.submitted;
2082 
2083 	return 0;
2084 
2085 redirty_out:
2086 	redirty_page_for_writepage(wbc, page);
2087 	/*
2088 	 * pageout() in MM translates EAGAIN, and so calls handle_write_error()
2089 	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
2090 	 * file_write_and_wait_range() will then see the EIO, which is critical
2091 	 * to the return value of fsync() and to reporting atomic_write failure.
2092 	 */
2093 	if (!err || wbc->for_reclaim)
2094 		return AOP_WRITEPAGE_ACTIVATE;
2095 	unlock_page(page);
2096 	return err;
2097 }
2098 
2099 static int f2fs_write_data_page(struct page *page,
2100 					struct writeback_control *wbc)
2101 {
2102 	return __write_data_page(page, NULL, wbc, FS_DATA_IO);
2103 }
2104 
2105 /*
2106  * This function was copied from write_cache_pages() in mm/page-writeback.c.
2107  * The major change is performing the write step for cold data pages
2108  * separately from warm/hot data pages.
2109  */
2110 static int f2fs_write_cache_pages(struct address_space *mapping,
2111 					struct writeback_control *wbc,
2112 					enum iostat_type io_type)
2113 {
2114 	int ret = 0;
2115 	int done = 0;
2116 	struct pagevec pvec;
2117 	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2118 	int nr_pages;
2119 	pgoff_t uninitialized_var(writeback_index);
2120 	pgoff_t index;
2121 	pgoff_t end;		/* Inclusive */
2122 	pgoff_t done_index;
2123 	int cycled;
2124 	int range_whole = 0;
2125 	xa_mark_t tag;
2126 	int nwritten = 0;
2127 
2128 	pagevec_init(&pvec);
2129 
2130 	if (get_dirty_pages(mapping->host) <=
2131 				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
2132 		set_inode_flag(mapping->host, FI_HOT_DATA);
2133 	else
2134 		clear_inode_flag(mapping->host, FI_HOT_DATA);
2135 
2136 	if (wbc->range_cyclic) {
2137 		writeback_index = mapping->writeback_index; /* prev offset */
2138 		index = writeback_index;
2139 		if (index == 0)
2140 			cycled = 1;
2141 		else
2142 			cycled = 0;
2143 		end = -1;
2144 	} else {
2145 		index = wbc->range_start >> PAGE_SHIFT;
2146 		end = wbc->range_end >> PAGE_SHIFT;
2147 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2148 			range_whole = 1;
2149 		cycled = 1; /* ignore range_cyclic tests */
2150 	}
2151 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2152 		tag = PAGECACHE_TAG_TOWRITE;
2153 	else
2154 		tag = PAGECACHE_TAG_DIRTY;
2155 retry:
2156 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2157 		tag_pages_for_writeback(mapping, index, end);
2158 	done_index = index;
2159 	while (!done && (index <= end)) {
2160 		int i;
2161 
2162 		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
2163 				tag);
2164 		if (nr_pages == 0)
2165 			break;
2166 
2167 		for (i = 0; i < nr_pages; i++) {
2168 			struct page *page = pvec.pages[i];
2169 			bool submitted = false;
2170 
2171 			/* give priority to WB_SYNC threads */
2172 			if (atomic_read(&sbi->wb_sync_req[DATA]) &&
2173 					wbc->sync_mode == WB_SYNC_NONE) {
2174 				done = 1;
2175 				break;
2176 			}
2177 
2178 			done_index = page->index;
2179 retry_write:
2180 			lock_page(page);
2181 
2182 			if (unlikely(page->mapping != mapping)) {
2183 continue_unlock:
2184 				unlock_page(page);
2185 				continue;
2186 			}
2187 
2188 			if (!PageDirty(page)) {
2189 				/* someone wrote it for us */
2190 				goto continue_unlock;
2191 			}
2192 
2193 			if (PageWriteback(page)) {
2194 				if (wbc->sync_mode != WB_SYNC_NONE)
2195 					f2fs_wait_on_page_writeback(page,
2196 							DATA, true, true);
2197 				else
2198 					goto continue_unlock;
2199 			}
2200 
2201 			if (!clear_page_dirty_for_io(page))
2202 				goto continue_unlock;
2203 
2204 			ret = __write_data_page(page, &submitted, wbc, io_type);
2205 			if (unlikely(ret)) {
2206 				/*
2207 				 * keep nr_to_write, since vfs uses this to
2208 				 * get # of written pages.
2209 				 */
2210 				if (ret == AOP_WRITEPAGE_ACTIVATE) {
2211 					unlock_page(page);
2212 					ret = 0;
2213 					continue;
2214 				} else if (ret == -EAGAIN) {
2215 					ret = 0;
2216 					if (wbc->sync_mode == WB_SYNC_ALL) {
2217 						cond_resched();
2218 						congestion_wait(BLK_RW_ASYNC,
2219 									HZ/50);
2220 						goto retry_write;
2221 					}
2222 					continue;
2223 				}
2224 				done_index = page->index + 1;
2225 				done = 1;
2226 				break;
2227 			} else if (submitted) {
2228 				nwritten++;
2229 			}
2230 
2231 			if (--wbc->nr_to_write <= 0 &&
2232 					wbc->sync_mode == WB_SYNC_NONE) {
2233 				done = 1;
2234 				break;
2235 			}
2236 		}
2237 		pagevec_release(&pvec);
2238 		cond_resched();
2239 	}
2240 
2241 	if (!cycled && !done) {
2242 		cycled = 1;
2243 		index = 0;
2244 		end = writeback_index - 1;
2245 		goto retry;
2246 	}
2247 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2248 		mapping->writeback_index = done_index;
2249 
2250 	if (nwritten)
2251 		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
2252 								NULL, 0, DATA);
2253 
2254 	return ret;
2255 }
2256 
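/*
 * Illustrative sketch (hypothetical values, not from this file): the kind of
 * writeback_control the VFS hands to f2fs_write_cache_pages() for a
 * whole-file data sync.  With these values range_whole becomes 1, pages are
 * tagged PAGECACHE_TAG_TOWRITE, and the range_cyclic wraparound is skipped
 * via cycled = 1.
 */
#if 0
struct writeback_control wbc = {
	.sync_mode	= WB_SYNC_ALL,	/* selects the TOWRITE tag */
	.nr_to_write	= LONG_MAX,
	.range_start	= 0,
	.range_end	= LLONG_MAX,	/* 0..LLONG_MAX => range_whole = 1 */
};
#endif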
2257 static inline bool __should_serialize_io(struct inode *inode,
2258 					struct writeback_control *wbc)
2259 {
2260 	if (!S_ISREG(inode->i_mode))
2261 		return false;
2262 	if (IS_NOQUOTA(inode))
2263 		return false;
2264 	if (wbc->sync_mode != WB_SYNC_ALL)
2265 		return true;
2266 	if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
2267 		return true;
2268 	return false;
2269 }
2270 
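/*
 * Worked example (illustrative): per the checks above, a WB_SYNC_NONE
 * background flush of a regular, non-quota file always serializes on
 * sbi->writepages, while a WB_SYNC_ALL sync serializes only when the inode
 * has at least min_seq_blocks dirty pages; small fsyncs stay concurrent and
 * large sequential streams are written out one inode at a time.
 */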
2271 static int __f2fs_write_data_pages(struct address_space *mapping,
2272 						struct writeback_control *wbc,
2273 						enum iostat_type io_type)
2274 {
2275 	struct inode *inode = mapping->host;
2276 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2277 	struct blk_plug plug;
2278 	int ret;
2279 	bool locked = false;
2280 
2281 	/* deal with chardevs and other special files */
2282 	if (!mapping->a_ops->writepage)
2283 		return 0;
2284 
2285 	/* skip writing if there is no dirty page in this inode */
2286 	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
2287 		return 0;
2288 
2289 	/* during POR, we don't need to trigger writepage at all. */
2290 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2291 		goto skip_write;
2292 
2293 	if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
2294 			wbc->sync_mode == WB_SYNC_NONE &&
2295 			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
2296 			f2fs_available_free_memory(sbi, DIRTY_DENTS))
2297 		goto skip_write;
2298 
2299 	/* skip writing during file defragmentation */
2300 	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
2301 		goto skip_write;
2302 
2303 	trace_f2fs_writepages(mapping->host, wbc, DATA);
2304 
2305 	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
2306 	if (wbc->sync_mode == WB_SYNC_ALL)
2307 		atomic_inc(&sbi->wb_sync_req[DATA]);
2308 	else if (atomic_read(&sbi->wb_sync_req[DATA]))
2309 		goto skip_write;
2310 
2311 	if (__should_serialize_io(inode, wbc)) {
2312 		mutex_lock(&sbi->writepages);
2313 		locked = true;
2314 	}
2315 
2316 	blk_start_plug(&plug);
2317 	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
2318 	blk_finish_plug(&plug);
2319 
2320 	if (locked)
2321 		mutex_unlock(&sbi->writepages);
2322 
2323 	if (wbc->sync_mode == WB_SYNC_ALL)
2324 		atomic_dec(&sbi->wb_sync_req[DATA]);
2325 	/*
2326 	 * if some pages were truncated, we cannot guarantee that mapping->host
2327 	 * can detect pending bios.
2328 	 */
2329 
2330 	f2fs_remove_dirty_inode(inode);
2331 	return ret;
2332 
2333 skip_write:
2334 	wbc->pages_skipped += get_dirty_pages(inode);
2335 	trace_f2fs_writepages(mapping->host, wbc, DATA);
2336 	return 0;
2337 }
2338 
2339 static int f2fs_write_data_pages(struct address_space *mapping,
2340 			    struct writeback_control *wbc)
2341 {
2342 	struct inode *inode = mapping->host;
2343 
2344 	return __f2fs_write_data_pages(mapping, wbc,
2345 			F2FS_I(inode)->cp_task == current ?
2346 			FS_CP_DATA_IO : FS_DATA_IO);
2347 }
2348 
2349 static void f2fs_write_failed(struct address_space *mapping, loff_t to)
2350 {
2351 	struct inode *inode = mapping->host;
2352 	loff_t i_size = i_size_read(inode);
2353 
2354 	if (to > i_size) {
2355 		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2356 		down_write(&F2FS_I(inode)->i_mmap_sem);
2357 
2358 		truncate_pagecache(inode, i_size);
2359 		if (!IS_NOQUOTA(inode))
2360 			f2fs_truncate_blocks(inode, i_size, true);
2361 
2362 		up_write(&F2FS_I(inode)->i_mmap_sem);
2363 		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2364 	}
2365 }
2366 
2367 static int prepare_write_begin(struct f2fs_sb_info *sbi,
2368 			struct page *page, loff_t pos, unsigned len,
2369 			block_t *blk_addr, bool *node_changed)
2370 {
2371 	struct inode *inode = page->mapping->host;
2372 	pgoff_t index = page->index;
2373 	struct dnode_of_data dn;
2374 	struct page *ipage;
2375 	bool locked = false;
2376 	struct extent_info ei = {0, 0, 0};
2377 	int err = 0;
2378 	int flag;
2379 
2380 	/*
2381 	 * we already allocated all the blocks, so we don't need to get
2382 	 * the block addresses when there is no need to fill the page.
2383 	 */
2384 	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
2385 			!is_inode_flag_set(inode, FI_NO_PREALLOC))
2386 		return 0;
2387 
2388 	/* f2fs_lock_op avoids race between write CP and convert_inline_page */
2389 	if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
2390 		flag = F2FS_GET_BLOCK_DEFAULT;
2391 	else
2392 		flag = F2FS_GET_BLOCK_PRE_AIO;
2393 
2394 	if (f2fs_has_inline_data(inode) ||
2395 			(pos & PAGE_MASK) >= i_size_read(inode)) {
2396 		__do_map_lock(sbi, flag, true);
2397 		locked = true;
2398 	}
2399 restart:
2400 	/* check inline_data */
2401 	ipage = f2fs_get_node_page(sbi, inode->i_ino);
2402 	if (IS_ERR(ipage)) {
2403 		err = PTR_ERR(ipage);
2404 		goto unlock_out;
2405 	}
2406 
2407 	set_new_dnode(&dn, inode, ipage, ipage, 0);
2408 
2409 	if (f2fs_has_inline_data(inode)) {
2410 		if (pos + len <= MAX_INLINE_DATA(inode)) {
2411 			f2fs_do_read_inline_data(page, ipage);
2412 			set_inode_flag(inode, FI_DATA_EXIST);
2413 			if (inode->i_nlink)
2414 				set_inline_node(ipage);
2415 		} else {
2416 			err = f2fs_convert_inline_page(&dn, page);
2417 			if (err)
2418 				goto out;
2419 			if (dn.data_blkaddr == NULL_ADDR)
2420 				err = f2fs_get_block(&dn, index);
2421 		}
2422 	} else if (locked) {
2423 		err = f2fs_get_block(&dn, index);
2424 	} else {
2425 		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
2426 			dn.data_blkaddr = ei.blk + index - ei.fofs;
2427 		} else {
2428 			/* hole case */
2429 			err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
2430 			if (err || dn.data_blkaddr == NULL_ADDR) {
2431 				f2fs_put_dnode(&dn);
2432 				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
2433 								true);
2434 				WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
2435 				locked = true;
2436 				goto restart;
2437 			}
2438 		}
2439 	}
2440 
2441 	/* convert_inline_page can set node_changed */
2442 	*blk_addr = dn.data_blkaddr;
2443 	*node_changed = dn.node_changed;
2444 out:
2445 	f2fs_put_dnode(&dn);
2446 unlock_out:
2447 	if (locked)
2448 		__do_map_lock(sbi, flag, false);
2449 	return err;
2450 }
2451 
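/*
 * Worked example (illustrative): for a whole-page overwrite
 * (len == PAGE_SIZE) of a non-inline inode with blocks preallocated,
 * prepare_write_begin() returns 0 right away; the page will be fully
 * overwritten, so the old block address is not needed.  Only partial
 * writes, inline data, or writes past i_size take the locked
 * lookup/allocation paths above.
 */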
2452 static int f2fs_write_begin(struct file *file, struct address_space *mapping,
2453 		loff_t pos, unsigned len, unsigned flags,
2454 		struct page **pagep, void **fsdata)
2455 {
2456 	struct inode *inode = mapping->host;
2457 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2458 	struct page *page = NULL;
2459 	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
2460 	bool need_balance = false, drop_atomic = false;
2461 	block_t blkaddr = NULL_ADDR;
2462 	int err = 0;
2463 
2464 	trace_f2fs_write_begin(inode, pos, len, flags);
2465 
2466 	err = f2fs_is_checkpoint_ready(sbi);
2467 	if (err)
2468 		goto fail;
2469 
2470 	if ((f2fs_is_atomic_file(inode) &&
2471 			!f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
2472 			is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
2473 		err = -ENOMEM;
2474 		drop_atomic = true;
2475 		goto fail;
2476 	}
2477 
2478 	/*
2479 	 * We should check this at this moment to avoid deadlock on inode page
2480 	 * and #0 page. The locking rule for inline_data conversion should be:
2481 	 * lock_page(page #0) -> lock_page(inode_page)
2482 	 */
2483 	if (index != 0) {
2484 		err = f2fs_convert_inline_inode(inode);
2485 		if (err)
2486 			goto fail;
2487 	}
2488 repeat:
2489 	/*
2490 	 * Do not use grab_cache_page_write_begin(), to avoid a deadlock due to
2491 	 * wait_for_stable_page(); we will wait on that below with our IO control.
2492 	 */
2493 	page = f2fs_pagecache_get_page(mapping, index,
2494 				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
2495 	if (!page) {
2496 		err = -ENOMEM;
2497 		goto fail;
2498 	}
2499 
2500 	*pagep = page;
2501 
2502 	err = prepare_write_begin(sbi, page, pos, len,
2503 					&blkaddr, &need_balance);
2504 	if (err)
2505 		goto fail;
2506 
2507 	if (need_balance && !IS_NOQUOTA(inode) &&
2508 			has_not_enough_free_secs(sbi, 0, 0)) {
2509 		unlock_page(page);
2510 		f2fs_balance_fs(sbi, true);
2511 		lock_page(page);
2512 		if (page->mapping != mapping) {
2513 			/* The page got truncated from under us */
2514 			f2fs_put_page(page, 1);
2515 			goto repeat;
2516 		}
2517 	}
2518 
2519 	f2fs_wait_on_page_writeback(page, DATA, false, true);
2520 
2521 	if (len == PAGE_SIZE || PageUptodate(page))
2522 		return 0;
2523 
2524 	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
2525 		zero_user_segment(page, len, PAGE_SIZE);
2526 		return 0;
2527 	}
2528 
2529 	if (blkaddr == NEW_ADDR) {
2530 		zero_user_segment(page, 0, PAGE_SIZE);
2531 		SetPageUptodate(page);
2532 	} else {
2533 		if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
2534 				DATA_GENERIC_ENHANCE_READ)) {
2535 			err = -EFAULT;
2536 			goto fail;
2537 		}
2538 		err = f2fs_submit_page_read(inode, page, blkaddr);
2539 		if (err)
2540 			goto fail;
2541 
2542 		lock_page(page);
2543 		if (unlikely(page->mapping != mapping)) {
2544 			f2fs_put_page(page, 1);
2545 			goto repeat;
2546 		}
2547 		if (unlikely(!PageUptodate(page))) {
2548 			err = -EIO;
2549 			goto fail;
2550 		}
2551 	}
2552 	return 0;
2553 
2554 fail:
2555 	f2fs_put_page(page, 1);
2556 	f2fs_write_failed(mapping, pos + len);
2557 	if (drop_atomic)
2558 		f2fs_drop_inmem_pages_all(sbi, false);
2559 	return err;
2560 }
2561 
2562 static int f2fs_write_end(struct file *file,
2563 			struct address_space *mapping,
2564 			loff_t pos, unsigned len, unsigned copied,
2565 			struct page *page, void *fsdata)
2566 {
2567 	struct inode *inode = page->mapping->host;
2568 
2569 	trace_f2fs_write_end(inode, pos, len, copied);
2570 
2571 	/*
2572 	 * This should come from len == PAGE_SIZE, so we expect copied to be
2573 	 * PAGE_SIZE as well. Otherwise, treat it as zero bytes copied and
2574 	 * let generic_perform_write() try to copy the data again via copied=0.
2575 	 */
2576 	if (!PageUptodate(page)) {
2577 		if (unlikely(copied != len))
2578 			copied = 0;
2579 		else
2580 			SetPageUptodate(page);
2581 	}
2582 	if (!copied)
2583 		goto unlock_out;
2584 
2585 	set_page_dirty(page);
2586 
2587 	if (pos + copied > i_size_read(inode))
2588 		f2fs_i_size_write(inode, pos + copied);
2589 unlock_out:
2590 	f2fs_put_page(page, 1);
2591 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2592 	return copied;
2593 }
2594 
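/*
 * Illustrative sketch (simplified from generic_perform_write(); not code
 * from this file): how ->write_begin and ->write_end above pair up for one
 * chunk of a buffered write.
 */
#if 0
err = f2fs_write_begin(file, mapping, pos, len, 0, &page, &fsdata);
if (err)
	return err;
copied = copy_page_from_iter(page, offset_in_page(pos), len, iter);
err = f2fs_write_end(file, mapping, pos, len, copied, page, fsdata);
/* copied == 0 makes generic_perform_write() retry the copy */
#endif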
2595 static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
2596 			   loff_t offset)
2597 {
2598 	unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
2599 	unsigned blkbits = i_blkbits;
2600 	unsigned blocksize_mask = (1 << blkbits) - 1;
2601 	unsigned long align = offset | iov_iter_alignment(iter);
2602 	struct block_device *bdev = inode->i_sb->s_bdev;
2603 
2604 	if (align & blocksize_mask) {
2605 		if (bdev)
2606 			blkbits = blksize_bits(bdev_logical_block_size(bdev));
2607 		blocksize_mask = (1 << blkbits) - 1;
2608 		if (align & blocksize_mask)
2609 			return -EINVAL;
2610 		return 1;
2611 	}
2612 	return 0;
2613 }
2614 
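/*
 * Worked example (illustrative): with a 4KiB block size, blocksize_mask is
 * 0xfff.  A DIO request at offset 512 with 512-byte-aligned buffers fails
 * the first mask test, so the mask is rebuilt from the bdev's logical block
 * size; if that is 512 the request passes and check_direct_IO() returns 1,
 * which f2fs_direct_IO() below turns into "return 0", i.e. fall back to
 * buffered I/O.
 */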
2615 static void f2fs_dio_end_io(struct bio *bio)
2616 {
2617 	struct f2fs_private_dio *dio = bio->bi_private;
2618 
2619 	dec_page_count(F2FS_I_SB(dio->inode),
2620 			dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
2621 
2622 	bio->bi_private = dio->orig_private;
2623 	bio->bi_end_io = dio->orig_end_io;
2624 
2625 	kvfree(dio);
2626 
2627 	bio_endio(bio);
2628 }
2629 
2630 static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
2631 							loff_t file_offset)
2632 {
2633 	struct f2fs_private_dio *dio;
2634 	bool write = (bio_op(bio) == REQ_OP_WRITE);
2635 
2636 	dio = f2fs_kzalloc(F2FS_I_SB(inode),
2637 			sizeof(struct f2fs_private_dio), GFP_NOFS);
2638 	if (!dio)
2639 		goto out;
2640 
2641 	dio->inode = inode;
2642 	dio->orig_end_io = bio->bi_end_io;
2643 	dio->orig_private = bio->bi_private;
2644 	dio->write = write;
2645 
2646 	bio->bi_end_io = f2fs_dio_end_io;
2647 	bio->bi_private = dio;
2648 
2649 	inc_page_count(F2FS_I_SB(inode),
2650 			write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
2651 
2652 	submit_bio(bio);
2653 	return;
2654 out:
2655 	bio->bi_status = BLK_STS_IOERR;
2656 	bio_endio(bio);
2657 }
2658 
2659 static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
2660 {
2661 	struct address_space *mapping = iocb->ki_filp->f_mapping;
2662 	struct inode *inode = mapping->host;
2663 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2664 	struct f2fs_inode_info *fi = F2FS_I(inode);
2665 	size_t count = iov_iter_count(iter);
2666 	loff_t offset = iocb->ki_pos;
2667 	int rw = iov_iter_rw(iter);
2668 	int err;
2669 	enum rw_hint hint = iocb->ki_hint;
2670 	int whint_mode = F2FS_OPTION(sbi).whint_mode;
2671 	bool do_opu;
2672 
2673 	err = check_direct_IO(inode, iter, offset);
2674 	if (err)
2675 		return err < 0 ? err : 0;
2676 
2677 	if (f2fs_force_buffered_io(inode, iocb, iter))
2678 		return 0;
2679 
2680 	do_opu = allow_outplace_dio(inode, iocb, iter);
2681 
2682 	trace_f2fs_direct_IO_enter(inode, offset, count, rw);
2683 
2684 	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
2685 		iocb->ki_hint = WRITE_LIFE_NOT_SET;
2686 
2687 	if (iocb->ki_flags & IOCB_NOWAIT) {
2688 		if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
2689 			iocb->ki_hint = hint;
2690 			err = -EAGAIN;
2691 			goto out;
2692 		}
2693 		if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
2694 			up_read(&fi->i_gc_rwsem[rw]);
2695 			iocb->ki_hint = hint;
2696 			err = -EAGAIN;
2697 			goto out;
2698 		}
2699 	} else {
2700 		down_read(&fi->i_gc_rwsem[rw]);
2701 		if (do_opu)
2702 			down_read(&fi->i_gc_rwsem[READ]);
2703 	}
2704 
2705 	err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
2706 			iter, rw == WRITE ? get_data_block_dio_write :
2707 			get_data_block_dio, NULL, f2fs_dio_submit_bio,
2708 			DIO_LOCKING | DIO_SKIP_HOLES);
2709 
2710 	if (do_opu)
2711 		up_read(&fi->i_gc_rwsem[READ]);
2712 
2713 	up_read(&fi->i_gc_rwsem[rw]);
2714 
2715 	if (rw == WRITE) {
2716 		if (whint_mode == WHINT_MODE_OFF)
2717 			iocb->ki_hint = hint;
2718 		if (err > 0) {
2719 			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
2720 									err);
2721 			if (!do_opu)
2722 				set_inode_flag(inode, FI_UPDATE_WRITE);
2723 		} else if (err < 0) {
2724 			f2fs_write_failed(mapping, offset + count);
2725 		}
2726 	}
2727 
2728 out:
2729 	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
2730 
2731 	return err;
2732 }
2733 
2734 void f2fs_invalidate_page(struct page *page, unsigned int offset,
2735 							unsigned int length)
2736 {
2737 	struct inode *inode = page->mapping->host;
2738 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2739 
2740 	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
2741 		(offset % PAGE_SIZE || length != PAGE_SIZE))
2742 		return;
2743 
2744 	if (PageDirty(page)) {
2745 		if (inode->i_ino == F2FS_META_INO(sbi)) {
2746 			dec_page_count(sbi, F2FS_DIRTY_META);
2747 		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
2748 			dec_page_count(sbi, F2FS_DIRTY_NODES);
2749 		} else {
2750 			inode_dec_dirty_pages(inode);
2751 			f2fs_remove_dirty_inode(inode);
2752 		}
2753 	}
2754 
2755 	clear_cold_data(page);
2756 
2757 	if (IS_ATOMIC_WRITTEN_PAGE(page))
2758 		return f2fs_drop_inmem_page(inode, page);
2759 
2760 	f2fs_clear_page_private(page);
2761 }
2762 
2763 int f2fs_release_page(struct page *page, gfp_t wait)
2764 {
2765 	/* If this is a dirty page, keep PagePrivate */
2766 	if (PageDirty(page))
2767 		return 0;
2768 
2769 	/* This is an atomic-written page, keep Private */
2770 	if (IS_ATOMIC_WRITTEN_PAGE(page))
2771 		return 0;
2772 
2773 	clear_cold_data(page);
2774 	f2fs_clear_page_private(page);
2775 	return 1;
2776 }
2777 
2778 static int f2fs_set_data_page_dirty(struct page *page)
2779 {
2780 	struct address_space *mapping = page->mapping;
2781 	struct inode *inode = mapping->host;
2782 
2783 	trace_f2fs_set_page_dirty(page, DATA);
2784 
2785 	if (!PageUptodate(page))
2786 		SetPageUptodate(page);
2787 
2788 	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
2789 		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
2790 			f2fs_register_inmem_page(inode, page);
2791 			return 1;
2792 		}
2793 		/*
2794 		 * This page has been registered previously, so just
2795 		 * return here.
2796 		 */
2797 		return 0;
2798 	}
2799 
2800 	if (!PageDirty(page)) {
2801 		__set_page_dirty_nobuffers(page);
2802 		f2fs_update_dirty_page(inode, page);
2803 		return 1;
2804 	}
2805 	return 0;
2806 }
2807 
2808 static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
2809 {
2810 	struct inode *inode = mapping->host;
2811 
2812 	if (f2fs_has_inline_data(inode))
2813 		return 0;
2814 
2815 	/* make sure all the blocks are allocated */
2816 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2817 		filemap_write_and_wait(mapping);
2818 
2819 	return generic_block_bmap(mapping, block, get_data_block_bmap);
2820 }
2821 
2822 #ifdef CONFIG_MIGRATION
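/*
 * Illustrative user-space counterpart (assumption: the standard FIBMAP
 * ioctl; not code from this file): ->bmap ultimately services such queries,
 * so inline-data files report block 0 here.
 */
#if 0
int blk = 0;			/* logical block index in */
ioctl(fd, FIBMAP, &blk);	/* physical block number out */
#endif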
2823 #include <linux/migrate.h>
2824 
2825 int f2fs_migrate_page(struct address_space *mapping,
2826 		struct page *newpage, struct page *page, enum migrate_mode mode)
2827 {
2828 	int rc, extra_count;
2829 	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
2830 	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
2831 
2832 	BUG_ON(PageWriteback(page));
2833 
2834 	/* migrating an atomic-written page is safe with the inmem_lock held */
2835 	if (atomic_written) {
2836 		if (mode != MIGRATE_SYNC)
2837 			return -EBUSY;
2838 		if (!mutex_trylock(&fi->inmem_lock))
2839 			return -EAGAIN;
2840 	}
2841 
2842 	/* one extra reference was held for atomic_write page */
2843 	extra_count = atomic_written ? 1 : 0;
2844 	rc = migrate_page_move_mapping(mapping, newpage,
2845 				page, mode, extra_count);
2846 	if (rc != MIGRATEPAGE_SUCCESS) {
2847 		if (atomic_written)
2848 			mutex_unlock(&fi->inmem_lock);
2849 		return rc;
2850 	}
2851 
2852 	if (atomic_written) {
2853 		struct inmem_pages *cur;
2854 		list_for_each_entry(cur, &fi->inmem_pages, list)
2855 			if (cur->page == page) {
2856 				cur->page = newpage;
2857 				break;
2858 			}
2859 		mutex_unlock(&fi->inmem_lock);
2860 		put_page(page);
2861 		get_page(newpage);
2862 	}
2863 
2864 	if (PagePrivate(page)) {
2865 		f2fs_set_page_private(newpage, page_private(page));
2866 		f2fs_clear_page_private(page);
2867 	}
2868 
2869 	if (mode != MIGRATE_SYNC_NO_COPY)
2870 		migrate_page_copy(newpage, page);
2871 	else
2872 		migrate_page_states(newpage, page);
2873 
2874 	return MIGRATEPAGE_SUCCESS;
2875 }
2876 #endif
2877 
2878 const struct address_space_operations f2fs_dblock_aops = {
2879 	.readpage	= f2fs_read_data_page,
2880 	.readpages	= f2fs_read_data_pages,
2881 	.writepage	= f2fs_write_data_page,
2882 	.writepages	= f2fs_write_data_pages,
2883 	.write_begin	= f2fs_write_begin,
2884 	.write_end	= f2fs_write_end,
2885 	.set_page_dirty	= f2fs_set_data_page_dirty,
2886 	.invalidatepage	= f2fs_invalidate_page,
2887 	.releasepage	= f2fs_release_page,
2888 	.direct_IO	= f2fs_direct_IO,
2889 	.bmap		= f2fs_bmap,
2890 #ifdef CONFIG_MIGRATION
2891 	.migratepage    = f2fs_migrate_page,
2892 #endif
2893 };
2894 
2895 void f2fs_clear_page_cache_dirty_tag(struct page *page)
2896 {
2897 	struct address_space *mapping = page_mapping(page);
2898 	unsigned long flags;
2899 
2900 	xa_lock_irqsave(&mapping->i_pages, flags);
2901 	__xa_clear_mark(&mapping->i_pages, page_index(page),
2902 						PAGECACHE_TAG_DIRTY);
2903 	xa_unlock_irqrestore(&mapping->i_pages, flags);
2904 }
2905 
2906 int __init f2fs_init_post_read_processing(void)
2907 {
2908 	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, 0);
2909 	if (!bio_post_read_ctx_cache)
2910 		goto fail;
2911 	bio_post_read_ctx_pool =
2912 		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
2913 					 bio_post_read_ctx_cache);
2914 	if (!bio_post_read_ctx_pool)
2915 		goto fail_free_cache;
2916 	return 0;
2917 
2918 fail_free_cache:
2919 	kmem_cache_destroy(bio_post_read_ctx_cache);
2920 fail:
2921 	return -ENOMEM;
2922 }
2923 
2924 void __exit f2fs_destroy_post_read_processing(void)
2925 {
2926 	mempool_destroy(bio_post_read_ctx_pool);
2927 	kmem_cache_destroy(bio_post_read_ctx_cache);
2928 }
2929 }
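/*
 * Illustrative sketch (hypothetical caller; the real calls live in f2fs's
 * module init/exit paths): pairing of the setup/teardown above.  Note the
 * teardown order: the mempool is destroyed before the kmem_cache that
 * backs it.
 */
#if 0
static int __init example_init(void)
{
	return f2fs_init_post_read_processing();
}

static void __exit example_exit(void)
{
	f2fs_destroy_post_read_processing();
}
#endif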