xref: /linux/fs/f2fs/data.c (revision 92ce4c3ea7c44e61ca2b6ef3e5682bfcea851d87)
1 /*
2  * fs/f2fs/data.c
3  *
4  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5  *             http://www.samsung.com/
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 #include <linux/fs.h>
12 #include <linux/f2fs_fs.h>
13 #include <linux/buffer_head.h>
14 #include <linux/mpage.h>
15 #include <linux/writeback.h>
16 #include <linux/backing-dev.h>
17 #include <linux/pagevec.h>
18 #include <linux/blkdev.h>
19 #include <linux/bio.h>
20 #include <linux/prefetch.h>
21 #include <linux/uio.h>
22 #include <linux/mm.h>
23 #include <linux/memcontrol.h>
24 #include <linux/cleancache.h>
25 #include <linux/sched/signal.h>
26 
27 #include "f2fs.h"
28 #include "node.h"
29 #include "segment.h"
30 #include "trace.h"
31 #include <trace/events/f2fs.h>
32 
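/*
 * A page is "checkpoint guaranteed" when its writeback is covered by the
 * checkpoint: meta pages, node pages, directory data, and cold data all
 * qualify, so their writeback is accounted separately (see WB_DATA_TYPE()).
 */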
33 static bool __is_cp_guaranteed(struct page *page)
34 {
35 	struct address_space *mapping = page->mapping;
36 	struct inode *inode;
37 	struct f2fs_sb_info *sbi;
38 
39 	if (!mapping)
40 		return false;
41 
42 	inode = mapping->host;
43 	sbi = F2FS_I_SB(inode);
44 
45 	if (inode->i_ino == F2FS_META_INO(sbi) ||
46 			inode->i_ino ==  F2FS_NODE_INO(sbi) ||
47 			S_ISDIR(inode->i_mode) ||
48 			is_cold_data(page))
49 		return true;
50 	return false;
51 }
52 
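/*
 * Read completion handler: decrypt pages first if the bio carries
 * encrypted data, then mark each page up-to-date (or failed) and
 * unlock it.
 */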
53 static void f2fs_read_end_io(struct bio *bio)
54 {
55 	struct bio_vec *bvec;
56 	int i;
57 
58 #ifdef CONFIG_F2FS_FAULT_INJECTION
59 	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
60 		f2fs_show_injection_info(FAULT_IO);
61 		bio->bi_status = BLK_STS_IOERR;
62 	}
63 #endif
64 
65 	if (f2fs_bio_encrypted(bio)) {
66 		if (bio->bi_status) {
67 			fscrypt_release_ctx(bio->bi_private);
68 		} else {
69 			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
70 			return;
71 		}
72 	}
73 
74 	bio_for_each_segment_all(bvec, bio, i) {
75 		struct page *page = bvec->bv_page;
76 
77 		if (!bio->bi_status) {
78 			if (!PageUptodate(page))
79 				SetPageUptodate(page);
80 		} else {
81 			ClearPageUptodate(page);
82 			SetPageError(page);
83 		}
84 		unlock_page(page);
85 	}
86 	bio_put(bio);
87 }
88 
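/*
 * Write completion handler: free dummy alignment pages, propagate IO
 * errors (stopping further checkpoints on failure), end page writeback,
 * and wake up waiters once all checkpointed data pages are written.
 */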
89 static void f2fs_write_end_io(struct bio *bio)
90 {
91 	struct f2fs_sb_info *sbi = bio->bi_private;
92 	struct bio_vec *bvec;
93 	int i;
94 
95 	bio_for_each_segment_all(bvec, bio, i) {
96 		struct page *page = bvec->bv_page;
97 		enum count_type type = WB_DATA_TYPE(page);
98 
99 		if (IS_DUMMY_WRITTEN_PAGE(page)) {
100 			set_page_private(page, (unsigned long)NULL);
101 			ClearPagePrivate(page);
102 			unlock_page(page);
103 			mempool_free(page, sbi->write_io_dummy);
104 
105 			if (unlikely(bio->bi_status))
106 				f2fs_stop_checkpoint(sbi, true);
107 			continue;
108 		}
109 
110 		fscrypt_pullback_bio_page(&page, true);
111 
112 		if (unlikely(bio->bi_status)) {
113 			mapping_set_error(page->mapping, -EIO);
114 			f2fs_stop_checkpoint(sbi, true);
115 		}
116 		dec_page_count(sbi, type);
117 		clear_cold_data(page);
118 		end_page_writeback(page);
119 	}
120 	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
121 				wq_has_sleeper(&sbi->cp_wait))
122 		wake_up(&sbi->cp_wait);
123 
124 	bio_put(bio);
125 }
126 
127 /*
128  * Return the block device that covers blk_addr; if a bio is given, remap it to that device.
129  */
130 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
131 				block_t blk_addr, struct bio *bio)
132 {
133 	struct block_device *bdev = sbi->sb->s_bdev;
134 	int i;
135 
136 	for (i = 0; i < sbi->s_ndevs; i++) {
137 		if (FDEV(i).start_blk <= blk_addr &&
138 					FDEV(i).end_blk >= blk_addr) {
139 			blk_addr -= FDEV(i).start_blk;
140 			bdev = FDEV(i).bdev;
141 			break;
142 		}
143 	}
144 	if (bio) {
145 		bio_set_dev(bio, bdev);
146 		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
147 	}
148 	return bdev;
149 }
150 
151 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
152 {
153 	int i;
154 
155 	for (i = 0; i < sbi->s_ndevs; i++)
156 		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
157 			return i;
158 	return 0;
159 }
160 
161 static bool __same_bdev(struct f2fs_sb_info *sbi,
162 				block_t blk_addr, struct bio *bio)
163 {
164 	struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
165 	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
166 }
167 
168 /*
169  * Low-level block read/write IO operations.
170  */
171 static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
172 				int npages, bool is_read)
173 {
174 	struct bio *bio;
175 
176 	bio = f2fs_bio_alloc(npages);
177 
178 	f2fs_target_device(sbi, blk_addr, bio);
179 	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
180 	bio->bi_private = is_read ? NULL : sbi;
181 
182 	return bio;
183 }
184 
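/*
 * Pass a bio down to the block layer. For DATA/NODE writes with an IO
 * alignment requirement (F2FS_IO_SIZE(sbi) > 1), pad a partially filled
 * bio with locked dummy pages so the device always receives full-sized
 * write units; f2fs_write_end_io() releases them on completion.
 */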
185 static inline void __submit_bio(struct f2fs_sb_info *sbi,
186 				struct bio *bio, enum page_type type)
187 {
188 	if (!is_read_io(bio_op(bio))) {
189 		unsigned int start;
190 
191 		if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
192 			current->plug && (type == DATA || type == NODE))
193 			blk_finish_plug(current->plug);
194 
195 		if (type != DATA && type != NODE)
196 			goto submit_io;
197 
198 		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
199 		start %= F2FS_IO_SIZE(sbi);
200 
201 		if (start == 0)
202 			goto submit_io;
203 
204 		/* fill dummy pages */
205 		for (; start < F2FS_IO_SIZE(sbi); start++) {
206 			struct page *page =
207 				mempool_alloc(sbi->write_io_dummy,
208 					GFP_NOIO | __GFP_ZERO | __GFP_NOFAIL);
209 			f2fs_bug_on(sbi, !page);
210 
211 			SetPagePrivate(page);
212 			set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
213 			lock_page(page);
214 			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
215 				f2fs_bug_on(sbi, 1);
216 		}
217 		/*
218 		 * In the NODE case, we lose next block address chain. So, we
219 		 * need to do checkpoint in f2fs_sync_file.
220 		 */
221 		if (type == NODE)
222 			set_sbi_flag(sbi, SBI_NEED_CP);
223 	}
224 submit_io:
225 	if (is_read_io(bio_op(bio)))
226 		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
227 	else
228 		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
229 	submit_bio(bio);
230 }
231 
232 static void __submit_merged_bio(struct f2fs_bio_info *io)
233 {
234 	struct f2fs_io_info *fio = &io->fio;
235 
236 	if (!io->bio)
237 		return;
238 
239 	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
240 
241 	if (is_read_io(fio->op))
242 		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
243 	else
244 		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
245 
246 	__submit_bio(io->sbi, io->bio, fio->type);
247 	io->bio = NULL;
248 }
249 
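/*
 * Check whether the pending merged bio holds a page of @inode or @ino at
 * page index @idx; if neither inode nor ino is given, any pending bio
 * counts as a match.
 */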
250 static bool __has_merged_page(struct f2fs_bio_info *io,
251 				struct inode *inode, nid_t ino, pgoff_t idx)
252 {
253 	struct bio_vec *bvec;
254 	struct page *target;
255 	int i;
256 
257 	if (!io->bio)
258 		return false;
259 
260 	if (!inode && !ino)
261 		return true;
262 
263 	bio_for_each_segment_all(bvec, io->bio, i) {
264 
265 		if (bvec->bv_page->mapping)
266 			target = bvec->bv_page;
267 		else
268 			target = fscrypt_control_page(bvec->bv_page);
269 
270 		if (idx != target->index)
271 			continue;
272 
273 		if (inode && inode == target->mapping->host)
274 			return true;
275 		if (ino && ino == ino_of_node(target))
276 			return true;
277 	}
278 
279 	return false;
280 }
281 
282 static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
283 				nid_t ino, pgoff_t idx, enum page_type type)
284 {
285 	enum page_type btype = PAGE_TYPE_OF_BIO(type);
286 	enum temp_type temp;
287 	struct f2fs_bio_info *io;
288 	bool ret = false;
289 
290 	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
291 		io = sbi->write_io[btype] + temp;
292 
293 		down_read(&io->io_rwsem);
294 		ret = __has_merged_page(io, inode, ino, idx);
295 		up_read(&io->io_rwsem);
296 
297 		/* TODO: use HOT temp only for meta pages now. */
298 		if (ret || btype == META)
299 			break;
300 	}
301 	return ret;
302 }
303 
304 static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
305 				enum page_type type, enum temp_type temp)
306 {
307 	enum page_type btype = PAGE_TYPE_OF_BIO(type);
308 	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
309 
310 	down_write(&io->io_rwsem);
311 
312 	/* change META to META_FLUSH in the checkpoint procedure */
313 	if (type >= META_FLUSH) {
314 		io->fio.type = META_FLUSH;
315 		io->fio.op = REQ_OP_WRITE;
316 		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
317 		if (!test_opt(sbi, NOBARRIER))
318 			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
319 	}
320 	__submit_merged_bio(io);
321 	up_write(&io->io_rwsem);
322 }
323 
324 static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
325 				struct inode *inode, nid_t ino, pgoff_t idx,
326 				enum page_type type, bool force)
327 {
328 	enum temp_type temp;
329 
330 	if (!force && !has_merged_page(sbi, inode, ino, idx, type))
331 		return;
332 
333 	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
334 
335 		__f2fs_submit_merged_write(sbi, type, temp);
336 
337 		/* TODO: use HOT temp only for meta pages now. */
338 		if (type >= META)
339 			break;
340 	}
341 }
342 
343 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
344 {
345 	__submit_merged_write_cond(sbi, NULL, 0, 0, type, true);
346 }
347 
348 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
349 				struct inode *inode, nid_t ino, pgoff_t idx,
350 				enum page_type type)
351 {
352 	__submit_merged_write_cond(sbi, inode, ino, idx, type, false);
353 }
354 
355 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
356 {
357 	f2fs_submit_merged_write(sbi, DATA);
358 	f2fs_submit_merged_write(sbi, NODE);
359 	f2fs_submit_merged_write(sbi, META);
360 }
361 
362 /*
363  * Fill the locked page with data located at the given block address.
364  * A caller needs to unlock the page on failure.
365  */
366 int f2fs_submit_page_bio(struct f2fs_io_info *fio)
367 {
368 	struct bio *bio;
369 	struct page *page = fio->encrypted_page ?
370 			fio->encrypted_page : fio->page;
371 
372 	trace_f2fs_submit_page_bio(page, fio);
373 	f2fs_trace_ios(fio, 0);
374 
375 	/* Allocate a new bio */
376 	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));
377 
378 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
379 		bio_put(bio);
380 		return -EFAULT;
381 	}
382 	bio_set_op_attrs(bio, fio->op, fio->op_flags);
383 
384 	__submit_bio(fio->sbi, bio, fio->type);
385 
386 	if (!is_read_io(fio->op))
387 		inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page));
388 	return 0;
389 }
390 
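/*
 * Add one page to the per-(type, temperature) merged write bio. The
 * current bio is flushed first whenever the new block is not contiguous,
 * the op/op_flags differ, or the target device changes; with
 * fio->in_list set, fios queued on io->io_list are drained as well.
 */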
391 int f2fs_submit_page_write(struct f2fs_io_info *fio)
392 {
393 	struct f2fs_sb_info *sbi = fio->sbi;
394 	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
395 	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
396 	struct page *bio_page;
397 	int err = 0;
398 
399 	f2fs_bug_on(sbi, is_read_io(fio->op));
400 
401 	down_write(&io->io_rwsem);
402 next:
403 	if (fio->in_list) {
404 		spin_lock(&io->io_lock);
405 		if (list_empty(&io->io_list)) {
406 			spin_unlock(&io->io_lock);
407 			goto out_fail;
408 		}
409 		fio = list_first_entry(&io->io_list,
410 						struct f2fs_io_info, list);
411 		list_del(&fio->list);
412 		spin_unlock(&io->io_lock);
413 	}
414 
415 	if (fio->old_blkaddr != NEW_ADDR)
416 		verify_block_addr(sbi, fio->old_blkaddr);
417 	verify_block_addr(sbi, fio->new_blkaddr);
418 
419 	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
420 
421 	/* set submitted = 1 as a return value */
422 	fio->submitted = 1;
423 
424 	inc_page_count(sbi, WB_DATA_TYPE(bio_page));
425 
426 	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
427 	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
428 			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
429 		__submit_merged_bio(io);
430 alloc_new:
431 	if (io->bio == NULL) {
432 		if ((fio->type == DATA || fio->type == NODE) &&
433 				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
434 			err = -EAGAIN;
435 			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
436 			goto out_fail;
437 		}
438 		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
439 						BIO_MAX_PAGES, false);
440 		io->fio = *fio;
441 	}
442 
443 	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
444 		__submit_merged_bio(io);
445 		goto alloc_new;
446 	}
447 
448 	io->last_block_in_bio = fio->new_blkaddr;
449 	f2fs_trace_ios(fio, 0);
450 
451 	trace_f2fs_submit_page_write(fio->page, fio);
452 
453 	if (fio->in_list)
454 		goto next;
455 out_fail:
456 	up_write(&io->io_rwsem);
457 	return err;
458 }
459 
460 static void __set_data_blkaddr(struct dnode_of_data *dn)
461 {
462 	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
463 	__le32 *addr_array;
464 
465 	/* Get physical address of data block */
466 	addr_array = blkaddr_in_node(rn);
467 	addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
468 }
469 
470 /*
471  * Lock ordering for the change of data block address:
472  * ->data_page
473  *  ->node_page
474  *    update block addresses in the node page
475  */
476 void set_data_blkaddr(struct dnode_of_data *dn)
477 {
478 	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
479 	__set_data_blkaddr(dn);
480 	if (set_page_dirty(dn->node_page))
481 		dn->node_changed = true;
482 }
483 
484 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
485 {
486 	dn->data_blkaddr = blkaddr;
487 	set_data_blkaddr(dn);
488 	f2fs_update_extent_cache(dn);
489 }
490 
491 /* dn->ofs_in_node will be returned with up-to-date last block pointer */
492 int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
493 {
494 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
495 	int err;
496 
497 	if (!count)
498 		return 0;
499 
500 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
501 		return -EPERM;
502 	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
503 		return err;
504 
505 	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
506 						dn->ofs_in_node, count);
507 
508 	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
509 
510 	for (; count > 0; dn->ofs_in_node++) {
511 		block_t blkaddr =
512 			datablock_addr(dn->node_page, dn->ofs_in_node);
513 		if (blkaddr == NULL_ADDR) {
514 			dn->data_blkaddr = NEW_ADDR;
515 			__set_data_blkaddr(dn);
516 			count--;
517 		}
518 	}
519 
520 	if (set_page_dirty(dn->node_page))
521 		dn->node_changed = true;
522 	return 0;
523 }
524 
525 /* Should keep dn->ofs_in_node unchanged */
526 int reserve_new_block(struct dnode_of_data *dn)
527 {
528 	unsigned int ofs_in_node = dn->ofs_in_node;
529 	int ret;
530 
531 	ret = reserve_new_blocks(dn, 1);
532 	dn->ofs_in_node = ofs_in_node;
533 	return ret;
534 }
535 
536 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
537 {
538 	bool need_put = dn->inode_page ? false : true;
539 	int err;
540 
541 	err = get_dnode_of_data(dn, index, ALLOC_NODE);
542 	if (err)
543 		return err;
544 
545 	if (dn->data_blkaddr == NULL_ADDR)
546 		err = reserve_new_block(dn);
547 	if (err || need_put)
548 		f2fs_put_dnode(dn);
549 	return err;
550 }
551 
552 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
553 {
554 	struct extent_info ei  = {0,0,0};
555 	struct inode *inode = dn->inode;
556 
557 	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
558 		dn->data_blkaddr = ei.blk + index - ei.fofs;
559 		return 0;
560 	}
561 
562 	return f2fs_reserve_block(dn, index);
563 }
564 
565 struct page *get_read_data_page(struct inode *inode, pgoff_t index,
566 						int op_flags, bool for_write)
567 {
568 	struct address_space *mapping = inode->i_mapping;
569 	struct dnode_of_data dn;
570 	struct page *page;
571 	struct extent_info ei = {0,0,0};
572 	int err;
573 	struct f2fs_io_info fio = {
574 		.sbi = F2FS_I_SB(inode),
575 		.type = DATA,
576 		.op = REQ_OP_READ,
577 		.op_flags = op_flags,
578 		.encrypted_page = NULL,
579 	};
580 
581 	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
582 		return read_mapping_page(mapping, index, NULL);
583 
584 	page = f2fs_grab_cache_page(mapping, index, for_write);
585 	if (!page)
586 		return ERR_PTR(-ENOMEM);
587 
588 	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
589 		dn.data_blkaddr = ei.blk + index - ei.fofs;
590 		goto got_it;
591 	}
592 
593 	set_new_dnode(&dn, inode, NULL, NULL, 0);
594 	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
595 	if (err)
596 		goto put_err;
597 	f2fs_put_dnode(&dn);
598 
599 	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
600 		err = -ENOENT;
601 		goto put_err;
602 	}
603 got_it:
604 	if (PageUptodate(page)) {
605 		unlock_page(page);
606 		return page;
607 	}
608 
609 	/*
610 	 * A new dentry page is allocated but cannot be written, since its
611 	 * new inode page could not be allocated due to -ENOSPC.
612 	 * In such a case, its blkaddr remains NEW_ADDR;
613 	 * see f2fs_add_link -> get_new_data_page -> init_inode_metadata.
614 	 */
615 	if (dn.data_blkaddr == NEW_ADDR) {
616 		zero_user_segment(page, 0, PAGE_SIZE);
617 		if (!PageUptodate(page))
618 			SetPageUptodate(page);
619 		unlock_page(page);
620 		return page;
621 	}
622 
623 	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
624 	fio.page = page;
625 	err = f2fs_submit_page_bio(&fio);
626 	if (err)
627 		goto put_err;
628 	return page;
629 
630 put_err:
631 	f2fs_put_page(page, 1);
632 	return ERR_PTR(err);
633 }
634 
635 struct page *find_data_page(struct inode *inode, pgoff_t index)
636 {
637 	struct address_space *mapping = inode->i_mapping;
638 	struct page *page;
639 
640 	page = find_get_page(mapping, index);
641 	if (page && PageUptodate(page))
642 		return page;
643 	f2fs_put_page(page, 0);
644 
645 	page = get_read_data_page(inode, index, 0, false);
646 	if (IS_ERR(page))
647 		return page;
648 
649 	if (PageUptodate(page))
650 		return page;
651 
652 	wait_on_page_locked(page);
653 	if (unlikely(!PageUptodate(page))) {
654 		f2fs_put_page(page, 0);
655 		return ERR_PTR(-EIO);
656 	}
657 	return page;
658 }
659 
660 /*
661  * If it tries to access a hole, return an error, because the callers
662  * (functions in dir.c and GC) need to be able to know whether this page
663  * exists or not.
664  */
665 struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
666 							bool for_write)
667 {
668 	struct address_space *mapping = inode->i_mapping;
669 	struct page *page;
670 repeat:
671 	page = get_read_data_page(inode, index, 0, for_write);
672 	if (IS_ERR(page))
673 		return page;
674 
675 	/* wait for read completion */
676 	lock_page(page);
677 	if (unlikely(page->mapping != mapping)) {
678 		f2fs_put_page(page, 1);
679 		goto repeat;
680 	}
681 	if (unlikely(!PageUptodate(page))) {
682 		f2fs_put_page(page, 1);
683 		return ERR_PTR(-EIO);
684 	}
685 	return page;
686 }
687 
688 /*
689  * Caller ensures that this data page is never allocated.
690  * A new zero-filled data page is allocated in the page cache.
691  *
692  * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
693  * f2fs_unlock_op().
694  * Note that ipage is set only by make_empty_dir, and if any error occurs,
695  * ipage should be released by this function.
696  */
697 struct page *get_new_data_page(struct inode *inode,
698 		struct page *ipage, pgoff_t index, bool new_i_size)
699 {
700 	struct address_space *mapping = inode->i_mapping;
701 	struct page *page;
702 	struct dnode_of_data dn;
703 	int err;
704 
705 	page = f2fs_grab_cache_page(mapping, index, true);
706 	if (!page) {
707 		/*
708 		 * before exiting, we should make sure ipage is released
709 		 * if any error occurs.
710 		 */
711 		f2fs_put_page(ipage, 1);
712 		return ERR_PTR(-ENOMEM);
713 	}
714 
715 	set_new_dnode(&dn, inode, ipage, NULL, 0);
716 	err = f2fs_reserve_block(&dn, index);
717 	if (err) {
718 		f2fs_put_page(page, 1);
719 		return ERR_PTR(err);
720 	}
721 	if (!ipage)
722 		f2fs_put_dnode(&dn);
723 
724 	if (PageUptodate(page))
725 		goto got_it;
726 
727 	if (dn.data_blkaddr == NEW_ADDR) {
728 		zero_user_segment(page, 0, PAGE_SIZE);
729 		if (!PageUptodate(page))
730 			SetPageUptodate(page);
731 	} else {
732 		f2fs_put_page(page, 1);
733 
734 		/* if ipage exists, blkaddr should be NEW_ADDR */
735 		f2fs_bug_on(F2FS_I_SB(inode), ipage);
736 		page = get_lock_data_page(inode, index, true);
737 		if (IS_ERR(page))
738 			return page;
739 	}
740 got_it:
741 	if (new_i_size && i_size_read(inode) <
742 				((loff_t)(index + 1) << PAGE_SHIFT))
743 		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
744 	return page;
745 }
746 
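/*
 * Allocate one new data block for dn->ofs_in_node via the segment
 * allocator, store its address in the node page, and extend i_size when
 * the new block ends beyond the current file size.
 */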
747 static int __allocate_data_block(struct dnode_of_data *dn)
748 {
749 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
750 	struct f2fs_summary sum;
751 	struct node_info ni;
752 	pgoff_t fofs;
753 	blkcnt_t count = 1;
754 	int err;
755 
756 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
757 		return -EPERM;
758 
759 	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
760 	if (dn->data_blkaddr == NEW_ADDR)
761 		goto alloc;
762 
763 	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
764 		return err;
765 
766 alloc:
767 	get_node_info(sbi, dn->nid, &ni);
768 	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
769 
770 	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
771 					&sum, CURSEG_WARM_DATA, NULL, false);
772 	set_data_blkaddr(dn);
773 
774 	/* update i_size */
775 	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
776 							dn->ofs_in_node;
777 	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
778 		f2fs_i_size_write(dn->inode,
779 				((loff_t)(fofs + 1) << PAGE_SHIFT));
780 	return 0;
781 }
782 
783 static inline bool __force_buffered_io(struct inode *inode, int rw)
784 {
785 	return ((f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) ||
786 			(rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
787 			F2FS_I_SB(inode)->s_ndevs);
788 }
789 
790 int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
791 {
792 	struct inode *inode = file_inode(iocb->ki_filp);
793 	struct f2fs_map_blocks map;
794 	int err = 0;
795 
796 	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
797 		return 0;
798 
799 	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
800 	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
801 	if (map.m_len > map.m_lblk)
802 		map.m_len -= map.m_lblk;
803 	else
804 		map.m_len = 0;
805 
806 	map.m_next_pgofs = NULL;
807 
808 	if (iocb->ki_flags & IOCB_DIRECT) {
809 		err = f2fs_convert_inline_inode(inode);
810 		if (err)
811 			return err;
812 		return f2fs_map_blocks(inode, &map, 1,
813 			__force_buffered_io(inode, WRITE) ?
814 				F2FS_GET_BLOCK_PRE_AIO :
815 				F2FS_GET_BLOCK_PRE_DIO);
816 	}
817 	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
818 		err = f2fs_convert_inline_inode(inode);
819 		if (err)
820 			return err;
821 	}
822 	if (!f2fs_has_inline_data(inode))
823 		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
824 	return err;
825 }
826 
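/*
 * F2FS_GET_BLOCK_PRE_AIO only needs to be serialized against node page
 * changes, so it takes sbi->node_change; all other mapping flags take the
 * full f2fs_lock_op() to exclude a concurrent checkpoint.
 */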
827 static inline void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
828 {
829 	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
830 		if (lock)
831 			down_read(&sbi->node_change);
832 		else
833 			up_read(&sbi->node_change);
834 	} else {
835 		if (lock)
836 			f2fs_lock_op(sbi);
837 		else
838 			f2fs_unlock_op(sbi);
839 	}
840 }
841 
842 /*
843  * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
844  * f2fs_map_blocks structure.
845  * If original data blocks are allocated, then give them to blockdev.
846  * Otherwise,
847  *     a. preallocate requested block addresses
848  *     b. do not use extent cache for better performance
849  *     c. give the block addresses to blockdev
850  */
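/*
 * Minimal usage sketch (identifiers are illustrative; compare the real
 * callers in this file, e.g. f2fs_preallocate_blocks() above and
 * f2fs_mpage_readpages() below):
 *
 *	struct f2fs_map_blocks map = {
 *		.m_lblk = pgofs, .m_len = len, .m_next_pgofs = NULL,
 *	};
 *	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
 *
 * On success with F2FS_MAP_MAPPED set, blocks m_pblk..m_pblk+m_len-1 are
 * the physical blocks backing the request.
 */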
851 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
852 						int create, int flag)
853 {
854 	unsigned int maxblocks = map->m_len;
855 	struct dnode_of_data dn;
856 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
857 	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
858 	pgoff_t pgofs, end_offset, end;
859 	int err = 0, ofs = 1;
860 	unsigned int ofs_in_node, last_ofs_in_node;
861 	blkcnt_t prealloc;
862 	struct extent_info ei = {0,0,0};
863 	block_t blkaddr;
864 
865 	if (!maxblocks)
866 		return 0;
867 
868 	map->m_len = 0;
869 	map->m_flags = 0;
870 
871 	/* it only supports block size == page size */
872 	pgofs =	(pgoff_t)map->m_lblk;
873 	end = pgofs + maxblocks;
874 
875 	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
876 		map->m_pblk = ei.blk + pgofs - ei.fofs;
877 		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
878 		map->m_flags = F2FS_MAP_MAPPED;
879 		goto out;
880 	}
881 
882 next_dnode:
883 	if (create)
884 		__do_map_lock(sbi, flag, true);
885 
886 	/* When reading holes, we still need the node page */
887 	set_new_dnode(&dn, inode, NULL, NULL, 0);
888 	err = get_dnode_of_data(&dn, pgofs, mode);
889 	if (err) {
890 		if (flag == F2FS_GET_BLOCK_BMAP)
891 			map->m_pblk = 0;
892 		if (err == -ENOENT) {
893 			err = 0;
894 			if (map->m_next_pgofs)
895 				*map->m_next_pgofs =
896 					get_next_page_offset(&dn, pgofs);
897 		}
898 		goto unlock_out;
899 	}
900 
901 	prealloc = 0;
902 	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
903 	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
904 
905 next_block:
906 	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
907 
908 	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
909 		if (create) {
910 			if (unlikely(f2fs_cp_error(sbi))) {
911 				err = -EIO;
912 				goto sync_out;
913 			}
914 			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
915 				if (blkaddr == NULL_ADDR) {
916 					prealloc++;
917 					last_ofs_in_node = dn.ofs_in_node;
918 				}
919 			} else {
920 				err = __allocate_data_block(&dn);
921 				if (!err)
922 					set_inode_flag(inode, FI_APPEND_WRITE);
923 			}
924 			if (err)
925 				goto sync_out;
926 			map->m_flags |= F2FS_MAP_NEW;
927 			blkaddr = dn.data_blkaddr;
928 		} else {
929 			if (flag == F2FS_GET_BLOCK_BMAP) {
930 				map->m_pblk = 0;
931 				goto sync_out;
932 			}
933 			if (flag == F2FS_GET_BLOCK_FIEMAP &&
934 						blkaddr == NULL_ADDR) {
935 				if (map->m_next_pgofs)
936 					*map->m_next_pgofs = pgofs + 1;
937 			}
938 			if (flag != F2FS_GET_BLOCK_FIEMAP ||
939 						blkaddr != NEW_ADDR)
940 				goto sync_out;
941 		}
942 	}
943 
944 	if (flag == F2FS_GET_BLOCK_PRE_AIO)
945 		goto skip;
946 
947 	if (map->m_len == 0) {
948 		/* preallocated unwritten block should be mapped for fiemap. */
949 		if (blkaddr == NEW_ADDR)
950 			map->m_flags |= F2FS_MAP_UNWRITTEN;
951 		map->m_flags |= F2FS_MAP_MAPPED;
952 
953 		map->m_pblk = blkaddr;
954 		map->m_len = 1;
955 	} else if ((map->m_pblk != NEW_ADDR &&
956 			blkaddr == (map->m_pblk + ofs)) ||
957 			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
958 			flag == F2FS_GET_BLOCK_PRE_DIO) {
959 		ofs++;
960 		map->m_len++;
961 	} else {
962 		goto sync_out;
963 	}
964 
965 skip:
966 	dn.ofs_in_node++;
967 	pgofs++;
968 
969 	/* preallocate blocks in batch for one dnode page */
970 	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
971 			(pgofs == end || dn.ofs_in_node == end_offset)) {
972 
973 		dn.ofs_in_node = ofs_in_node;
974 		err = reserve_new_blocks(&dn, prealloc);
975 		if (err)
976 			goto sync_out;
977 
978 		map->m_len += dn.ofs_in_node - ofs_in_node;
979 		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
980 			err = -ENOSPC;
981 			goto sync_out;
982 		}
983 		dn.ofs_in_node = end_offset;
984 	}
985 
986 	if (pgofs >= end)
987 		goto sync_out;
988 	else if (dn.ofs_in_node < end_offset)
989 		goto next_block;
990 
991 	f2fs_put_dnode(&dn);
992 
993 	if (create) {
994 		__do_map_lock(sbi, flag, false);
995 		f2fs_balance_fs(sbi, dn.node_changed);
996 	}
997 	goto next_dnode;
998 
999 sync_out:
1000 	f2fs_put_dnode(&dn);
1001 unlock_out:
1002 	if (create) {
1003 		__do_map_lock(sbi, flag, false);
1004 		f2fs_balance_fs(sbi, dn.node_changed);
1005 	}
1006 out:
1007 	trace_f2fs_map_blocks(inode, map, err);
1008 	return err;
1009 }
1010 
1011 static int __get_data_block(struct inode *inode, sector_t iblock,
1012 			struct buffer_head *bh, int create, int flag,
1013 			pgoff_t *next_pgofs)
1014 {
1015 	struct f2fs_map_blocks map;
1016 	int err;
1017 
1018 	map.m_lblk = iblock;
1019 	map.m_len = bh->b_size >> inode->i_blkbits;
1020 	map.m_next_pgofs = next_pgofs;
1021 
1022 	err = f2fs_map_blocks(inode, &map, create, flag);
1023 	if (!err) {
1024 		map_bh(bh, inode->i_sb, map.m_pblk);
1025 		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
1026 		bh->b_size = (u64)map.m_len << inode->i_blkbits;
1027 	}
1028 	return err;
1029 }
1030 
1031 static int get_data_block(struct inode *inode, sector_t iblock,
1032 			struct buffer_head *bh_result, int create, int flag,
1033 			pgoff_t *next_pgofs)
1034 {
1035 	return __get_data_block(inode, iblock, bh_result, create,
1036 							flag, next_pgofs);
1037 }
1038 
1039 static int get_data_block_dio(struct inode *inode, sector_t iblock,
1040 			struct buffer_head *bh_result, int create)
1041 {
1042 	return __get_data_block(inode, iblock, bh_result, create,
1043 						F2FS_GET_BLOCK_DIO, NULL);
1044 }
1045 
1046 static int get_data_block_bmap(struct inode *inode, sector_t iblock,
1047 			struct buffer_head *bh_result, int create)
1048 {
1049 	/* Block number less than F2FS MAX BLOCKS */
1050 	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
1051 		return -EFBIG;
1052 
1053 	return __get_data_block(inode, iblock, bh_result, create,
1054 						F2FS_GET_BLOCK_BMAP, NULL);
1055 }
1056 
1057 static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
1058 {
1059 	return (offset >> inode->i_blkbits);
1060 }
1061 
1062 static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
1063 {
1064 	return (blk << inode->i_blkbits);
1065 }
1066 
1067 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1068 		u64 start, u64 len)
1069 {
1070 	struct buffer_head map_bh;
1071 	sector_t start_blk, last_blk;
1072 	pgoff_t next_pgofs;
1073 	u64 logical = 0, phys = 0, size = 0;
1074 	u32 flags = 0;
1075 	int ret = 0;
1076 
1077 	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
1078 	if (ret)
1079 		return ret;
1080 
1081 	if (f2fs_has_inline_data(inode)) {
1082 		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
1083 		if (ret != -EAGAIN)
1084 			return ret;
1085 	}
1086 
1087 	inode_lock(inode);
1088 
1089 	if (logical_to_blk(inode, len) == 0)
1090 		len = blk_to_logical(inode, 1);
1091 
1092 	start_blk = logical_to_blk(inode, start);
1093 	last_blk = logical_to_blk(inode, start + len - 1);
1094 
1095 next:
1096 	memset(&map_bh, 0, sizeof(struct buffer_head));
1097 	map_bh.b_size = len;
1098 
1099 	ret = get_data_block(inode, start_blk, &map_bh, 0,
1100 					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
1101 	if (ret)
1102 		goto out;
1103 
1104 	/* HOLE */
1105 	if (!buffer_mapped(&map_bh)) {
1106 		start_blk = next_pgofs;
1107 
1108 		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
1109 					F2FS_I_SB(inode)->max_file_blocks))
1110 			goto prep_next;
1111 
1112 		flags |= FIEMAP_EXTENT_LAST;
1113 	}
1114 
1115 	if (size) {
1116 		if (f2fs_encrypted_inode(inode))
1117 			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
1118 
1119 		ret = fiemap_fill_next_extent(fieinfo, logical,
1120 				phys, size, flags);
1121 	}
1122 
1123 	if (start_blk > last_blk || ret)
1124 		goto out;
1125 
1126 	logical = blk_to_logical(inode, start_blk);
1127 	phys = blk_to_logical(inode, map_bh.b_blocknr);
1128 	size = map_bh.b_size;
1129 	flags = 0;
1130 	if (buffer_unwritten(&map_bh))
1131 		flags = FIEMAP_EXTENT_UNWRITTEN;
1132 
1133 	start_blk += logical_to_blk(inode, size);
1134 
1135 prep_next:
1136 	cond_resched();
1137 	if (fatal_signal_pending(current))
1138 		ret = -EINTR;
1139 	else
1140 		goto next;
1141 out:
1142 	if (ret == 1)
1143 		ret = 0;
1144 
1145 	inode_unlock(inode);
1146 	return ret;
1147 }
1148 
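/*
 * Allocate a read bio aimed at @blkaddr. For encrypted regular files an
 * fscrypt context is attached via bi_private so that f2fs_read_end_io()
 * can decrypt the payload on completion.
 */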
1149 static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
1150 				 unsigned nr_pages)
1151 {
1152 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1153 	struct fscrypt_ctx *ctx = NULL;
1154 	struct bio *bio;
1155 
1156 	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
1157 		ctx = fscrypt_get_ctx(inode, GFP_NOFS);
1158 		if (IS_ERR(ctx))
1159 			return ERR_CAST(ctx);
1160 
1161 		/* wait for the page to be moved by cleaning (GC) */
1162 		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
1163 	}
1164 
1165 	bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
1166 	if (!bio) {
1167 		if (ctx)
1168 			fscrypt_release_ctx(ctx);
1169 		return ERR_PTR(-ENOMEM);
1170 	}
1171 	f2fs_target_device(sbi, blkaddr, bio);
1172 	bio->bi_end_io = f2fs_read_end_io;
1173 	bio->bi_private = ctx;
1174 
1175 	return bio;
1176 }
1177 
1178 /*
1179  * This function was originally taken from fs/mpage.c, and customized for f2fs.
1180  * The major change is that f2fs uses block_size == page_size by default.
1181  */
1182 static int f2fs_mpage_readpages(struct address_space *mapping,
1183 			struct list_head *pages, struct page *page,
1184 			unsigned nr_pages)
1185 {
1186 	struct bio *bio = NULL;
1187 	unsigned page_idx;
1188 	sector_t last_block_in_bio = 0;
1189 	struct inode *inode = mapping->host;
1190 	const unsigned blkbits = inode->i_blkbits;
1191 	const unsigned blocksize = 1 << blkbits;
1192 	sector_t block_in_file;
1193 	sector_t last_block;
1194 	sector_t last_block_in_file;
1195 	sector_t block_nr;
1196 	struct f2fs_map_blocks map;
1197 
1198 	map.m_pblk = 0;
1199 	map.m_lblk = 0;
1200 	map.m_len = 0;
1201 	map.m_flags = 0;
1202 	map.m_next_pgofs = NULL;
1203 
1204 	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
1205 
1206 		if (pages) {
1207 			page = list_last_entry(pages, struct page, lru);
1208 
1209 			prefetchw(&page->flags);
1210 			list_del(&page->lru);
1211 			if (add_to_page_cache_lru(page, mapping,
1212 						  page->index,
1213 						  readahead_gfp_mask(mapping)))
1214 				goto next_page;
1215 		}
1216 
1217 		block_in_file = (sector_t)page->index;
1218 		last_block = block_in_file + nr_pages;
1219 		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
1220 								blkbits;
1221 		if (last_block > last_block_in_file)
1222 			last_block = last_block_in_file;
1223 
1224 		/*
1225 		 * Map blocks using the previous result first.
1226 		 */
1227 		if ((map.m_flags & F2FS_MAP_MAPPED) &&
1228 				block_in_file > map.m_lblk &&
1229 				block_in_file < (map.m_lblk + map.m_len))
1230 			goto got_it;
1231 
1232 		/*
1233 		 * Then do more f2fs_map_blocks() calls until we are
1234 		 * done with this page.
1235 		 */
1236 		map.m_flags = 0;
1237 
1238 		if (block_in_file < last_block) {
1239 			map.m_lblk = block_in_file;
1240 			map.m_len = last_block - block_in_file;
1241 
1242 			if (f2fs_map_blocks(inode, &map, 0,
1243 						F2FS_GET_BLOCK_READ))
1244 				goto set_error_page;
1245 		}
1246 got_it:
1247 		if ((map.m_flags & F2FS_MAP_MAPPED)) {
1248 			block_nr = map.m_pblk + block_in_file - map.m_lblk;
1249 			SetPageMappedToDisk(page);
1250 
1251 			if (!PageUptodate(page) && !cleancache_get_page(page)) {
1252 				SetPageUptodate(page);
1253 				goto confused;
1254 			}
1255 		} else {
1256 			zero_user_segment(page, 0, PAGE_SIZE);
1257 			if (!PageUptodate(page))
1258 				SetPageUptodate(page);
1259 			unlock_page(page);
1260 			goto next_page;
1261 		}
1262 
1263 		/*
1264 		 * This page will go to BIO.  Do we need to send this
1265 		 * BIO off first?
1266 		 */
1267 		if (bio && (last_block_in_bio != block_nr - 1 ||
1268 			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
1269 submit_and_realloc:
1270 			__submit_bio(F2FS_I_SB(inode), bio, DATA);
1271 			bio = NULL;
1272 		}
1273 		if (bio == NULL) {
1274 			bio = f2fs_grab_bio(inode, block_nr, nr_pages);
1275 			if (IS_ERR(bio)) {
1276 				bio = NULL;
1277 				goto set_error_page;
1278 			}
1279 			bio_set_op_attrs(bio, REQ_OP_READ, 0);
1280 		}
1281 
1282 		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
1283 			goto submit_and_realloc;
1284 
1285 		last_block_in_bio = block_nr;
1286 		goto next_page;
1287 set_error_page:
1288 		SetPageError(page);
1289 		zero_user_segment(page, 0, PAGE_SIZE);
1290 		unlock_page(page);
1291 		goto next_page;
1292 confused:
1293 		if (bio) {
1294 			__submit_bio(F2FS_I_SB(inode), bio, DATA);
1295 			bio = NULL;
1296 		}
1297 		unlock_page(page);
1298 next_page:
1299 		if (pages)
1300 			put_page(page);
1301 	}
1302 	BUG_ON(pages && !list_empty(pages));
1303 	if (bio)
1304 		__submit_bio(F2FS_I_SB(inode), bio, DATA);
1305 	return 0;
1306 }
1307 
1308 static int f2fs_read_data_page(struct file *file, struct page *page)
1309 {
1310 	struct inode *inode = page->mapping->host;
1311 	int ret = -EAGAIN;
1312 
1313 	trace_f2fs_readpage(page, DATA);
1314 
1315 	/* If the file has inline data, try to read it directly */
1316 	if (f2fs_has_inline_data(inode))
1317 		ret = f2fs_read_inline_data(inode, page);
1318 	if (ret == -EAGAIN)
1319 		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
1320 	return ret;
1321 }
1322 
1323 static int f2fs_read_data_pages(struct file *file,
1324 			struct address_space *mapping,
1325 			struct list_head *pages, unsigned nr_pages)
1326 {
1327 	struct inode *inode = file->f_mapping->host;
1328 	struct page *page = list_last_entry(pages, struct page, lru);
1329 
1330 	trace_f2fs_readpages(inode, page, nr_pages);
1331 
1332 	/* If the file has inline data, skip readpages */
1333 	if (f2fs_has_inline_data(inode))
1334 		return 0;
1335 
1336 	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
1337 }
1338 
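/*
 * Encrypt a data page of an encrypted regular file into a bounce page
 * (fio->encrypted_page) before writeback. A transient -ENOMEM flushes
 * pending writes, backs off, and retries with __GFP_NOFAIL.
 */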
1339 static int encrypt_one_page(struct f2fs_io_info *fio)
1340 {
1341 	struct inode *inode = fio->page->mapping->host;
1342 	gfp_t gfp_flags = GFP_NOFS;
1343 
1344 	if (!f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
1345 		return 0;
1346 
1347 	/* wait for GCed encrypted page writeback */
1348 	f2fs_wait_on_encrypted_page_writeback(fio->sbi, fio->old_blkaddr);
1349 
1350 retry_encrypt:
1351 	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
1352 			PAGE_SIZE, 0, fio->page->index, gfp_flags);
1353 	if (!IS_ERR(fio->encrypted_page))
1354 		return 0;
1355 
1356 	/* flush pending IOs and wait for a while in the ENOMEM case */
1357 	if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
1358 		f2fs_flush_merged_writes(fio->sbi);
1359 		congestion_wait(BLK_RW_ASYNC, HZ/50);
1360 		gfp_flags |= __GFP_NOFAIL;
1361 		goto retry_encrypt;
1362 	}
1363 	return PTR_ERR(fio->encrypted_page);
1364 }
1365 
1366 static inline bool need_inplace_update(struct f2fs_io_info *fio)
1367 {
1368 	struct inode *inode = fio->page->mapping->host;
1369 
1370 	if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
1371 		return false;
1372 	if (is_cold_data(fio->page))
1373 		return false;
1374 	if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
1375 		return false;
1376 
1377 	return need_inplace_update_policy(inode, fio);
1378 }
1379 
1380 static inline bool valid_ipu_blkaddr(struct f2fs_io_info *fio)
1381 {
1382 	if (fio->old_blkaddr == NEW_ADDR)
1383 		return false;
1384 	if (fio->old_blkaddr == NULL_ADDR)
1385 		return false;
1386 	return true;
1387 }
1388 
1389 int do_write_data_page(struct f2fs_io_info *fio)
1390 {
1391 	struct page *page = fio->page;
1392 	struct inode *inode = page->mapping->host;
1393 	struct dnode_of_data dn;
1394 	struct extent_info ei = {0,0,0};
1395 	bool ipu_force = false;
1396 	int err = 0;
1397 
1398 	set_new_dnode(&dn, inode, NULL, NULL, 0);
1399 	if (need_inplace_update(fio) &&
1400 			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
1401 		fio->old_blkaddr = ei.blk + page->index - ei.fofs;
1402 
1403 		if (valid_ipu_blkaddr(fio)) {
1404 			ipu_force = true;
1405 			fio->need_lock = LOCK_DONE;
1406 			goto got_it;
1407 		}
1408 	}
1409 
1410 	/* avoid a deadlock between page->lock and f2fs_lock_op() */
1411 	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
1412 		return -EAGAIN;
1413 
1414 	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
1415 	if (err)
1416 		goto out;
1417 
1418 	fio->old_blkaddr = dn.data_blkaddr;
1419 
1420 	/* This page is already truncated */
1421 	if (fio->old_blkaddr == NULL_ADDR) {
1422 		ClearPageUptodate(page);
1423 		goto out_writepage;
1424 	}
1425 got_it:
1426 	/*
1427 	 * If the current allocation needs SSR,
1428 	 * it is better to do in-place writes for the updated data.
1429 	 */
1430 	if (ipu_force || (valid_ipu_blkaddr(fio) && need_inplace_update(fio))) {
1431 		err = encrypt_one_page(fio);
1432 		if (err)
1433 			goto out_writepage;
1434 
1435 		set_page_writeback(page);
1436 		f2fs_put_dnode(&dn);
1437 		if (fio->need_lock == LOCK_REQ)
1438 			f2fs_unlock_op(fio->sbi);
1439 		err = rewrite_data_page(fio);
1440 		trace_f2fs_do_write_data_page(fio->page, IPU);
1441 		set_inode_flag(inode, FI_UPDATE_WRITE);
1442 		return err;
1443 	}
1444 
1445 	if (fio->need_lock == LOCK_RETRY) {
1446 		if (!f2fs_trylock_op(fio->sbi)) {
1447 			err = -EAGAIN;
1448 			goto out_writepage;
1449 		}
1450 		fio->need_lock = LOCK_REQ;
1451 	}
1452 
1453 	err = encrypt_one_page(fio);
1454 	if (err)
1455 		goto out_writepage;
1456 
1457 	set_page_writeback(page);
1458 
1459 	/* LFS mode write path */
1460 	write_data_page(&dn, fio);
1461 	trace_f2fs_do_write_data_page(page, OPU);
1462 	set_inode_flag(inode, FI_APPEND_WRITE);
1463 	if (page->index == 0)
1464 		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
1465 out_writepage:
1466 	f2fs_put_dnode(&dn);
1467 out:
1468 	if (fio->need_lock == LOCK_REQ)
1469 		f2fs_unlock_op(fio->sbi);
1470 	return err;
1471 }
1472 
1473 static int __write_data_page(struct page *page, bool *submitted,
1474 				struct writeback_control *wbc)
1475 {
1476 	struct inode *inode = page->mapping->host;
1477 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1478 	loff_t i_size = i_size_read(inode);
1479 	const pgoff_t end_index = ((unsigned long long) i_size)
1480 							>> PAGE_SHIFT;
1481 	loff_t psize = (page->index + 1) << PAGE_SHIFT;
1482 	unsigned offset = 0;
1483 	bool need_balance_fs = false;
1484 	int err = 0;
1485 	struct f2fs_io_info fio = {
1486 		.sbi = sbi,
1487 		.type = DATA,
1488 		.op = REQ_OP_WRITE,
1489 		.op_flags = wbc_to_write_flags(wbc),
1490 		.old_blkaddr = NULL_ADDR,
1491 		.page = page,
1492 		.encrypted_page = NULL,
1493 		.submitted = false,
1494 		.need_lock = LOCK_RETRY,
1495 	};
1496 
1497 	trace_f2fs_writepage(page, DATA);
1498 
1499 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1500 		goto redirty_out;
1501 
1502 	if (page->index < end_index)
1503 		goto write;
1504 
1505 	/*
1506 	 * If the offset is beyond the end of the file,
1507 	 * this page does not have to be written to disk.
1508 	 */
1509 	offset = i_size & (PAGE_SIZE - 1);
1510 	if ((page->index >= end_index + 1) || !offset)
1511 		goto out;
1512 
1513 	zero_user_segment(page, offset, PAGE_SIZE);
1514 write:
1515 	if (f2fs_is_drop_cache(inode))
1516 		goto out;
1517 	/* we should not write the 0'th page, which holds the journal header */
1518 	if (f2fs_is_volatile_file(inode) && (!page->index ||
1519 			(!wbc->for_reclaim &&
1520 			available_free_memory(sbi, BASE_CHECK))))
1521 		goto redirty_out;
1522 
1523 	/* we should bypass data pages so that the kworker jobs can proceed */
1524 	if (unlikely(f2fs_cp_error(sbi))) {
1525 		mapping_set_error(page->mapping, -EIO);
1526 		goto out;
1527 	}
1528 
1529 	/* Dentry blocks are controlled by checkpoint */
1530 	if (S_ISDIR(inode->i_mode)) {
1531 		fio.need_lock = LOCK_DONE;
1532 		err = do_write_data_page(&fio);
1533 		goto done;
1534 	}
1535 
1536 	if (!wbc->for_reclaim)
1537 		need_balance_fs = true;
1538 	else if (has_not_enough_free_secs(sbi, 0, 0))
1539 		goto redirty_out;
1540 	else
1541 		set_inode_flag(inode, FI_HOT_DATA);
1542 
1543 	err = -EAGAIN;
1544 	if (f2fs_has_inline_data(inode)) {
1545 		err = f2fs_write_inline_data(inode, page);
1546 		if (!err)
1547 			goto out;
1548 	}
1549 
1550 	if (err == -EAGAIN) {
1551 		err = do_write_data_page(&fio);
1552 		if (err == -EAGAIN) {
1553 			fio.need_lock = LOCK_REQ;
1554 			err = do_write_data_page(&fio);
1555 		}
1556 	}
1557 	if (F2FS_I(inode)->last_disk_size < psize)
1558 		F2FS_I(inode)->last_disk_size = psize;
1559 
1560 done:
1561 	if (err && err != -ENOENT)
1562 		goto redirty_out;
1563 
1564 out:
1565 	inode_dec_dirty_pages(inode);
1566 	if (err)
1567 		ClearPageUptodate(page);
1568 
1569 	if (wbc->for_reclaim) {
1570 		f2fs_submit_merged_write_cond(sbi, inode, 0, page->index, DATA);
1571 		clear_inode_flag(inode, FI_HOT_DATA);
1572 		remove_dirty_inode(inode);
1573 		submitted = NULL;
1574 	}
1575 
1576 	unlock_page(page);
1577 	if (!S_ISDIR(inode->i_mode))
1578 		f2fs_balance_fs(sbi, need_balance_fs);
1579 
1580 	if (unlikely(f2fs_cp_error(sbi))) {
1581 		f2fs_submit_merged_write(sbi, DATA);
1582 		submitted = NULL;
1583 	}
1584 
1585 	if (submitted)
1586 		*submitted = fio.submitted;
1587 
1588 	return 0;
1589 
1590 redirty_out:
1591 	redirty_page_for_writepage(wbc, page);
1592 	if (!err)
1593 		return AOP_WRITEPAGE_ACTIVATE;
1594 	unlock_page(page);
1595 	return err;
1596 }
1597 
1598 static int f2fs_write_data_page(struct page *page,
1599 					struct writeback_control *wbc)
1600 {
1601 	return __write_data_page(page, NULL, wbc);
1602 }
1603 
1604 /*
1605  * This function was copied from write_cache_pages in mm/page-writeback.c.
1606  * The major change is that the write step for cold data pages is separated
1607  * from that for warm/hot data pages.
1608  */
1609 static int f2fs_write_cache_pages(struct address_space *mapping,
1610 					struct writeback_control *wbc)
1611 {
1612 	int ret = 0;
1613 	int done = 0;
1614 	struct pagevec pvec;
1615 	int nr_pages;
1616 	pgoff_t uninitialized_var(writeback_index);
1617 	pgoff_t index;
1618 	pgoff_t end;		/* Inclusive */
1619 	pgoff_t done_index;
1620 	pgoff_t last_idx = ULONG_MAX;
1621 	int cycled;
1622 	int range_whole = 0;
1623 	int tag;
1624 
1625 	pagevec_init(&pvec, 0);
1626 
1627 	if (get_dirty_pages(mapping->host) <=
1628 				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
1629 		set_inode_flag(mapping->host, FI_HOT_DATA);
1630 	else
1631 		clear_inode_flag(mapping->host, FI_HOT_DATA);
1632 
1633 	if (wbc->range_cyclic) {
1634 		writeback_index = mapping->writeback_index; /* prev offset */
1635 		index = writeback_index;
1636 		if (index == 0)
1637 			cycled = 1;
1638 		else
1639 			cycled = 0;
1640 		end = -1;
1641 	} else {
1642 		index = wbc->range_start >> PAGE_SHIFT;
1643 		end = wbc->range_end >> PAGE_SHIFT;
1644 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1645 			range_whole = 1;
1646 		cycled = 1; /* ignore range_cyclic tests */
1647 	}
1648 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1649 		tag = PAGECACHE_TAG_TOWRITE;
1650 	else
1651 		tag = PAGECACHE_TAG_DIRTY;
1652 retry:
1653 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1654 		tag_pages_for_writeback(mapping, index, end);
1655 	done_index = index;
1656 	while (!done && (index <= end)) {
1657 		int i;
1658 
1659 		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
1660 			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
1661 		if (nr_pages == 0)
1662 			break;
1663 
1664 		for (i = 0; i < nr_pages; i++) {
1665 			struct page *page = pvec.pages[i];
1666 			bool submitted = false;
1667 
1668 			if (page->index > end) {
1669 				done = 1;
1670 				break;
1671 			}
1672 
1673 			done_index = page->index;
1674 retry_write:
1675 			lock_page(page);
1676 
1677 			if (unlikely(page->mapping != mapping)) {
1678 continue_unlock:
1679 				unlock_page(page);
1680 				continue;
1681 			}
1682 
1683 			if (!PageDirty(page)) {
1684 				/* someone wrote it for us */
1685 				goto continue_unlock;
1686 			}
1687 
1688 			if (PageWriteback(page)) {
1689 				if (wbc->sync_mode != WB_SYNC_NONE)
1690 					f2fs_wait_on_page_writeback(page,
1691 								DATA, true);
1692 				else
1693 					goto continue_unlock;
1694 			}
1695 
1696 			BUG_ON(PageWriteback(page));
1697 			if (!clear_page_dirty_for_io(page))
1698 				goto continue_unlock;
1699 
1700 			ret = __write_data_page(page, &submitted, wbc);
1701 			if (unlikely(ret)) {
1702 				/*
1703 				 * keep nr_to_write, since vfs uses this to
1704 				 * get # of written pages.
1705 				 */
1706 				if (ret == AOP_WRITEPAGE_ACTIVATE) {
1707 					unlock_page(page);
1708 					ret = 0;
1709 					continue;
1710 				} else if (ret == -EAGAIN) {
1711 					ret = 0;
1712 					if (wbc->sync_mode == WB_SYNC_ALL) {
1713 						cond_resched();
1714 						congestion_wait(BLK_RW_ASYNC,
1715 									HZ/50);
1716 						goto retry_write;
1717 					}
1718 					continue;
1719 				}
1720 				done_index = page->index + 1;
1721 				done = 1;
1722 				break;
1723 			} else if (submitted) {
1724 				last_idx = page->index;
1725 			}
1726 
1727 			/* give priority to WB_SYNC threads */
1728 			if ((atomic_read(&F2FS_M_SB(mapping)->wb_sync_req) ||
1729 					--wbc->nr_to_write <= 0) &&
1730 					wbc->sync_mode == WB_SYNC_NONE) {
1731 				done = 1;
1732 				break;
1733 			}
1734 		}
1735 		pagevec_release(&pvec);
1736 		cond_resched();
1737 	}
1738 
1739 	if (!cycled && !done) {
1740 		cycled = 1;
1741 		index = 0;
1742 		end = writeback_index - 1;
1743 		goto retry;
1744 	}
1745 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1746 		mapping->writeback_index = done_index;
1747 
1748 	if (last_idx != ULONG_MAX)
1749 		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
1750 						0, last_idx, DATA);
1751 
1752 	return ret;
1753 }
1754 
1755 static int f2fs_write_data_pages(struct address_space *mapping,
1756 			    struct writeback_control *wbc)
1757 {
1758 	struct inode *inode = mapping->host;
1759 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1760 	struct blk_plug plug;
1761 	int ret;
1762 
1763 	/* deal with chardevs and other special files */
1764 	if (!mapping->a_ops->writepage)
1765 		return 0;
1766 
1767 	/* skip writing if there is no dirty page in this inode */
1768 	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
1769 		return 0;
1770 
1771 	/* during POR, we don't need to trigger writepage at all. */
1772 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1773 		goto skip_write;
1774 
1775 	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
1776 			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
1777 			available_free_memory(sbi, DIRTY_DENTS))
1778 		goto skip_write;
1779 
1780 	/* skip writing during file defragment */
1781 	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
1782 		goto skip_write;
1783 
1784 	trace_f2fs_writepages(mapping->host, wbc, DATA);
1785 
1786 	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
1787 	if (wbc->sync_mode == WB_SYNC_ALL)
1788 		atomic_inc(&sbi->wb_sync_req);
1789 	else if (atomic_read(&sbi->wb_sync_req))
1790 		goto skip_write;
1791 
1792 	blk_start_plug(&plug);
1793 	ret = f2fs_write_cache_pages(mapping, wbc);
1794 	blk_finish_plug(&plug);
1795 
1796 	if (wbc->sync_mode == WB_SYNC_ALL)
1797 		atomic_dec(&sbi->wb_sync_req);
1798 	/*
1799 	 * if some pages were truncated, we cannot guarantee that mapping->host
1800 	 * can detect pending bios.
1801 	 */
1802 
1803 	remove_dirty_inode(inode);
1804 	return ret;
1805 
1806 skip_write:
1807 	wbc->pages_skipped += get_dirty_pages(inode);
1808 	trace_f2fs_writepages(mapping->host, wbc, DATA);
1809 	return 0;
1810 }
1811 
1812 static void f2fs_write_failed(struct address_space *mapping, loff_t to)
1813 {
1814 	struct inode *inode = mapping->host;
1815 	loff_t i_size = i_size_read(inode);
1816 
1817 	if (to > i_size) {
1818 		down_write(&F2FS_I(inode)->i_mmap_sem);
1819 		truncate_pagecache(inode, i_size);
1820 		truncate_blocks(inode, i_size, true);
1821 		up_write(&F2FS_I(inode)->i_mmap_sem);
1822 	}
1823 }
1824 
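/*
 * Resolve the block address backing the page being written; when the
 * write lands in a hole or in inline data, reserve a new block under the
 * appropriate lock. f2fs_write_begin() uses the result to decide whether
 * the page must be read in or simply zeroed.
 */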
1825 static int prepare_write_begin(struct f2fs_sb_info *sbi,
1826 			struct page *page, loff_t pos, unsigned len,
1827 			block_t *blk_addr, bool *node_changed)
1828 {
1829 	struct inode *inode = page->mapping->host;
1830 	pgoff_t index = page->index;
1831 	struct dnode_of_data dn;
1832 	struct page *ipage;
1833 	bool locked = false;
1834 	struct extent_info ei = {0,0,0};
1835 	int err = 0;
1836 
1837 	/*
1838 	 * we already allocated all the blocks, so we don't need to get
1839 	 * the block addresses when there is no need to fill the page.
1840 	 */
1841 	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
1842 			!is_inode_flag_set(inode, FI_NO_PREALLOC))
1843 		return 0;
1844 
1845 	if (f2fs_has_inline_data(inode) ||
1846 			(pos & PAGE_MASK) >= i_size_read(inode)) {
1847 		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
1848 		locked = true;
1849 	}
1850 restart:
1851 	/* check inline_data */
1852 	ipage = get_node_page(sbi, inode->i_ino);
1853 	if (IS_ERR(ipage)) {
1854 		err = PTR_ERR(ipage);
1855 		goto unlock_out;
1856 	}
1857 
1858 	set_new_dnode(&dn, inode, ipage, ipage, 0);
1859 
1860 	if (f2fs_has_inline_data(inode)) {
1861 		if (pos + len <= MAX_INLINE_DATA) {
1862 			read_inline_data(page, ipage);
1863 			set_inode_flag(inode, FI_DATA_EXIST);
1864 			if (inode->i_nlink)
1865 				set_inline_node(ipage);
1866 		} else {
1867 			err = f2fs_convert_inline_page(&dn, page);
1868 			if (err)
1869 				goto out;
1870 			if (dn.data_blkaddr == NULL_ADDR)
1871 				err = f2fs_get_block(&dn, index);
1872 		}
1873 	} else if (locked) {
1874 		err = f2fs_get_block(&dn, index);
1875 	} else {
1876 		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
1877 			dn.data_blkaddr = ei.blk + index - ei.fofs;
1878 		} else {
1879 			/* hole case */
1880 			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
1881 			if (err || dn.data_blkaddr == NULL_ADDR) {
1882 				f2fs_put_dnode(&dn);
1883 				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
1884 								true);
1885 				locked = true;
1886 				goto restart;
1887 			}
1888 		}
1889 	}
1890 
1891 	/* convert_inline_page can make node_changed */
1892 	*blk_addr = dn.data_blkaddr;
1893 	*node_changed = dn.node_changed;
1894 out:
1895 	f2fs_put_dnode(&dn);
1896 unlock_out:
1897 	if (locked)
1898 		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
1899 	return err;
1900 }
1901 
1902 static int f2fs_write_begin(struct file *file, struct address_space *mapping,
1903 		loff_t pos, unsigned len, unsigned flags,
1904 		struct page **pagep, void **fsdata)
1905 {
1906 	struct inode *inode = mapping->host;
1907 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1908 	struct page *page = NULL;
1909 	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
1910 	bool need_balance = false;
1911 	block_t blkaddr = NULL_ADDR;
1912 	int err = 0;
1913 
1914 	trace_f2fs_write_begin(inode, pos, len, flags);
1915 
1916 	/*
1917 	 * We should check this at this moment to avoid deadlock on inode page
1918 	 * and #0 page. The locking rule for inline_data conversion should be:
1919 	 * lock_page(page #0) -> lock_page(inode_page)
1920 	 */
1921 	if (index != 0) {
1922 		err = f2fs_convert_inline_inode(inode);
1923 		if (err)
1924 			goto fail;
1925 	}
1926 repeat:
1927 	/*
1928 	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
1929 	 * wait_for_stable_page. We will wait for it below, under our IO control.
1930 	 */
1931 	page = pagecache_get_page(mapping, index,
1932 				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
1933 	if (!page) {
1934 		err = -ENOMEM;
1935 		goto fail;
1936 	}
1937 
1938 	*pagep = page;
1939 
1940 	err = prepare_write_begin(sbi, page, pos, len,
1941 					&blkaddr, &need_balance);
1942 	if (err)
1943 		goto fail;
1944 
1945 	if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
1946 		unlock_page(page);
1947 		f2fs_balance_fs(sbi, true);
1948 		lock_page(page);
1949 		if (page->mapping != mapping) {
1950 			/* The page got truncated from under us */
1951 			f2fs_put_page(page, 1);
1952 			goto repeat;
1953 		}
1954 	}
1955 
1956 	f2fs_wait_on_page_writeback(page, DATA, false);
1957 
1958 	/* wait for GCed encrypted page writeback */
1959 	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
1960 		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
1961 
1962 	if (len == PAGE_SIZE || PageUptodate(page))
1963 		return 0;
1964 
1965 	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
1966 		zero_user_segment(page, len, PAGE_SIZE);
1967 		return 0;
1968 	}
1969 
1970 	if (blkaddr == NEW_ADDR) {
1971 		zero_user_segment(page, 0, PAGE_SIZE);
1972 		SetPageUptodate(page);
1973 	} else {
1974 		struct bio *bio;
1975 
1976 		bio = f2fs_grab_bio(inode, blkaddr, 1);
1977 		if (IS_ERR(bio)) {
1978 			err = PTR_ERR(bio);
1979 			goto fail;
1980 		}
1981 		bio->bi_opf = REQ_OP_READ;
1982 		if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
1983 			bio_put(bio);
1984 			err = -EFAULT;
1985 			goto fail;
1986 		}
1987 
1988 		__submit_bio(sbi, bio, DATA);
1989 
1990 		lock_page(page);
1991 		if (unlikely(page->mapping != mapping)) {
1992 			f2fs_put_page(page, 1);
1993 			goto repeat;
1994 		}
1995 		if (unlikely(!PageUptodate(page))) {
1996 			err = -EIO;
1997 			goto fail;
1998 		}
1999 	}
2000 	return 0;
2001 
2002 fail:
2003 	f2fs_put_page(page, 1);
2004 	f2fs_write_failed(mapping, pos + len);
2005 	return err;
2006 }
2007 
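/*
 * ->write_end() counterpart of f2fs_write_begin(): mark the page dirty,
 * extend i_size if the write went past EOF, and unlock and release the
 * page.  Returning 0 makes generic_perform_write() retry the copy.
 */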
2008 static int f2fs_write_end(struct file *file,
2009 			struct address_space *mapping,
2010 			loff_t pos, unsigned len, unsigned copied,
2011 			struct page *page, void *fsdata)
2012 {
2013 	struct inode *inode = page->mapping->host;
2014 
2015 	trace_f2fs_write_end(inode, pos, len, copied);
2016 
	/*
	 * A !PageUptodate page here can only come from a full-page write
	 * (len == PAGE_SIZE), so copied is expected to be PAGE_SIZE as
	 * well.  Otherwise, treat the write as if nothing was copied and
	 * let generic_perform_write() retry the copy via copied == 0.
	 */
2022 	if (!PageUptodate(page)) {
2023 		if (unlikely(copied != len))
2024 			copied = 0;
2025 		else
2026 			SetPageUptodate(page);
2027 	}
2028 	if (!copied)
2029 		goto unlock_out;
2030 
2031 	set_page_dirty(page);
2032 
2033 	if (pos + copied > i_size_read(inode))
2034 		f2fs_i_size_write(inode, pos + copied);
2035 unlock_out:
2036 	f2fs_put_page(page, 1);
2037 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2038 	return copied;
2039 }
2040 
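/*
 * Direct I/O requires both the file offset and the user buffer to be
 * aligned to the filesystem block size.  For example, with 4KB blocks
 * blocksize_mask is 0xfff: offset 4096 passes, while offset 4100
 * (4100 & 0xfff == 4) is rejected with -EINVAL.
 */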
2041 static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
2042 			   loff_t offset)
2043 {
2044 	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
2045 
2046 	if (offset & blocksize_mask)
2047 		return -EINVAL;
2048 
2049 	if (iov_iter_alignment(iter) & blocksize_mask)
2050 		return -EINVAL;
2051 
2052 	return 0;
2053 }
2054 
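/*
 * ->direct_IO(): pass the request to the generic blockdev DIO code with
 * get_data_block_dio() as the block mapper, holding dio_rwsem for the
 * I/O direction.  When __force_buffered_io() is true (presumably for
 * cases the DIO path cannot serve, such as encrypted regular files),
 * returning 0 here makes the VFS fall back to buffered I/O.
 */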
2055 static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
2056 {
2057 	struct address_space *mapping = iocb->ki_filp->f_mapping;
2058 	struct inode *inode = mapping->host;
2059 	size_t count = iov_iter_count(iter);
2060 	loff_t offset = iocb->ki_pos;
2061 	int rw = iov_iter_rw(iter);
2062 	int err;
2063 
2064 	err = check_direct_IO(inode, iter, offset);
2065 	if (err)
2066 		return err;
2067 
2068 	if (__force_buffered_io(inode, rw))
2069 		return 0;
2070 
2071 	trace_f2fs_direct_IO_enter(inode, offset, count, rw);
2072 
2073 	down_read(&F2FS_I(inode)->dio_rwsem[rw]);
2074 	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
2075 	up_read(&F2FS_I(inode)->dio_rwsem[rw]);
2076 
2077 	if (rw == WRITE) {
2078 		if (err > 0)
2079 			set_inode_flag(inode, FI_UPDATE_WRITE);
2080 		else if (err < 0)
2081 			f2fs_write_failed(mapping, offset + count);
2082 	}
2083 
2084 	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
2085 
2086 	return err;
2087 }
2088 
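/*
 * ->invalidatepage(): keep dirty-page accounting in sync when a page is
 * truncated or invalidated.  For regular inodes, only a whole-page
 * invalidation drops the accounting and PagePrivate; meta and node
 * inodes (whose inos sit below F2FS_ROOT_INO) are handled unconditionally.
 */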
2089 void f2fs_invalidate_page(struct page *page, unsigned int offset,
2090 							unsigned int length)
2091 {
2092 	struct inode *inode = page->mapping->host;
2093 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2094 
2095 	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
2096 		(offset % PAGE_SIZE || length != PAGE_SIZE))
2097 		return;
2098 
2099 	if (PageDirty(page)) {
2100 		if (inode->i_ino == F2FS_META_INO(sbi)) {
2101 			dec_page_count(sbi, F2FS_DIRTY_META);
2102 		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
2103 			dec_page_count(sbi, F2FS_DIRTY_NODES);
2104 		} else {
2105 			inode_dec_dirty_pages(inode);
2106 			remove_dirty_inode(inode);
2107 		}
2108 	}
2109 
	/* This is an atomically written page; keep it Private */
2111 	if (IS_ATOMIC_WRITTEN_PAGE(page))
2112 		return drop_inmem_page(inode, page);
2113 
2114 	set_page_private(page, 0);
2115 	ClearPagePrivate(page);
2116 }
2117 
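/*
 * ->releasepage(): report whether the page's private state can be torn
 * down so the page itself may be freed.  Dirty and atomically written
 * pages must keep their state, so return 0 for those.
 */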
2118 int f2fs_release_page(struct page *page, gfp_t wait)
2119 {
	/* If this is a dirty page, keep PagePrivate */
2121 	if (PageDirty(page))
2122 		return 0;
2123 
	/* This is an atomically written page; keep it Private */
2125 	if (IS_ATOMIC_WRITTEN_PAGE(page))
2126 		return 0;
2127 
2128 	set_page_private(page, 0);
2129 	ClearPagePrivate(page);
2130 	return 1;
2131 }
2132 
/*
 * This was copied from __set_page_dirty_buffers(), which gives higher
 * performance on very high-speed storage (e.g., pmem).
 */
2137 void f2fs_set_page_dirty_nobuffers(struct page *page)
2138 {
2139 	struct address_space *mapping = page->mapping;
2140 	unsigned long flags;
2141 
2142 	if (unlikely(!mapping))
2143 		return;
2144 
2145 	spin_lock(&mapping->private_lock);
2146 	lock_page_memcg(page);
2147 	SetPageDirty(page);
2148 	spin_unlock(&mapping->private_lock);
2149 
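	/*
	 * Tag the page dirty in the radix tree and update the dirty
	 * accounting under tree_lock.
	 */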
2150 	spin_lock_irqsave(&mapping->tree_lock, flags);
2151 	WARN_ON_ONCE(!PageUptodate(page));
2152 	account_page_dirtied(page, mapping);
2153 	radix_tree_tag_set(&mapping->page_tree,
2154 			page_index(page), PAGECACHE_TAG_DIRTY);
2155 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
2156 	unlock_page_memcg(page);
2157 
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
2160 }
2161 
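/*
 * ->set_page_dirty() for data pages.  Pages of a file under atomic write
 * are registered on the inmem list instead of being tagged dirty in the
 * radix tree, so that they can later be committed as one unit.
 */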
2162 static int f2fs_set_data_page_dirty(struct page *page)
2163 {
2164 	struct address_space *mapping = page->mapping;
2165 	struct inode *inode = mapping->host;
2166 
2167 	trace_f2fs_set_page_dirty(page, DATA);
2168 
2169 	if (!PageUptodate(page))
2170 		SetPageUptodate(page);
2171 
2172 	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
2173 		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
2174 			register_inmem_page(inode, page);
2175 			return 1;
2176 		}
		/*
		 * This page has already been registered, so just
		 * return here.
		 */
2181 		return 0;
2182 	}
2183 
2184 	if (!PageDirty(page)) {
2185 		f2fs_set_page_dirty_nobuffers(page);
2186 		update_dirty_page(inode, page);
2187 		return 1;
2188 	}
2189 	return 0;
2190 }
2191 
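/*
 * ->bmap(): inline-data files have no block mapping, so report 0.
 * Otherwise, flush dirty pages first so that every block has its final
 * on-disk address before the generic lookup runs.
 */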
2192 static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
2193 {
2194 	struct inode *inode = mapping->host;
2195 
2196 	if (f2fs_has_inline_data(inode))
2197 		return 0;
2198 
	/* flush dirty pages to make sure all blocks are allocated */
2200 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2201 		filemap_write_and_wait(mapping);
2202 
2203 	return generic_block_bmap(mapping, block, get_data_block_bmap);
2204 }
2205 
2206 #ifdef CONFIG_MIGRATION
2207 #include <linux/migrate.h>
2208 
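/*
 * ->migratepage(): move a data page to a new physical page.  An
 * atomically written page needs extra care: its inmem list entry must be
 * repointed to the new page under inmem_lock, and the page reference
 * held by that entry must be moved over as well.
 */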
2209 int f2fs_migrate_page(struct address_space *mapping,
2210 		struct page *newpage, struct page *page, enum migrate_mode mode)
2211 {
2212 	int rc, extra_count;
2213 	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
2214 	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
2215 
2216 	BUG_ON(PageWriteback(page));
2217 
	/* migrating an atomically written page is only safe with inmem_lock held */
2219 	if (atomic_written) {
2220 		if (mode != MIGRATE_SYNC)
2221 			return -EBUSY;
2222 		if (!mutex_trylock(&fi->inmem_lock))
2223 			return -EAGAIN;
2224 	}
2225 
	/*
	 * migrate_page_move_mapping() expects an extra page reference when
	 * PagePrivate is set, but f2fs breaks that rule to maintain dirty
	 * page counts while truncating pages.  Adjust 'extra_count' here to
	 * make it work.
	 */
2231 	extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
2232 	rc = migrate_page_move_mapping(mapping, newpage,
2233 				page, NULL, mode, extra_count);
2234 	if (rc != MIGRATEPAGE_SUCCESS) {
2235 		if (atomic_written)
2236 			mutex_unlock(&fi->inmem_lock);
2237 		return rc;
2238 	}
2239 
2240 	if (atomic_written) {
2241 		struct inmem_pages *cur;
2242 		list_for_each_entry(cur, &fi->inmem_pages, list)
2243 			if (cur->page == page) {
2244 				cur->page = newpage;
2245 				break;
2246 			}
2247 		mutex_unlock(&fi->inmem_lock);
2248 		put_page(page);
2249 		get_page(newpage);
2250 	}
2251 
2252 	if (PagePrivate(page))
2253 		SetPagePrivate(newpage);
2254 	set_page_private(newpage, page_private(page));
2255 
2256 	if (mode != MIGRATE_SYNC_NO_COPY)
2257 		migrate_page_copy(newpage, page);
2258 	else
2259 		migrate_page_states(newpage, page);
2260 
2261 	return MIGRATEPAGE_SUCCESS;
2262 }
2263 #endif
2264 
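/*
 * Address-space operations for regular f2fs data pages; node and meta
 * pages have their own operation tables elsewhere in f2fs.
 */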
2265 const struct address_space_operations f2fs_dblock_aops = {
2266 	.readpage	= f2fs_read_data_page,
2267 	.readpages	= f2fs_read_data_pages,
2268 	.writepage	= f2fs_write_data_page,
2269 	.writepages	= f2fs_write_data_pages,
2270 	.write_begin	= f2fs_write_begin,
2271 	.write_end	= f2fs_write_end,
2272 	.set_page_dirty	= f2fs_set_data_page_dirty,
2273 	.invalidatepage	= f2fs_invalidate_page,
2274 	.releasepage	= f2fs_release_page,
2275 	.direct_IO	= f2fs_direct_IO,
2276 	.bmap		= f2fs_bmap,
2277 #ifdef CONFIG_MIGRATION
2278 	.migratepage    = f2fs_migrate_page,
2279 #endif
2280 };
2281