xref: /linux/fs/ext4/page-io.c (revision fd42df305f804ddc0d5ac028e944784283b2f92d)
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

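/* Slab cache from which all ext4_io_end structures are allocated. */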
static struct kmem_cache *io_end_cachep;

int __init ext4_init_pageio(void)
{
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
}

/*
 * Print a buffer I/O error compatible with fs/buffer.c.  This provides
 * compatibility with dmesg scrapers that look for a specific buffer I/O
 * error message.  We really need a unified error reporting structure to
 * userspace a la Digital Unix's uerf system, but it's probably not going
 * to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
			   bh->b_bdev,
			   (unsigned long long)bh->b_blocknr);
}

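/*
 * Finish page writeback state for every segment of a completed bio: clear
 * the async_write flag on the buffers this bio covered and, once no buffer
 * on the page remains under IO, end writeback on the page.  For encrypted
 * inodes a bvec may point at an fscrypt bounce page, which is swapped back
 * for the original pagecache page before the buffers are walked.
 */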
static void ext4_finish_bio(struct bio *bio)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
		struct page *data_page = NULL;
#endif
		struct buffer_head *bh, *head;
		unsigned bio_start = bvec->bv_offset;
		unsigned bio_end = bio_start + bvec->bv_len;
		unsigned under_io = 0;
		unsigned long flags;

		if (!page)
			continue;

#ifdef CONFIG_EXT4_FS_ENCRYPTION
		if (!page->mapping) {
			/* The bounce data pages are unmapped. */
			data_page = page;
			fscrypt_pullback_bio_page(&page, false);
		}
#endif

		if (bio->bi_status) {
			SetPageError(page);
			mapping_set_error(page->mapping, -EIO);
		}
		bh = head = page_buffers(page);
		/*
		 * We check all buffers in the page under BH_Uptodate_Lock
		 * to avoid races with other end io clearing async_write flags
		 */
		local_irq_save(flags);
		bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (bio->bi_status)
				buffer_io_error(bh);
		} while ((bh = bh->b_this_page) != head);
		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
		local_irq_restore(flags);
		if (!under_io) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
			if (data_page)
				fscrypt_restore_control_page(data_page);
#endif
			end_page_writeback(page);
		}
	}
}

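/*
 * Free an io_end whose last reference is gone: finish and free each bio
 * chained onto it via bi_private (see ext4_end_bio()) and return the
 * structure to the slab cache.  Any unwritten-extent conversion must
 * already have been done by this point.
 */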
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	struct bio *bio, *next_bio;

	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
	WARN_ON(io_end->handle);

	for (bio = io_end->bio; bio; bio = next_bio) {
		next_bio = bio->bi_private;
		ext4_finish_bio(bio);
		bio_put(bio);
	}
	kmem_cache_free(io_end_cachep, io_end);
}

/*
 * Check a range of space and convert unwritten extents to written. Note that
 * we are protected from truncate touching the same part of the extent tree
 * by the fact that truncate code waits for all DIO to finish (thus exclusion
 * from direct IO is achieved) and also waits for PageWriteback bits. Thus we
 * cannot get to ext4_ext_truncate() before all IOs overlapping that range
 * are completed (this happens from ext4_release_io_end()).
 */
static int ext4_end_io(ext4_io_end_t *io)
{
	struct inode *inode = io->inode;
	loff_t offset = io->offset;
	ssize_t size = io->size;
	handle_t *handle = io->handle;
	int ret = 0;

	ext4_debug("ext4_end_io: io 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io, inode->i_ino, io->list.next, io->list.prev);

	io->handle = NULL;	/* Following call will use up the handle */
	ret = ext4_convert_unwritten_extents(handle, inode, offset, size);
	if (ret < 0 && !ext4_forced_shutdown(EXT4_SB(inode->i_sb))) {
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss!  "
			 "(inode %lu, offset %llu, size %zd, error %d)",
			 inode->i_ino, offset, size, ret);
	}
	ext4_clear_io_unwritten_flag(io);
	ext4_release_io_end(io);
	return ret;
}

static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef	EXT4FS_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io, *io0, *io1;

	if (list_empty(head))
		return;

	ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
	list_for_each_entry(io, head, list) {
		cur = &io->list;
		before = cur->prev;
		io0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
			    io, inode->i_ino, io0, io1);
	}
#endif
}

/* Add the io_end to the per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
	struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
	struct workqueue_struct *wq;
	unsigned long flags;

	/* Only reserved conversions from writeback should enter here */
	WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
	WARN_ON(!io_end->handle && sbi->s_journal);
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	wq = sbi->rsv_conversion_wq;
	if (list_empty(&ei->i_rsv_conversion_list))
		queue_work(wq, &ei->i_rsv_conversion_work);
	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}
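
/*
 * Note that the extent conversion itself must run from process context:
 * it uses a pre-reserved jbd2 handle and takes sleeping locks, neither of
 * which is possible from the bio completion path, hence the hand-off to
 * rsv_conversion_wq above.
 */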

static int ext4_do_flush_completed_IO(struct inode *inode,
				      struct list_head *head)
{
	ext4_io_end_t *io;
	struct list_head unwritten;
	unsigned long flags;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, ret = 0;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	dump_completed_IO(inode, head);
	list_replace_init(head, &unwritten);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	while (!list_empty(&unwritten)) {
		io = list_entry(unwritten.next, ext4_io_end_t, list);
		BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN));
		list_del_init(&io->list);

		err = ext4_end_io(io);
		if (unlikely(!ret && err))
			ret = err;
	}
	return ret;
}

/*
 * Work on completed IO, to convert unwritten extents to written extents.
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
						  i_rsv_conversion_work);
	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
	if (io) {
		io->inode = inode;
		INIT_LIST_HEAD(&io->list);
		atomic_set(&io->count, 1);
	}
	return io;
}

void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
	if (atomic_dec_and_test(&io_end->count)) {
		if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
			ext4_release_io_end(io_end);
			return;
		}
		ext4_add_complete_io(io_end);
	}
}

int ext4_put_io_end(ext4_io_end_t *io_end)
{
	int err = 0;

	if (atomic_dec_and_test(&io_end->count)) {
		if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
			err = ext4_convert_unwritten_extents(io_end->handle,
						io_end->inode, io_end->offset,
						io_end->size);
			io_end->handle = NULL;
			ext4_clear_io_unwritten_flag(io_end);
		}
		ext4_release_io_end(io_end);
	}
	return err;
}

ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
	atomic_inc(&io_end->count);
	return io_end;
}
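
/*
 * io_end reference counting, as implemented above: ext4_init_io_end()
 * returns an io_end with a count of 1 held by the submitter, and each bio
 * built against it takes an extra reference via ext4_get_io_end().  Bio
 * completion drops its reference with ext4_put_io_end_defer(), so whichever
 * of the submitter and the last completing bio drops the final reference
 * triggers the unwritten-extent conversion (deferred to rsv_conversion_wq,
 * or done synchronously in ext4_put_io_end()) and the final release.
 */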

/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio)
{
	ext4_io_end_t *io_end = bio->bi_private;
	sector_t bi_sector = bio->bi_iter.bi_sector;
	char b[BDEVNAME_SIZE];

	if (WARN_ONCE(!io_end, "io_end is NULL: %s: sector %Lu len %u err %d\n",
		      bio_devname(bio, b),
		      (long long) bio->bi_iter.bi_sector,
		      (unsigned) bio_sectors(bio),
		      bio->bi_status)) {
		ext4_finish_bio(bio);
		bio_put(bio);
		return;
	}
	bio->bi_end_io = NULL;

	if (bio->bi_status) {
		struct inode *inode = io_end->inode;

		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
			     "(offset %llu size %ld starting block %llu)",
			     bio->bi_status, inode->i_ino,
			     (unsigned long long) io_end->offset,
			     (long) io_end->size,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
		mapping_set_error(inode->i_mapping,
				blk_status_to_errno(bio->bi_status));
	}

	if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
		/*
		 * Link bio into list hanging from io_end. We have to do it
		 * atomically as bio completions can be racing against each
		 * other.
		 */
		bio->bi_private = xchg(&io_end->bio, bio);
		ext4_put_io_end_defer(io_end);
	} else {
		/*
		 * Drop io_end reference early. Inode can get freed once
		 * we finish the bio.
		 */
		ext4_put_io_end_defer(io_end);
		ext4_finish_bio(bio);
		bio_put(bio);
	}
}


void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
				  REQ_SYNC : 0;
		io->io_bio->bi_write_hint = io->io_end->inode->i_write_hint;
		bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
		submit_bio(io->io_bio);
	}
	io->io_bio = NULL;
}

void ext4_io_submit_init(struct ext4_io_submit *io,
			 struct writeback_control *wbc)
{
	io->io_wbc = wbc;
	io->io_bio = NULL;
	io->io_end = NULL;
}

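/*
 * A minimal sketch of how the writeback path is expected to drive this
 * API (the real caller is ext4_writepages(); "next_dirty_page" here is a
 * hypothetical helper used purely for illustration):
 *
 *	struct ext4_io_submit io_submit;
 *
 *	ext4_io_submit_init(&io_submit, wbc);
 *	io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
 *	while ((page = next_dirty_page(mapping)))
 *		ext4_bio_write_page(&io_submit, page, len, wbc, false);
 *	ext4_io_submit(&io_submit);		// flush the last bio
 *	ext4_put_io_end(io_submit.io_end);	// drop the submitter's ref
 */
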
static int io_submit_init_bio(struct ext4_io_submit *io,
			      struct buffer_head *bh)
{
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
	if (!bio)
		return -ENOMEM;
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio_set_dev(bio, bh->b_bdev);
	bio->bi_end_io = ext4_end_bio;
	bio->bi_private = ext4_get_io_end(io->io_end);
	io->io_bio = bio;
	io->io_next_block = bh->b_blocknr;
	wbc_init_bio(io->io_wbc, bio);
	return 0;
}

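/*
 * Append @bh to the bio currently under construction.  A buffer is only
 * merged if it is physically contiguous with the previous one (tracked in
 * io->io_next_block); otherwise, or when the bio is already full, the
 * current bio is submitted and a fresh one is started via
 * io_submit_init_bio().
 */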
static int io_submit_add_bh(struct ext4_io_submit *io,
			    struct inode *inode,
			    struct page *page,
			    struct buffer_head *bh)
{
	int ret;

	if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		ret = io_submit_init_bio(io, bh);
		if (ret)
			return ret;
		io->io_bio->bi_write_hint = inode->i_write_hint;
	}
	ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
	if (ret != bh->b_size)
		goto submit_and_retry;
	wbc_account_io(io->io_wbc, page, bh->b_size);
	io->io_next_block++;
	return 0;
}

int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc,
			bool keep_towrite)
{
	struct page *data_page = NULL;
	struct inode *inode = page->mapping->host;
	unsigned block_start;
	struct buffer_head *bh, *head;
	int ret = 0;
	int nr_submitted = 0;
	int nr_to_submit = 0;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	if (keep_towrite)
		set_page_writeback_keepwrite(page);
	else
		set_page_writeback(page);
	ClearPageError(page);

	/*
	 * Comments copied from block_write_full_page:
	 *
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	/*
	 * In the first loop we prepare and mark buffers to submit. We have to
	 * mark all buffers in the page before submitting so that
	 * end_page_writeback() cannot be called from ext4_end_bio() when IO
	 * on the first buffer finishes and we are still working on submitting
	 * the second buffer.
	 */
	bh = head = page_buffers(page);
	do {
		block_start = bh_offset(bh);
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_dirty(bh) || buffer_delay(bh) ||
		    !buffer_mapped(bh) || buffer_unwritten(bh)) {
			/* A hole? We can safely clear the dirty bit */
			if (!buffer_mapped(bh))
				clear_buffer_dirty(bh);
			if (io->io_bio)
				ext4_io_submit(io);
			continue;
		}
		if (buffer_new(bh)) {
			clear_buffer_new(bh);
			clean_bdev_bh_alias(bh);
		}
		set_buffer_async_write(bh);
		nr_to_submit++;
	} while ((bh = bh->b_this_page) != head);

	bh = head = page_buffers(page);

	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
	    nr_to_submit) {
		gfp_t gfp_flags = GFP_NOFS;

	retry_encrypt:
		data_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
						page->index, gfp_flags);
		if (IS_ERR(data_page)) {
			ret = PTR_ERR(data_page);
			if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
				if (io->io_bio) {
					ext4_io_submit(io);
					congestion_wait(BLK_RW_ASYNC, HZ/50);
				}
				gfp_flags |= __GFP_NOFAIL;
				goto retry_encrypt;
			}
			data_page = NULL;
			goto out;
		}
	}

	/* Now submit buffers to write */
	do {
		if (!buffer_async_write(bh))
			continue;
		ret = io_submit_add_bh(io, inode,
				       data_page ? data_page : page, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM.  Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			break;
		}
		nr_submitted++;
		clear_buffer_dirty(bh);
	} while ((bh = bh->b_this_page) != head);

	/* Error stopped previous loop? Clean up buffers... */
	if (ret) {
	out:
		if (data_page)
			fscrypt_restore_control_page(data_page);
		printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
		redirty_page_for_writepage(wbc, page);
		do {
			clear_buffer_async_write(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	unlock_page(page);
	/* Nothing submitted - we have to end page writeback */
	if (!nr_submitted)
		end_page_writeback(page);
	return ret;
}