xref: /linux/fs/ext4/page-io.c (revision 28a535f9a0df060569dcc786e5bc2e1de43d7dc7)
1bd2d0210STheodore Ts'o /*
2bd2d0210STheodore Ts'o  * linux/fs/ext4/page-io.c
3bd2d0210STheodore Ts'o  *
4bd2d0210STheodore Ts'o  * This contains the new page_io functions for ext4
5bd2d0210STheodore Ts'o  *
6bd2d0210STheodore Ts'o  * Written by Theodore Ts'o, 2010.
7bd2d0210STheodore Ts'o  */
8bd2d0210STheodore Ts'o 
9bd2d0210STheodore Ts'o #include <linux/fs.h>
10bd2d0210STheodore Ts'o #include <linux/time.h>
11bd2d0210STheodore Ts'o #include <linux/jbd2.h>
12bd2d0210STheodore Ts'o #include <linux/highuid.h>
13bd2d0210STheodore Ts'o #include <linux/pagemap.h>
14bd2d0210STheodore Ts'o #include <linux/quotaops.h>
15bd2d0210STheodore Ts'o #include <linux/string.h>
16bd2d0210STheodore Ts'o #include <linux/buffer_head.h>
17bd2d0210STheodore Ts'o #include <linux/writeback.h>
18bd2d0210STheodore Ts'o #include <linux/pagevec.h>
19bd2d0210STheodore Ts'o #include <linux/mpage.h>
20bd2d0210STheodore Ts'o #include <linux/namei.h>
21bd2d0210STheodore Ts'o #include <linux/uio.h>
22bd2d0210STheodore Ts'o #include <linux/bio.h>
23bd2d0210STheodore Ts'o #include <linux/workqueue.h>
24bd2d0210STheodore Ts'o #include <linux/kernel.h>
25bd2d0210STheodore Ts'o #include <linux/slab.h>
26bd2d0210STheodore Ts'o 
27bd2d0210STheodore Ts'o #include "ext4_jbd2.h"
28bd2d0210STheodore Ts'o #include "xattr.h"
29bd2d0210STheodore Ts'o #include "acl.h"
30bd2d0210STheodore Ts'o #include "ext4_extents.h"
31bd2d0210STheodore Ts'o 
32bd2d0210STheodore Ts'o static struct kmem_cache *io_page_cachep, *io_end_cachep;
33bd2d0210STheodore Ts'o 
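/*
 * Slab caches backing the two helper structures used in this file:
 * ext4_io_page tracks a page whose buffers are in flight, and ext4_io_end_t
 * describes a contiguous range of I/O whose completion may need end_io
 * processing (e.g. unwritten extent conversion).  ext4_init_pageio() creates
 * the caches at ext4 module initialization; ext4_exit_pageio() destroys them.
 */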
345dabfc78STheodore Ts'o int __init ext4_init_pageio(void)
35bd2d0210STheodore Ts'o {
36bd2d0210STheodore Ts'o 	io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
37bd2d0210STheodore Ts'o 	if (io_page_cachep == NULL)
38bd2d0210STheodore Ts'o 		return -ENOMEM;
39bd2d0210STheodore Ts'o 	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
4013195184SDan Carpenter 	if (io_end_cachep == NULL) {
41bd2d0210STheodore Ts'o 		kmem_cache_destroy(io_page_cachep);
42bd2d0210STheodore Ts'o 		return -ENOMEM;
43bd2d0210STheodore Ts'o 	}
44bd2d0210STheodore Ts'o 	return 0;
45bd2d0210STheodore Ts'o }
46bd2d0210STheodore Ts'o 
475dabfc78STheodore Ts'o void ext4_exit_pageio(void)
48bd2d0210STheodore Ts'o {
49bd2d0210STheodore Ts'o 	kmem_cache_destroy(io_end_cachep);
50bd2d0210STheodore Ts'o 	kmem_cache_destroy(io_page_cachep);
51bd2d0210STheodore Ts'o }
52bd2d0210STheodore Ts'o 
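/*
 * Wait until every outstanding io_end for this inode has been freed, i.e.
 * until i_ioend_count drops to zero.  Callers (for instance the inode
 * eviction path) use this to ensure no end_io work still references the
 * inode.
 */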
53f7ad6d2eSTheodore Ts'o void ext4_ioend_wait(struct inode *inode)
54f7ad6d2eSTheodore Ts'o {
55e9e3bcecSEric Sandeen 	wait_queue_head_t *wq = ext4_ioend_wq(inode);
56f7ad6d2eSTheodore Ts'o 
57f7ad6d2eSTheodore Ts'o 	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
58f7ad6d2eSTheodore Ts'o }
59f7ad6d2eSTheodore Ts'o 
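/*
 * Drop one reference on an ext4_io_page.  The final put ends writeback on
 * the page, releases the page reference taken in ext4_bio_write_page(), and
 * returns the structure to its slab cache.
 */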
6083668e71STheodore Ts'o static void put_io_page(struct ext4_io_page *io_page)
6183668e71STheodore Ts'o {
6283668e71STheodore Ts'o 	if (atomic_dec_and_test(&io_page->p_count)) {
636268b325SLinus Torvalds 		end_page_writeback(io_page->p_page);
6483668e71STheodore Ts'o 		put_page(io_page->p_page);
6583668e71STheodore Ts'o 		kmem_cache_free(io_page_cachep, io_page);
6683668e71STheodore Ts'o 	}
6783668e71STheodore Ts'o }
6883668e71STheodore Ts'o 
69bd2d0210STheodore Ts'o void ext4_free_io_end(ext4_io_end_t *io)
70bd2d0210STheodore Ts'o {
71bd2d0210STheodore Ts'o 	int i;
72bd2d0210STheodore Ts'o 
73bd2d0210STheodore Ts'o 	BUG_ON(!io);
74*28a535f9SDmitry Monakhov 	BUG_ON(!list_empty(&io->list));
7582e54229SDmitry Monakhov 	BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN);
7682e54229SDmitry Monakhov 
77bd2d0210STheodore Ts'o 	if (io->page)
78bd2d0210STheodore Ts'o 		put_page(io->page);
7983668e71STheodore Ts'o 	for (i = 0; i < io->num_io_pages; i++)
8083668e71STheodore Ts'o 		put_io_page(io->pages[i]);
81bd2d0210STheodore Ts'o 	io->num_io_pages = 0;
824e298021STheodore Ts'o 	if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
834e298021STheodore Ts'o 		wake_up_all(ext4_ioend_wq(io->inode));
84bd2d0210STheodore Ts'o 	kmem_cache_free(io_end_cachep, io);
85bd2d0210STheodore Ts'o }
86bd2d0210STheodore Ts'o 
87*28a535f9SDmitry Monakhov /* Check a range of space and convert unwritten extents to written extents. */
88*28a535f9SDmitry Monakhov static int ext4_end_io(ext4_io_end_t *io)
89bd2d0210STheodore Ts'o {
90bd2d0210STheodore Ts'o 	struct inode *inode = io->inode;
91bd2d0210STheodore Ts'o 	loff_t offset = io->offset;
92bd2d0210STheodore Ts'o 	ssize_t size = io->size;
93bd2d0210STheodore Ts'o 	int ret = 0;
94bd2d0210STheodore Ts'o 
95bd2d0210STheodore Ts'o 	ext4_debug("ext4_end_io: io 0x%p from inode %lu, list->next 0x%p, "
96bd2d0210STheodore Ts'o 		   "list->prev 0x%p\n",
97bd2d0210STheodore Ts'o 		   io, inode->i_ino, io->list.next, io->list.prev);
98bd2d0210STheodore Ts'o 
99bd2d0210STheodore Ts'o 	ret = ext4_convert_unwritten_extents(inode, offset, size);
100bd2d0210STheodore Ts'o 	if (ret < 0) {
101b82e384cSTheodore Ts'o 		ext4_msg(inode->i_sb, KERN_EMERG,
102b82e384cSTheodore Ts'o 			 "failed to convert unwritten extents to written "
103b82e384cSTheodore Ts'o 			 "extents -- potential data loss!  "
104b82e384cSTheodore Ts'o 			 "(inode %lu, offset %llu, size %zd, error %d)",
105b82e384cSTheodore Ts'o 			 inode->i_ino, offset, size, ret);
106bd2d0210STheodore Ts'o 	}
107bd2d0210STheodore Ts'o 	if (io->iocb)
108bd2d0210STheodore Ts'o 		aio_complete(io->iocb, io->result, 0);
109b82e384cSTheodore Ts'o 
110266991b1SJeff Moyer 	if (io->flag & EXT4_IO_END_DIRECT)
111266991b1SJeff Moyer 		inode_dio_done(inode);
112e9e3bcecSEric Sandeen 	/* Wake up anyone waiting on unwritten extent conversion */
113e27f41e1SDmitry Monakhov 	if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
1144e298021STheodore Ts'o 		wake_up_all(ext4_ioend_wq(io->inode));
115bd2d0210STheodore Ts'o 	return ret;
116bd2d0210STheodore Ts'o }
117bd2d0210STheodore Ts'o 
118*28a535f9SDmitry Monakhov static void dump_completed_IO(struct inode *inode)
119*28a535f9SDmitry Monakhov {
120*28a535f9SDmitry Monakhov #ifdef	EXT4FS_DEBUG
121*28a535f9SDmitry Monakhov 	struct list_head *cur, *before, *after;
122*28a535f9SDmitry Monakhov 	ext4_io_end_t *io, *io0, *io1;
123*28a535f9SDmitry Monakhov 	unsigned long flags;
124*28a535f9SDmitry Monakhov 
125*28a535f9SDmitry Monakhov 	if (list_empty(&EXT4_I(inode)->i_completed_io_list)) {
126*28a535f9SDmitry Monakhov 		ext4_debug("inode %lu completed_io list is empty\n",
127*28a535f9SDmitry Monakhov 			   inode->i_ino);
128*28a535f9SDmitry Monakhov 		return;
129*28a535f9SDmitry Monakhov 	}
130*28a535f9SDmitry Monakhov 
131*28a535f9SDmitry Monakhov 	ext4_debug("Dump inode %lu completed_io list\n", inode->i_ino);
132*28a535f9SDmitry Monakhov 	list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list) {
133*28a535f9SDmitry Monakhov 		cur = &io->list;
134*28a535f9SDmitry Monakhov 		before = cur->prev;
135*28a535f9SDmitry Monakhov 		io0 = container_of(before, ext4_io_end_t, list);
136*28a535f9SDmitry Monakhov 		after = cur->next;
137*28a535f9SDmitry Monakhov 		io1 = container_of(after, ext4_io_end_t, list);
138*28a535f9SDmitry Monakhov 
139*28a535f9SDmitry Monakhov 		ext4_debug("io 0x%p from inode %lu, prev 0x%p, next 0x%p\n",
140*28a535f9SDmitry Monakhov 			    io, inode->i_ino, io0, io1);
141*28a535f9SDmitry Monakhov 	}
142*28a535f9SDmitry Monakhov #endif
143*28a535f9SDmitry Monakhov }
144*28a535f9SDmitry Monakhov 
145*28a535f9SDmitry Monakhov /* Add the io_end to the per-inode completed end_io list. */
146*28a535f9SDmitry Monakhov void ext4_add_complete_io(ext4_io_end_t *io_end)
147*28a535f9SDmitry Monakhov {
148*28a535f9SDmitry Monakhov 	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
149*28a535f9SDmitry Monakhov 	struct workqueue_struct *wq;
150*28a535f9SDmitry Monakhov 	unsigned long flags;
151*28a535f9SDmitry Monakhov 
152*28a535f9SDmitry Monakhov 	BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
153*28a535f9SDmitry Monakhov 	wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
154*28a535f9SDmitry Monakhov 
155*28a535f9SDmitry Monakhov 	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
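	/*
	 * Queue the conversion work only when the list was empty: the queued
	 * worker drains the whole i_completed_io_list in one pass, so a
	 * single pending work item also covers any io_end added after it.
	 */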
156*28a535f9SDmitry Monakhov 	if (list_empty(&ei->i_completed_io_list)) {
157*28a535f9SDmitry Monakhov 		io_end->flag |= EXT4_IO_END_QUEUED;
158*28a535f9SDmitry Monakhov 		queue_work(wq, &io_end->work);
159*28a535f9SDmitry Monakhov 	}
160*28a535f9SDmitry Monakhov 	list_add_tail(&io_end->list, &ei->i_completed_io_list);
161*28a535f9SDmitry Monakhov 	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
162*28a535f9SDmitry Monakhov }
163*28a535f9SDmitry Monakhov 
164*28a535f9SDmitry Monakhov static int ext4_do_flush_completed_IO(struct inode *inode,
165*28a535f9SDmitry Monakhov 				      ext4_io_end_t *work_io)
166*28a535f9SDmitry Monakhov {
167*28a535f9SDmitry Monakhov 	ext4_io_end_t *io;
168*28a535f9SDmitry Monakhov 	struct list_head unwritten, complete, to_free;
169*28a535f9SDmitry Monakhov 	unsigned long flags;
170*28a535f9SDmitry Monakhov 	struct ext4_inode_info *ei = EXT4_I(inode);
171*28a535f9SDmitry Monakhov 	int err, ret = 0;
172*28a535f9SDmitry Monakhov 
173*28a535f9SDmitry Monakhov 	INIT_LIST_HEAD(&complete);
174*28a535f9SDmitry Monakhov 	INIT_LIST_HEAD(&to_free);
175*28a535f9SDmitry Monakhov 
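	/*
	 * Three phases: splice the whole completed list off the inode under
	 * the lock, convert each io_end without holding it, then retake the
	 * lock to clear the UNWRITTEN flags in one shot and decide which
	 * io_ends can be freed here and which a queued worker still owns.
	 */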
176*28a535f9SDmitry Monakhov 	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
177*28a535f9SDmitry Monakhov 	dump_completed_IO(inode);
178*28a535f9SDmitry Monakhov 	list_replace_init(&ei->i_completed_io_list, &unwritten);
179*28a535f9SDmitry Monakhov 	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
180*28a535f9SDmitry Monakhov 
181*28a535f9SDmitry Monakhov 	while (!list_empty(&unwritten)) {
182*28a535f9SDmitry Monakhov 		io = list_entry(unwritten.next, ext4_io_end_t, list);
183*28a535f9SDmitry Monakhov 		BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN));
184*28a535f9SDmitry Monakhov 		list_del_init(&io->list);
185*28a535f9SDmitry Monakhov 
186*28a535f9SDmitry Monakhov 		err = ext4_end_io(io);
187*28a535f9SDmitry Monakhov 		if (unlikely(!ret && err))
188*28a535f9SDmitry Monakhov 			ret = err;
189*28a535f9SDmitry Monakhov 
190*28a535f9SDmitry Monakhov 		list_add_tail(&io->list, &complete);
191*28a535f9SDmitry Monakhov 	}
192*28a535f9SDmitry Monakhov 	/* It is important to update the flags of all end_io structures in
193*28a535f9SDmitry Monakhov 	 * one shot without dropping the lock. */
194*28a535f9SDmitry Monakhov 	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
195*28a535f9SDmitry Monakhov 	while (!list_empty(&complete)) {
196*28a535f9SDmitry Monakhov 		io = list_entry(complete.next, ext4_io_end_t, list);
197*28a535f9SDmitry Monakhov 		io->flag &= ~EXT4_IO_END_UNWRITTEN;
198*28a535f9SDmitry Monakhov 		/* The end_io context cannot be destroyed now because it is
199*28a535f9SDmitry Monakhov 		 * still in use by a queued worker; the worker will destroy it later. */
200*28a535f9SDmitry Monakhov 		if (io->flag & EXT4_IO_END_QUEUED)
201*28a535f9SDmitry Monakhov 			list_del_init(&io->list);
202*28a535f9SDmitry Monakhov 		else
203*28a535f9SDmitry Monakhov 			list_move(&io->list, &to_free);
204*28a535f9SDmitry Monakhov 	}
205*28a535f9SDmitry Monakhov 	/* If we were called from the worker context, it is time to clear the
206*28a535f9SDmitry Monakhov 	 * QUEUED flag and free this end_io if it has already been converted. */
207*28a535f9SDmitry Monakhov 	if (work_io) {
208*28a535f9SDmitry Monakhov 		work_io->flag &= ~EXT4_IO_END_QUEUED;
209*28a535f9SDmitry Monakhov 		if (!(work_io->flag & EXT4_IO_END_UNWRITTEN))
210*28a535f9SDmitry Monakhov 			list_add_tail(&work_io->list, &to_free);
211*28a535f9SDmitry Monakhov 	}
212*28a535f9SDmitry Monakhov 	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
213*28a535f9SDmitry Monakhov 
214*28a535f9SDmitry Monakhov 	while (!list_empty(&to_free)) {
215*28a535f9SDmitry Monakhov 		io = list_entry(to_free.next, ext4_io_end_t, list);
216*28a535f9SDmitry Monakhov 		list_del_init(&io->list);
217*28a535f9SDmitry Monakhov 		ext4_free_io_end(io);
218*28a535f9SDmitry Monakhov 	}
219*28a535f9SDmitry Monakhov 	return ret;
220*28a535f9SDmitry Monakhov }
221*28a535f9SDmitry Monakhov 
222bd2d0210STheodore Ts'o /*
223bd2d0210STheodore Ts'o  * Work on completed AIO DIO, converting unwritten extents to written extents.
224bd2d0210STheodore Ts'o  */
225bd2d0210STheodore Ts'o static void ext4_end_io_work(struct work_struct *work)
226bd2d0210STheodore Ts'o {
227bd2d0210STheodore Ts'o 	ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
228*28a535f9SDmitry Monakhov 	ext4_do_flush_completed_IO(io->inode, io);
229d73d5046STao Ma }
230d73d5046STao Ma 
231*28a535f9SDmitry Monakhov int ext4_flush_completed_IO(struct inode *inode)
232*28a535f9SDmitry Monakhov {
233*28a535f9SDmitry Monakhov 	return ext4_do_flush_completed_IO(inode, NULL);
234bd2d0210STheodore Ts'o }
235bd2d0210STheodore Ts'o 
236bd2d0210STheodore Ts'o ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
237bd2d0210STheodore Ts'o {
238b17b35ecSJesper Juhl 	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
239bd2d0210STheodore Ts'o 	if (io) {
240f7ad6d2eSTheodore Ts'o 		atomic_inc(&EXT4_I(inode)->i_ioend_count);
241f7ad6d2eSTheodore Ts'o 		io->inode = inode;
242bd2d0210STheodore Ts'o 		INIT_WORK(&io->work, ext4_end_io_work);
243bd2d0210STheodore Ts'o 		INIT_LIST_HEAD(&io->list);
244bd2d0210STheodore Ts'o 	}
245bd2d0210STheodore Ts'o 	return io;
246bd2d0210STheodore Ts'o }
247bd2d0210STheodore Ts'o 
248bd2d0210STheodore Ts'o /*
249bd2d0210STheodore Ts'o  * Print a buffer I/O error message compatible with fs/buffer.c.  This
250bd2d0210STheodore Ts'o  * provides compatibility with dmesg scrapers that look for a specific
251bd2d0210STheodore Ts'o  * buffer I/O error message.  We really need a unified error-reporting
252bd2d0210STheodore Ts'o  * structure to userspace a la Digital Unix's uerf system, but it's
253bd2d0210STheodore Ts'o  * probably not going to happen in my lifetime, due to LKML politics...
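 *
 * The emitted line looks like (device name and block number are examples):
 *	"Buffer I/O error on device sda1, logical block 1234"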
254bd2d0210STheodore Ts'o  */
255bd2d0210STheodore Ts'o static void buffer_io_error(struct buffer_head *bh)
256bd2d0210STheodore Ts'o {
257bd2d0210STheodore Ts'o 	char b[BDEVNAME_SIZE];
258bd2d0210STheodore Ts'o 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
259bd2d0210STheodore Ts'o 			bdevname(bh->b_bdev, b),
260bd2d0210STheodore Ts'o 			(unsigned long long)bh->b_blocknr);
261bd2d0210STheodore Ts'o }
262bd2d0210STheodore Ts'o 
263bd2d0210STheodore Ts'o static void ext4_end_bio(struct bio *bio, int error)
264bd2d0210STheodore Ts'o {
265bd2d0210STheodore Ts'o 	ext4_io_end_t *io_end = bio->bi_private;
266bd2d0210STheodore Ts'o 	struct inode *inode;
267bd2d0210STheodore Ts'o 	int i;
268d50bdd5aSCurt Wohlgemuth 	sector_t bi_sector = bio->bi_sector;
269bd2d0210STheodore Ts'o 
270bd2d0210STheodore Ts'o 	BUG_ON(!io_end);
271bd2d0210STheodore Ts'o 	bio->bi_private = NULL;
272bd2d0210STheodore Ts'o 	bio->bi_end_io = NULL;
273bd2d0210STheodore Ts'o 	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
274bd2d0210STheodore Ts'o 		error = 0;
275bd2d0210STheodore Ts'o 	bio_put(bio);
276bd2d0210STheodore Ts'o 
277bd2d0210STheodore Ts'o 	for (i = 0; i < io_end->num_io_pages; i++) {
278bd2d0210STheodore Ts'o 		struct page *page = io_end->pages[i]->p_page;
279bd2d0210STheodore Ts'o 		struct buffer_head *bh, *head;
280bd2d0210STheodore Ts'o 		loff_t offset;
28139db00f1SCurt Wohlgemuth 		loff_t io_end_offset;
28239db00f1SCurt Wohlgemuth 
28339db00f1SCurt Wohlgemuth 		if (error) {
28439db00f1SCurt Wohlgemuth 			SetPageError(page);
28539db00f1SCurt Wohlgemuth 			set_bit(AS_EIO, &page->mapping->flags);
28639db00f1SCurt Wohlgemuth 			head = page_buffers(page);
28739db00f1SCurt Wohlgemuth 			BUG_ON(!head);
28839db00f1SCurt Wohlgemuth 
28939db00f1SCurt Wohlgemuth 			io_end_offset = io_end->offset + io_end->size;
290bd2d0210STheodore Ts'o 
291bd2d0210STheodore Ts'o 			offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
292bd2d0210STheodore Ts'o 			bh = head;
293bd2d0210STheodore Ts'o 			do {
294bd2d0210STheodore Ts'o 				if ((offset >= io_end->offset) &&
29539db00f1SCurt Wohlgemuth 				    (offset+bh->b_size <= io_end_offset))
296bd2d0210STheodore Ts'o 					buffer_io_error(bh);
297bd2d0210STheodore Ts'o 
298bd2d0210STheodore Ts'o 				offset += bh->b_size;
299bd2d0210STheodore Ts'o 				bh = bh->b_this_page;
300bd2d0210STheodore Ts'o 			} while (bh != head);
301bd2d0210STheodore Ts'o 		}
302bd2d0210STheodore Ts'o 
3036268b325SLinus Torvalds 		put_io_page(io_end->pages[i]);
304bd2d0210STheodore Ts'o 	}
3056268b325SLinus Torvalds 	io_end->num_io_pages = 0;
306f7ad6d2eSTheodore Ts'o 	inode = io_end->inode;
307f7ad6d2eSTheodore Ts'o 
308f7ad6d2eSTheodore Ts'o 	if (error) {
309f7ad6d2eSTheodore Ts'o 		io_end->flag |= EXT4_IO_END_ERROR;
310f7ad6d2eSTheodore Ts'o 		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
311f7ad6d2eSTheodore Ts'o 			     "(offset %llu size %ld starting block %llu)",
312f7ad6d2eSTheodore Ts'o 			     inode->i_ino,
313f7ad6d2eSTheodore Ts'o 			     (unsigned long long) io_end->offset,
314f7ad6d2eSTheodore Ts'o 			     (long) io_end->size,
315f7ad6d2eSTheodore Ts'o 			     (unsigned long long)
316d50bdd5aSCurt Wohlgemuth 			     bi_sector >> (inode->i_blkbits - 9));
317f7ad6d2eSTheodore Ts'o 	}
318bd2d0210STheodore Ts'o 
319b6168443STheodore Ts'o 	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
320b6168443STheodore Ts'o 		ext4_free_io_end(io_end);
321b6168443STheodore Ts'o 		return;
322b6168443STheodore Ts'o 	}
323b6168443STheodore Ts'o 
324*28a535f9SDmitry Monakhov 	ext4_add_complete_io(io_end);
325bd2d0210STheodore Ts'o }
326bd2d0210STheodore Ts'o 
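/*
 * Submit the bio (if any) accumulated in *io and reset the submit state.
 * The bio_get()/bio_put() pair keeps the bio alive across submit_bio() so
 * that the BIO_EOPNOTSUPP check is safe even if the I/O completes
 * immediately.
 */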
327bd2d0210STheodore Ts'o void ext4_io_submit(struct ext4_io_submit *io)
328bd2d0210STheodore Ts'o {
329bd2d0210STheodore Ts'o 	struct bio *bio = io->io_bio;
330bd2d0210STheodore Ts'o 
331bd2d0210STheodore Ts'o 	if (bio) {
332bd2d0210STheodore Ts'o 		bio_get(io->io_bio);
333bd2d0210STheodore Ts'o 		submit_bio(io->io_op, io->io_bio);
334bd2d0210STheodore Ts'o 		BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
335bd2d0210STheodore Ts'o 		bio_put(io->io_bio);
336bd2d0210STheodore Ts'o 	}
3377dc57615SPeter Huewe 	io->io_bio = NULL;
338bd2d0210STheodore Ts'o 	io->io_op = 0;
3397dc57615SPeter Huewe 	io->io_end = NULL;
340bd2d0210STheodore Ts'o }
341bd2d0210STheodore Ts'o 
342bd2d0210STheodore Ts'o static int io_submit_init(struct ext4_io_submit *io,
343bd2d0210STheodore Ts'o 			  struct inode *inode,
344bd2d0210STheodore Ts'o 			  struct writeback_control *wbc,
345bd2d0210STheodore Ts'o 			  struct buffer_head *bh)
346bd2d0210STheodore Ts'o {
347bd2d0210STheodore Ts'o 	ext4_io_end_t *io_end;
348bd2d0210STheodore Ts'o 	struct page *page = bh->b_page;
349bd2d0210STheodore Ts'o 	int nvecs = bio_get_nr_vecs(bh->b_bdev);
350bd2d0210STheodore Ts'o 	struct bio *bio;
351bd2d0210STheodore Ts'o 
352bd2d0210STheodore Ts'o 	io_end = ext4_init_io_end(inode, GFP_NOFS);
353bd2d0210STheodore Ts'o 	if (!io_end)
354bd2d0210STheodore Ts'o 		return -ENOMEM;
355275d3ba6STheodore Ts'o 	bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
356bd2d0210STheodore Ts'o 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
357bd2d0210STheodore Ts'o 	bio->bi_bdev = bh->b_bdev;
358bd2d0210STheodore Ts'o 	bio->bi_private = io->io_end = io_end;
359bd2d0210STheodore Ts'o 	bio->bi_end_io = ext4_end_bio;
360bd2d0210STheodore Ts'o 
361bd2d0210STheodore Ts'o 	io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
362bd2d0210STheodore Ts'o 
363bd2d0210STheodore Ts'o 	io->io_bio = bio;
364721a9602SJens Axboe 	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
365bd2d0210STheodore Ts'o 	io->io_next_block = bh->b_blocknr;
366bd2d0210STheodore Ts'o 	return 0;
367bd2d0210STheodore Ts'o }
368bd2d0210STheodore Ts'o 
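/*
 * Add a single buffer_head to the bio being assembled in *io.  Unmapped or
 * delayed buffers are skipped (any pending bio is flushed first).  The
 * current bio is submitted and a fresh one started when the block is not
 * contiguous with io->io_next_block, when the io_end already tracks
 * MAX_IO_PAGES pages and this buffer starts a new one, or when
 * bio_add_page() cannot take the whole buffer.
 */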
369bd2d0210STheodore Ts'o static int io_submit_add_bh(struct ext4_io_submit *io,
370bd2d0210STheodore Ts'o 			    struct ext4_io_page *io_page,
371bd2d0210STheodore Ts'o 			    struct inode *inode,
372bd2d0210STheodore Ts'o 			    struct writeback_control *wbc,
373bd2d0210STheodore Ts'o 			    struct buffer_head *bh)
374bd2d0210STheodore Ts'o {
375bd2d0210STheodore Ts'o 	ext4_io_end_t *io_end;
376bd2d0210STheodore Ts'o 	int ret;
377bd2d0210STheodore Ts'o 
378bd2d0210STheodore Ts'o 	if (buffer_new(bh)) {
379bd2d0210STheodore Ts'o 		clear_buffer_new(bh);
380bd2d0210STheodore Ts'o 		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
381bd2d0210STheodore Ts'o 	}
382bd2d0210STheodore Ts'o 
383bd2d0210STheodore Ts'o 	if (!buffer_mapped(bh) || buffer_delay(bh)) {
384bd2d0210STheodore Ts'o 		if (!buffer_mapped(bh))
385bd2d0210STheodore Ts'o 			clear_buffer_dirty(bh);
386bd2d0210STheodore Ts'o 		if (io->io_bio)
387bd2d0210STheodore Ts'o 			ext4_io_submit(io);
388bd2d0210STheodore Ts'o 		return 0;
389bd2d0210STheodore Ts'o 	}
390bd2d0210STheodore Ts'o 
391bd2d0210STheodore Ts'o 	if (io->io_bio && bh->b_blocknr != io->io_next_block) {
392bd2d0210STheodore Ts'o submit_and_retry:
393bd2d0210STheodore Ts'o 		ext4_io_submit(io);
394bd2d0210STheodore Ts'o 	}
395bd2d0210STheodore Ts'o 	if (io->io_bio == NULL) {
396bd2d0210STheodore Ts'o 		ret = io_submit_init(io, inode, wbc, bh);
397bd2d0210STheodore Ts'o 		if (ret)
398bd2d0210STheodore Ts'o 			return ret;
399bd2d0210STheodore Ts'o 	}
400bd2d0210STheodore Ts'o 	io_end = io->io_end;
401bd2d0210STheodore Ts'o 	if ((io_end->num_io_pages >= MAX_IO_PAGES) &&
402bd2d0210STheodore Ts'o 	    (io_end->pages[io_end->num_io_pages-1] != io_page))
403bd2d0210STheodore Ts'o 		goto submit_and_retry;
4040edeb71dSTao Ma 	if (buffer_uninit(bh))
4050edeb71dSTao Ma 		ext4_set_io_unwritten_flag(inode, io_end);
406bd2d0210STheodore Ts'o 	io->io_end->size += bh->b_size;
407bd2d0210STheodore Ts'o 	io->io_next_block++;
408bd2d0210STheodore Ts'o 	ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
409bd2d0210STheodore Ts'o 	if (ret != bh->b_size)
410bd2d0210STheodore Ts'o 		goto submit_and_retry;
411bd2d0210STheodore Ts'o 	if ((io_end->num_io_pages == 0) ||
412bd2d0210STheodore Ts'o 	    (io_end->pages[io_end->num_io_pages-1] != io_page)) {
413bd2d0210STheodore Ts'o 		io_end->pages[io_end->num_io_pages++] = io_page;
41483668e71STheodore Ts'o 		atomic_inc(&io_page->p_count);
415bd2d0210STheodore Ts'o 	}
416bd2d0210STheodore Ts'o 	return 0;
417bd2d0210STheodore Ts'o }
418bd2d0210STheodore Ts'o 
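/*
 * Write out (up to) the first 'len' bytes of a locked page.  The page is
 * put under writeback here; each dirty, mapped buffer is handed to
 * io_submit_add_bh(), while buffers beyond 'len' (the region past i_size)
 * are zeroed instead.  The initial io_page reference taken here is dropped
 * at the end, so writeback is ended once all submitted bios (if any) have
 * completed.
 */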
419bd2d0210STheodore Ts'o int ext4_bio_write_page(struct ext4_io_submit *io,
420bd2d0210STheodore Ts'o 			struct page *page,
421bd2d0210STheodore Ts'o 			int len,
422bd2d0210STheodore Ts'o 			struct writeback_control *wbc)
423bd2d0210STheodore Ts'o {
424bd2d0210STheodore Ts'o 	struct inode *inode = page->mapping->host;
425bd2d0210STheodore Ts'o 	unsigned block_start, block_end, blocksize;
426bd2d0210STheodore Ts'o 	struct ext4_io_page *io_page;
427bd2d0210STheodore Ts'o 	struct buffer_head *bh, *head;
428bd2d0210STheodore Ts'o 	int ret = 0;
429bd2d0210STheodore Ts'o 
430bd2d0210STheodore Ts'o 	blocksize = 1 << inode->i_blkbits;
431bd2d0210STheodore Ts'o 
432d50bdd5aSCurt Wohlgemuth 	BUG_ON(!PageLocked(page));
433bd2d0210STheodore Ts'o 	BUG_ON(PageWriteback(page));
434bd2d0210STheodore Ts'o 
435bd2d0210STheodore Ts'o 	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
436bd2d0210STheodore Ts'o 	if (!io_page) {
437bd2d0210STheodore Ts'o 		set_page_dirty(page);
438bd2d0210STheodore Ts'o 		unlock_page(page);
439bd2d0210STheodore Ts'o 		return -ENOMEM;
440bd2d0210STheodore Ts'o 	}
441bd2d0210STheodore Ts'o 	io_page->p_page = page;
44283668e71STheodore Ts'o 	atomic_set(&io_page->p_count, 1);
443bd2d0210STheodore Ts'o 	get_page(page);
444a54aa761STheodore Ts'o 	set_page_writeback(page);
445a54aa761STheodore Ts'o 	ClearPageError(page);
446bd2d0210STheodore Ts'o 
447bd2d0210STheodore Ts'o 	for (bh = head = page_buffers(page), block_start = 0;
448bd2d0210STheodore Ts'o 	     bh != head || !block_start;
449bd2d0210STheodore Ts'o 	     block_start = block_end, bh = bh->b_this_page) {
450d50bdd5aSCurt Wohlgemuth 
451bd2d0210STheodore Ts'o 		block_end = block_start + blocksize;
452bd2d0210STheodore Ts'o 		if (block_start >= len) {
4535a0dc736SYongqiang Yang 			/*
4545a0dc736SYongqiang Yang 			 * Comments copied from block_write_full_page_endio:
4555a0dc736SYongqiang Yang 			 *
4565a0dc736SYongqiang Yang 			 * The page straddles i_size.  It must be zeroed out on
4575a0dc736SYongqiang Yang 			 * each and every writepage invocation because it may
4585a0dc736SYongqiang Yang 			 * be mmapped.  "A file is mapped in multiples of the
4595a0dc736SYongqiang Yang 			 * page size.  For a file that is not a multiple of
4605a0dc736SYongqiang Yang 			 * the  page size, the remaining memory is zeroed when
4615a0dc736SYongqiang Yang 			 * mapped, and writes to that region are not written
4625a0dc736SYongqiang Yang 			 * out to the file."
4635a0dc736SYongqiang Yang 			 */
4645a0dc736SYongqiang Yang 			zero_user_segment(page, block_start, block_end);
465bd2d0210STheodore Ts'o 			clear_buffer_dirty(bh);
466bd2d0210STheodore Ts'o 			set_buffer_uptodate(bh);
467bd2d0210STheodore Ts'o 			continue;
468bd2d0210STheodore Ts'o 		}
469d50bdd5aSCurt Wohlgemuth 		clear_buffer_dirty(bh);
470bd2d0210STheodore Ts'o 		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
471bd2d0210STheodore Ts'o 		if (ret) {
472bd2d0210STheodore Ts'o 			/*
473bd2d0210STheodore Ts'o 			 * We only get here on ENOMEM.  Not much else
474bd2d0210STheodore Ts'o 			 * we can do but mark the page as dirty, and
475bd2d0210STheodore Ts'o 			 * better luck next time.
476bd2d0210STheodore Ts'o 			 */
477bd2d0210STheodore Ts'o 			set_page_dirty(page);
478bd2d0210STheodore Ts'o 			break;
479bd2d0210STheodore Ts'o 		}
480bd2d0210STheodore Ts'o 	}
481bd2d0210STheodore Ts'o 	unlock_page(page);
482bd2d0210STheodore Ts'o 	/*
483bd2d0210STheodore Ts'o 	 * If the page was truncated before we could do the writeback,
484bd2d0210STheodore Ts'o 	 * or we had a memory allocation error while trying to write
485bd2d0210STheodore Ts'o 	 * the first buffer head, we won't have submitted any pages for
486bd2d0210STheodore Ts'o 	 * I/O.  In that case we need to make sure we've cleared the
487bd2d0210STheodore Ts'o 	 * PageWriteback bit from the page to prevent the system from
488bd2d0210STheodore Ts'o 	 * wedging later on.
489bd2d0210STheodore Ts'o 	 */
49083668e71STheodore Ts'o 	put_io_page(io_page);
491bd2d0210STheodore Ts'o 	return ret;
492bd2d0210STheodore Ts'o }
493
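/*
 * A minimal, illustrative sketch (not part of this file) of how a writeback
 * path is expected to drive the interface above: zero an ext4_io_submit on
 * the stack, feed it one locked page at a time through ext4_bio_write_page(),
 * then flush whatever bio is still being built with ext4_io_submit().  The
 * real callers live in fs/ext4/inode.c; the helper name below is
 * hypothetical and the block is for illustration only.
 */
#if 0	/* example only -- not compiled */
static int example_write_one_page(struct page *page, int len,
				  struct writeback_control *wbc)
{
	struct ext4_io_submit io_submit;
	int ret;

	/* No bio in flight yet; ext4_bio_write_page() checks io_bio == NULL. */
	memset(&io_submit, 0, sizeof(io_submit));

	/* The page must be locked and not already under writeback. */
	ret = ext4_bio_write_page(&io_submit, page, len, wbc);

	/* Submit any bio still being assembled for this page. */
	ext4_io_submit(&io_submit);
	return ret;
}
#endif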