/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

static struct kmem_cache *io_page_cachep, *io_end_cachep;

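/*
 * Create the slab caches for the ext4_io_page and ext4_io_end
 * structures used to track pages under writeback; called when the
 * ext4 module is initialized.
 */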
int __init ext4_init_pageio(void)
{
	io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
	if (io_page_cachep == NULL)
		return -ENOMEM;
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL) {
		kmem_cache_destroy(io_page_cachep);
		return -ENOMEM;
	}
	return 0;
}

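/*
 * Destroy the io_page and io_end slab caches when the ext4 module is
 * unloaded.
 */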
void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
	kmem_cache_destroy(io_page_cachep);
}

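/*
 * Wait until all pending io_end structures for this inode have been
 * freed, i.e. until i_ioend_count drops to zero.
 */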
void ext4_ioend_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
}

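/*
 * Drop a reference to an io_page.  The final put ends writeback on
 * the page and releases our reference to it.
 */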
static void put_io_page(struct ext4_io_page *io_page)
{
	if (atomic_dec_and_test(&io_page->p_count)) {
		end_page_writeback(io_page->p_page);
		put_page(io_page->p_page);
		kmem_cache_free(io_page_cachep, io_page);
	}
}

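/*
 * Tear down an io_end: release the page references it holds, and wake
 * up anyone waiting in ext4_ioend_wait() once the inode's last
 * pending io_end goes away.
 */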
void ext4_free_io_end(ext4_io_end_t *io)
{
	int i;

	BUG_ON(!io);
	if (io->page)
		put_page(io->page);
	for (i = 0; i < io->num_io_pages; i++)
		put_io_page(io->pages[i]);
	io->num_io_pages = 0;
	if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
		wake_up_all(ext4_ioend_wq(io->inode));
	kmem_cache_free(io_end_cachep, io);
}

/*
 * Check a range of space and convert unwritten extents to written.
 *
 * Called with inode->i_mutex; we depend on this when we manipulate
 * io->flag, since we could otherwise race with ext4_flush_completed_IO()
 */
int ext4_end_io_nolock(ext4_io_end_t *io)
{
	struct inode *inode = io->inode;
	loff_t offset = io->offset;
	ssize_t size = io->size;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io, inode->i_ino, io->list.next, io->list.prev);

	ret = ext4_convert_unwritten_extents(inode, offset, size);
	if (ret < 0) {
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss!  "
			 "(inode %lu, offset %llu, size %zd, error %d)",
			 inode->i_ino, offset, size, ret);
	}

	if (io->iocb)
		aio_complete(io->iocb, io->result, 0);

	/* Wake up anyone waiting on unwritten extent conversion */
	if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten))
		wake_up_all(ext4_ioend_wq(io->inode));
	return ret;
}

/*
 * Work on completed aio dio IO, to convert unwritten extents to
 * written extents.
 */
static void ext4_end_io_work(struct work_struct *work)
{
	ext4_io_end_t		*io = container_of(work, ext4_io_end_t, work);
	struct inode		*inode = io->inode;
	struct ext4_inode_info	*ei = EXT4_I(inode);
	unsigned long		flags;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	if (list_empty(&io->list)) {
		spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
		goto free;
	}

	if (!mutex_trylock(&inode->i_mutex)) {
		spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
		/*
		 * Requeue the work instead of waiting so that the work
		 * items queued after this one can be processed.
		 */
		queue_work(EXT4_SB(inode->i_sb)->dio_unwritten_wq, &io->work);
		/*
		 * To prevent the ext4-dio-unwritten thread from keeping
		 * requeueing end_io requests and occupying cpu for too long,
		 * yield the cpu if it sees an end_io request that has already
		 * been requeued.
		 */
		if (io->flag & EXT4_IO_END_QUEUED)
			yield();
		io->flag |= EXT4_IO_END_QUEUED;
		return;
	}
	list_del_init(&io->list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
	(void) ext4_end_io_nolock(io);
	mutex_unlock(&inode->i_mutex);
free:
	ext4_free_io_end(io);
}

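/*
 * Allocate and initialize an io_end structure for the inode, bumping
 * the inode's count of outstanding io_ends.  Returns NULL on
 * allocation failure.
 */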
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
	if (io) {
		atomic_inc(&EXT4_I(inode)->i_ioend_count);
		io->inode = inode;
		INIT_WORK(&io->work, ext4_end_io_work);
		INIT_LIST_HEAD(&io->list);
	}
	return io;
}

/*
 * Print a buffer I/O error compatible with the one in fs/buffer.c.
 * This provides compatibility with dmesg scrapers that look for a
 * specific buffer I/O error message.  We really need a unified error
 * reporting structure to userspace a la Digital Unix's uerf system,
 * but it's probably not going to happen in my lifetime, due to LKML
 * politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

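/*
 * Completion callback for writeback bios.  On error, mark the
 * affected pages and report the failed buffers; then drop the page
 * references, and either free the io_end or, if it covers unwritten
 * extents, queue it for conversion.
 */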
static void ext4_end_bio(struct bio *bio, int error)
{
	ext4_io_end_t *io_end = bio->bi_private;
	struct workqueue_struct *wq;
	struct inode *inode;
	unsigned long flags;
	int i;
	sector_t bi_sector = bio->bi_sector;

	BUG_ON(!io_end);
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = 0;
	bio_put(bio);

	for (i = 0; i < io_end->num_io_pages; i++) {
		struct page *page = io_end->pages[i]->p_page;
		struct buffer_head *bh, *head;
		loff_t offset;
		loff_t io_end_offset;

		if (error) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
			head = page_buffers(page);
			BUG_ON(!head);

			io_end_offset = io_end->offset + io_end->size;

			offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
			bh = head;
			do {
				if ((offset >= io_end->offset) &&
				    (offset+bh->b_size <= io_end_offset))
					buffer_io_error(bh);

				offset += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);
		}

		put_io_page(io_end->pages[i]);
	}
	io_end->num_io_pages = 0;
	inode = io_end->inode;

	if (error) {
		io_end->flag |= EXT4_IO_END_ERROR;
		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
			     "(offset %llu size %ld starting block %llu)",
			     inode->i_ino,
			     (unsigned long long) io_end->offset,
			     (long) io_end->size,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
	}

	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
		ext4_free_io_end(io_end);
		return;
	}

	/* Add the io_end to the per-inode completed io list */
	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
	list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);

	wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
	/* queue the work to convert unwritten extents to written */
	queue_work(wq, &io_end->work);
}

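/*
 * Submit the bio that has been built up in the ext4_io_submit
 * structure, if any, and reset the structure for the next caller.
 */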
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		bio_get(io->io_bio);
		submit_bio(io->io_op, io->io_bio);
		BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
		bio_put(io->io_bio);
	}
	io->io_bio = NULL;
	io->io_op = 0;
	io->io_end = NULL;
}

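/*
 * Allocate a new io_end and a bio starting at bh's block, and wire
 * them into the ext4_io_submit structure for subsequent
 * io_submit_add_bh() calls.
 */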
static int io_submit_init(struct ext4_io_submit *io,
			  struct inode *inode,
			  struct writeback_control *wbc,
			  struct buffer_head *bh)
{
	ext4_io_end_t *io_end;
	struct page *page = bh->b_page;
	int nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio *bio;

	io_end = ext4_init_io_end(inode, GFP_NOFS);
	if (!io_end)
		return -ENOMEM;
	bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_private = io->io_end = io_end;
	bio->bi_end_io = ext4_end_bio;

	io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);

	io->io_bio = bio;
	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
	io->io_next_block = bh->b_blocknr;
	return 0;
}

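/*
 * Add bh to the bio under construction, submitting the current bio
 * and starting a new one whenever the buffer is not physically
 * contiguous with it, the io_end is full, or the page cannot be added
 * in full.  Unmapped and delayed-allocation buffers are skipped.
 */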
static int io_submit_add_bh(struct ext4_io_submit *io,
			    struct ext4_io_page *io_page,
			    struct inode *inode,
			    struct writeback_control *wbc,
			    struct buffer_head *bh)
{
	ext4_io_end_t *io_end;
	int ret;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
	}

	if (!buffer_mapped(bh) || buffer_delay(bh)) {
		if (!buffer_mapped(bh))
			clear_buffer_dirty(bh);
		if (io->io_bio)
			ext4_io_submit(io);
		return 0;
	}

	if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		ret = io_submit_init(io, inode, wbc, bh);
		if (ret)
			return ret;
	}
	io_end = io->io_end;
	if ((io_end->num_io_pages >= MAX_IO_PAGES) &&
	    (io_end->pages[io_end->num_io_pages-1] != io_page))
		goto submit_and_retry;
	if (buffer_uninit(bh))
		ext4_set_io_unwritten_flag(inode, io_end);
	io->io_end->size += bh->b_size;
	io->io_next_block++;
	ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
	if (ret != bh->b_size)
		goto submit_and_retry;
	if ((io_end->num_io_pages == 0) ||
	    (io_end->pages[io_end->num_io_pages-1] != io_page)) {
		io_end->pages[io_end->num_io_pages++] = io_page;
		atomic_inc(&io_page->p_count);
	}
	return 0;
}

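/*
 * Queue the dirty buffers of a locked page for writeback, up to len
 * bytes; buffers beyond len are cleared.  The page is unlocked before
 * return, and PageWriteback is cleared by put_io_page() once all
 * outstanding buffer I/O has completed.
 */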
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);
	set_page_writeback(page);
	ClearPageError(page);

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		block_end = block_start + blocksize;
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		clear_buffer_dirty(bh);
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM.  Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	/*
	 * If the page was truncated before we could do the writeback,
	 * or we had a memory allocation error while trying to write
	 * the first buffer head, we won't have submitted any pages for
	 * I/O.  In that case we need to make sure we've cleared the
	 * PageWriteback bit from the page to prevent the system from
	 * wedging later on.
	 */
	put_io_page(io_page);
	return ret;
}