xref: /linux/fs/xfs/xfs_aops.c (revision 84803fb78237014cbbc86c0f012b273a199f4691)
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

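/*
 * Walk the buffers attached to a page and report whether any of them are
 * in delalloc or unwritten state.
 */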
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

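/*
 * Return the block device backing this inode: the realtime device for
 * realtime inodes, otherwise the data device.
 */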
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	if (ioend->io_iocb) {
		if (ioend->io_isasync) {
			aio_complete(ioend->io_iocb, ioend->io_error ?
					ioend->io_error : ioend->io_result, 0);
		}
		inode_dio_done(ioend->io_inode);
	}

	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC void
xfs_setfilesize(
	struct xfs_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
	if (isize) {
		trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
		ip->i_d.di_size = isize;
		xfs_mark_inode_dirty(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

/*
 * Schedule IO completion handling on the final put of an ioend.
 *
 * If there is no work to do we might as well call it a day and free the
 * ioend right now.
 */
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

		if (ioend->io_type == IO_UNWRITTEN)
			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
		else if (xfs_ioend_is_append(ioend))
			queue_work(mp->m_data_workqueue, &ioend->io_work);
		else
			xfs_destroy_ioend(ioend);
	}
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	int		error = 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ioend->io_error = -EIO;
		goto done;
	}
	if (ioend->io_error)
		goto done;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == IO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						 ioend->io_size);
		if (error) {
			ioend->io_error = -error;
			goto done;
		}
	} else {
		/*
		 * We might have to update the on-disk file size after
		 * extending writes.
		 */
		xfs_setfilesize(ioend);
	}

done:
	xfs_destroy_ioend(ioend);
}

/*
 * Call IO completion handling in caller context on the final put of an ioend.
 */
STATIC void
xfs_finish_ioend_sync(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		xfs_end_io(&ioend->io_work);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the on-disk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from happening before we have started
	 * all the I/O, and hence from calling the completion routine
	 * too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_isasync = 0;
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	ioend->io_offset = 0;
	ioend->io_size = 0;
	ioend->io_iocb = NULL;
	ioend->io_result = 0;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}

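/*
 * Look up the extent mapping covering @offset for writeback.  For delalloc
 * writeback we also allocate the real blocks here if the lookup does not
 * find any, so the data has somewhere to go before the I/O is submitted.
 */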
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type,
	int			nonblocking)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (type == IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -XFS_ERROR(EAGAIN);
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_maxioffset);

	if (offset + count > mp->m_maxioffset)
		count = mp->m_maxioffset - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return -XFS_ERROR(error);

	if (type == IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, count, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return -XFS_ERROR(error);
	}

#ifdef DEBUG
	if (type == IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

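/*
 * Return true if the cached mapping covers the block containing @offset.
 */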
STATIC int
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to a per-mount workqueue */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
}

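/*
 * Associate a bio with an ioend and submit it: an extra reference is taken
 * on the ioend for the bio in flight, which is dropped again in
 * xfs_end_bio().
 */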
STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);

	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	/*
	 * If the I/O is beyond EOF we mark the inode dirty immediately
	 * but don't update the inode size until I/O completion.
	 */
	if (xfs_new_eof(ip, ioend->io_offset + ioend->io_size))
		xfs_mark_inode_dirty(ip);

	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}

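/*
 * Allocate a bio sized to the device limits and point it at the buffer's
 * starting sector.
 */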
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	return bio;
}

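/*
 * Mark a buffer for async write.  The buffer must already be mapped and
 * locked, and must not be delalloc or unwritten.
 */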
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

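/*
 * Move a page into writeback state and unlock it.  @buffers is the number
 * of buffers about to be submitted from this page; if it is zero there is
 * no I/O to wait for and writeback is finished on the spot.
 */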
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we go, we can end up with a page that only has some buffers
 * marked async write, and I/O completion can occur before we mark the
 * remaining buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback()
 * because we call it twice for the one page as the code in
 * end_buffer_async_write() assumes that all buffers on the page are started
 * at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this ioend.
 * Toss the ioend too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * The new or current ioend is returned via *result.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

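/*
 * Compute the on-disk block number for the buffer at @offset from the
 * extent mapping and mark the buffer mapped.
 */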
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

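/*
 * Map the buffer to disk and clear its delalloc/unwritten state now that
 * real blocks back it.
 */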
STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IO_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IO_DELALLOC);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IO_OVERWRITE);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map. Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only, for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    buffer_mapped(bh)) {
			if (buffer_unwritten(bh))
				type = IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = IO_DELALLOC;
			else
				type = IO_OVERWRITE;

			if (!xfs_imap_valid(inode, imap, offset)) {
				done = 1;
				continue;
			}

			lock_buffer(bh);
			if (type != IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			done = 1;
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by imap and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					imap, ioendp, wbc);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

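/*
 * Invalidate the page's buffers from @offset onwards via the generic
 * buffer-head code, tracing the call first.
 */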
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_is_delayed_page(page, IO_DELALLOC))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0);
	return;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct inode		*inode = page->mapping->host;
	struct buffer_head	*bh, *head;
	struct xfs_bmbt_irec	imap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index;
	ssize_t			len;
	int			err, imap_valid = 0, uptodate = 1;
	int			count = 0;
	int			nonblocking = 0;

	trace_xfs_writepage(inode, page, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON(current->flags & PF_FSTRANS))
		goto redirty;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			unlock_page(page);
			return 0;
		}
	}

	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			offset);
	len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	type = IO_OVERWRITE;

	if (wbc->sync_mode == WB_SYNC_NONE)
		nonblocking = 1;

	do {
		int new_ioend = 0;

		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			imap_valid = 0;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (type != IO_UNWRITTEN) {
				type = IO_UNWRITTEN;
				imap_valid = 0;
			}
		} else if (buffer_delay(bh)) {
			if (type != IO_DELALLOC) {
				type = IO_DELALLOC;
				imap_valid = 0;
			}
		} else if (buffer_uptodate(bh)) {
			if (type != IO_OVERWRITE) {
				type = IO_OVERWRITE;
				imap_valid = 0;
			}
		} else {
			if (PageUptodate(page)) {
				ASSERT(buffer_mapped(bh));
				imap_valid = 0;
			}
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		if (!imap_valid) {
			/*
			 * If we didn't have a valid mapping then we need to
			 * put the new mapping into a separate ioend structure.
			 * This ensures non-contiguous extents always have
			 * separate ioends, which is particularly important
			 * for unwritten extent conversion at I/O completion
			 * time.
			 */
			new_ioend = 1;
			err = xfs_map_blocks(inode, offset, &imap, type,
					     nonblocking);
			if (err)
				goto error;
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		}
		if (imap_valid) {
			lock_buffer(bh);
			if (type != IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
					 new_ioend);
			count++;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);

	if (ioend && imap_valid) {
		xfs_off_t		end_index;

		end_index = imap.br_startoff + imap.br_blockcount;

		/* to bytes */
		end_index <<= inode->i_blkbits;

		/* to pages */
		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

		/* check against file size */
		if (end_index > last_index)
			end_index = last_index;

		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
				  wbc, end_index);
	}

	if (iohead)
		xfs_submit_ioend(wbc, iohead);

	return 0;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	if (err == -EAGAIN)
		goto redirty;

	xfs_aops_discard_page(page);
	ClearPageUptodate(page);
	unlock_page(page);
	return err;

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

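/*
 * Clear the XFS_ITRUNCATED flag, which records a recent truncate, before
 * handing the real work to generic_writepages().
 */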
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

1034c59d87c4SChristoph Hellwig /*
1035c59d87c4SChristoph Hellwig  * Called to move a page into cleanable state - and from there
1036c59d87c4SChristoph Hellwig  * to be released. The page should already be clean. We always
1037c59d87c4SChristoph Hellwig  * have buffer heads in this call.
1038c59d87c4SChristoph Hellwig  *
1039c59d87c4SChristoph Hellwig  * Returns 1 if the page is ok to release, 0 otherwise.
1040c59d87c4SChristoph Hellwig  */
1041c59d87c4SChristoph Hellwig STATIC int
1042c59d87c4SChristoph Hellwig xfs_vm_releasepage(
1043c59d87c4SChristoph Hellwig 	struct page		*page,
1044c59d87c4SChristoph Hellwig 	gfp_t			gfp_mask)
1045c59d87c4SChristoph Hellwig {
1046c59d87c4SChristoph Hellwig 	int			delalloc, unwritten;
1047c59d87c4SChristoph Hellwig 
1048c59d87c4SChristoph Hellwig 	trace_xfs_releasepage(page->mapping->host, page, 0);
1049c59d87c4SChristoph Hellwig 
1050c59d87c4SChristoph Hellwig 	xfs_count_page_state(page, &delalloc, &unwritten);
1051c59d87c4SChristoph Hellwig 
1052c59d87c4SChristoph Hellwig 	if (WARN_ON(delalloc))
1053c59d87c4SChristoph Hellwig 		return 0;
1054c59d87c4SChristoph Hellwig 	if (WARN_ON(unwritten))
1055c59d87c4SChristoph Hellwig 		return 0;
1056c59d87c4SChristoph Hellwig 
1057c59d87c4SChristoph Hellwig 	return try_to_free_buffers(page);
1058c59d87c4SChristoph Hellwig }
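
/*
 * A hedged usage sketch, not part of the original file: the VM reaches
 * ->releasepage via try_to_release_page(), so the 0/1 return above
 * decides whether the buffers of a clean page may be torn down.  The
 * wrapper name below is hypothetical.
 */
static int example_try_release(struct page *page)
{
	/*
	 * Succeeds (returns 1) only when xfs_vm_releasepage() finds no
	 * delalloc or unwritten buffers and try_to_free_buffers() frees
	 * the buffer heads.
	 */
	return try_to_release_page(page, GFP_NOFS);
}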
1059c59d87c4SChristoph Hellwig 
1060c59d87c4SChristoph Hellwig STATIC int
1061c59d87c4SChristoph Hellwig __xfs_get_blocks(
1062c59d87c4SChristoph Hellwig 	struct inode		*inode,
1063c59d87c4SChristoph Hellwig 	sector_t		iblock,
1064c59d87c4SChristoph Hellwig 	struct buffer_head	*bh_result,
1065c59d87c4SChristoph Hellwig 	int			create,
1066c59d87c4SChristoph Hellwig 	int			direct)
1067c59d87c4SChristoph Hellwig {
1068c59d87c4SChristoph Hellwig 	struct xfs_inode	*ip = XFS_I(inode);
1069c59d87c4SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
1070c59d87c4SChristoph Hellwig 	xfs_fileoff_t		offset_fsb, end_fsb;
1071c59d87c4SChristoph Hellwig 	int			error = 0;
1072c59d87c4SChristoph Hellwig 	int			lockmode = 0;
1073c59d87c4SChristoph Hellwig 	struct xfs_bmbt_irec	imap;
1074c59d87c4SChristoph Hellwig 	int			nimaps = 1;
1075c59d87c4SChristoph Hellwig 	xfs_off_t		offset;
1076c59d87c4SChristoph Hellwig 	ssize_t			size;
1077c59d87c4SChristoph Hellwig 	int			new = 0;
1078c59d87c4SChristoph Hellwig 
1079c59d87c4SChristoph Hellwig 	if (XFS_FORCED_SHUTDOWN(mp))
1080c59d87c4SChristoph Hellwig 		return -XFS_ERROR(EIO);
1081c59d87c4SChristoph Hellwig 
1082c59d87c4SChristoph Hellwig 	offset = (xfs_off_t)iblock << inode->i_blkbits;
1083c59d87c4SChristoph Hellwig 	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
1084c59d87c4SChristoph Hellwig 	size = bh_result->b_size;
1085c59d87c4SChristoph Hellwig 
1086c59d87c4SChristoph Hellwig 	if (!create && direct && offset >= i_size_read(inode))
1087c59d87c4SChristoph Hellwig 		return 0;
1088c59d87c4SChristoph Hellwig 
1089c59d87c4SChristoph Hellwig 	if (create) {
1090c59d87c4SChristoph Hellwig 		lockmode = XFS_ILOCK_EXCL;
1091c59d87c4SChristoph Hellwig 		xfs_ilock(ip, lockmode);
1092c59d87c4SChristoph Hellwig 	} else {
1093c59d87c4SChristoph Hellwig 		lockmode = xfs_ilock_map_shared(ip);
1094c59d87c4SChristoph Hellwig 	}
1095c59d87c4SChristoph Hellwig 
1096c59d87c4SChristoph Hellwig 	ASSERT(offset <= mp->m_maxioffset);
1097c59d87c4SChristoph Hellwig 	if (offset + size > mp->m_maxioffset)
1098c59d87c4SChristoph Hellwig 		size = mp->m_maxioffset - offset;
1099c59d87c4SChristoph Hellwig 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
1100c59d87c4SChristoph Hellwig 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
1101c59d87c4SChristoph Hellwig 
11025c8ed202SDave Chinner 	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
11035c8ed202SDave Chinner 				&imap, &nimaps, XFS_BMAPI_ENTIRE);
1104c59d87c4SChristoph Hellwig 	if (error)
1105c59d87c4SChristoph Hellwig 		goto out_unlock;
1106c59d87c4SChristoph Hellwig 
1107c59d87c4SChristoph Hellwig 	if (create &&
1108c59d87c4SChristoph Hellwig 	    (!nimaps ||
1109c59d87c4SChristoph Hellwig 	     (imap.br_startblock == HOLESTARTBLOCK ||
1110c59d87c4SChristoph Hellwig 	      imap.br_startblock == DELAYSTARTBLOCK))) {
1111c59d87c4SChristoph Hellwig 		if (direct) {
1112c59d87c4SChristoph Hellwig 			error = xfs_iomap_write_direct(ip, offset, size,
1113c59d87c4SChristoph Hellwig 						       &imap, nimaps);
1114c59d87c4SChristoph Hellwig 		} else {
1115c59d87c4SChristoph Hellwig 			error = xfs_iomap_write_delay(ip, offset, size, &imap);
1116c59d87c4SChristoph Hellwig 		}
1117c59d87c4SChristoph Hellwig 		if (error)
1118c59d87c4SChristoph Hellwig 			goto out_unlock;
1119c59d87c4SChristoph Hellwig 
1120c59d87c4SChristoph Hellwig 		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
1121c59d87c4SChristoph Hellwig 	} else if (nimaps) {
1122c59d87c4SChristoph Hellwig 		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
1123c59d87c4SChristoph Hellwig 	} else {
1124c59d87c4SChristoph Hellwig 		trace_xfs_get_blocks_notfound(ip, offset, size);
1125c59d87c4SChristoph Hellwig 		goto out_unlock;
1126c59d87c4SChristoph Hellwig 	}
1127c59d87c4SChristoph Hellwig 	xfs_iunlock(ip, lockmode);
1128c59d87c4SChristoph Hellwig 
1129c59d87c4SChristoph Hellwig 	if (imap.br_startblock != HOLESTARTBLOCK &&
1130c59d87c4SChristoph Hellwig 	    imap.br_startblock != DELAYSTARTBLOCK) {
1131c59d87c4SChristoph Hellwig 		/*
1132c59d87c4SChristoph Hellwig 		 * For unwritten extents do not report a disk address in
1133c59d87c4SChristoph Hellwig 		 * the read case (treat as if we're reading into a hole).
1134c59d87c4SChristoph Hellwig 		 */
1135c59d87c4SChristoph Hellwig 		if (create || !ISUNWRITTEN(&imap))
1136c59d87c4SChristoph Hellwig 			xfs_map_buffer(inode, bh_result, &imap, offset);
1137c59d87c4SChristoph Hellwig 		if (create && ISUNWRITTEN(&imap)) {
1138c59d87c4SChristoph Hellwig 			if (direct)
1139c59d87c4SChristoph Hellwig 				bh_result->b_private = inode;
1140c59d87c4SChristoph Hellwig 			set_buffer_unwritten(bh_result);
1141c59d87c4SChristoph Hellwig 		}
1142c59d87c4SChristoph Hellwig 	}
1143c59d87c4SChristoph Hellwig 
1144c59d87c4SChristoph Hellwig 	/*
1145c59d87c4SChristoph Hellwig 	 * If this is a realtime file, data may be on a different device
1146c59d87c4SChristoph Hellwig 	 * to that pointed to by the buffer_head's b_bdev currently.
1147c59d87c4SChristoph Hellwig 	 */
1148c59d87c4SChristoph Hellwig 	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
1149c59d87c4SChristoph Hellwig 
1150c59d87c4SChristoph Hellwig 	/*
1151c59d87c4SChristoph Hellwig 	 * If we previously allocated a block out beyond eof and we are now
1152c59d87c4SChristoph Hellwig 	 * coming back to use it then we will need to flag it as new even if it
1153c59d87c4SChristoph Hellwig 	 * has a disk address.
1154c59d87c4SChristoph Hellwig 	 *
1155c59d87c4SChristoph Hellwig 	 * With sub-block writes into unwritten extents we also need to mark
1156c59d87c4SChristoph Hellwig 	 * the buffer as new so that the unwritten parts of the buffer get
1157c59d87c4SChristoph Hellwig 	 * correctly zeroed.
1158c59d87c4SChristoph Hellwig 	 */
1159c59d87c4SChristoph Hellwig 	if (create &&
1160c59d87c4SChristoph Hellwig 	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
1161c59d87c4SChristoph Hellwig 	     (offset >= i_size_read(inode)) ||
1162c59d87c4SChristoph Hellwig 	     (new || ISUNWRITTEN(&imap))))
1163c59d87c4SChristoph Hellwig 		set_buffer_new(bh_result);
1164c59d87c4SChristoph Hellwig 
1165c59d87c4SChristoph Hellwig 	if (imap.br_startblock == DELAYSTARTBLOCK) {
1166c59d87c4SChristoph Hellwig 		BUG_ON(direct);
1167c59d87c4SChristoph Hellwig 		if (create) {
1168c59d87c4SChristoph Hellwig 			set_buffer_uptodate(bh_result);
1169c59d87c4SChristoph Hellwig 			set_buffer_mapped(bh_result);
1170c59d87c4SChristoph Hellwig 			set_buffer_delay(bh_result);
1171c59d87c4SChristoph Hellwig 		}
1172c59d87c4SChristoph Hellwig 	}
1173c59d87c4SChristoph Hellwig 
1174c59d87c4SChristoph Hellwig 	/*
1175c59d87c4SChristoph Hellwig 	 * If this is O_DIRECT or the mpage code calling, tell them how large
1176c59d87c4SChristoph Hellwig 	 * the mapping is so that we can avoid repeated get_blocks calls.
1177c59d87c4SChristoph Hellwig 	 */
1178c59d87c4SChristoph Hellwig 	if (direct || size > (1 << inode->i_blkbits)) {
1179c59d87c4SChristoph Hellwig 		xfs_off_t		mapping_size;
1180c59d87c4SChristoph Hellwig 
1181c59d87c4SChristoph Hellwig 		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
1182c59d87c4SChristoph Hellwig 		mapping_size <<= inode->i_blkbits;
1183c59d87c4SChristoph Hellwig 
1184c59d87c4SChristoph Hellwig 		ASSERT(mapping_size > 0);
1185c59d87c4SChristoph Hellwig 		if (mapping_size > size)
1186c59d87c4SChristoph Hellwig 			mapping_size = size;
1187c59d87c4SChristoph Hellwig 		if (mapping_size > LONG_MAX)
1188c59d87c4SChristoph Hellwig 			mapping_size = LONG_MAX;
1189c59d87c4SChristoph Hellwig 
1190c59d87c4SChristoph Hellwig 		bh_result->b_size = mapping_size;
1191c59d87c4SChristoph Hellwig 	}
1192c59d87c4SChristoph Hellwig 
1193c59d87c4SChristoph Hellwig 	return 0;
1194c59d87c4SChristoph Hellwig 
1195c59d87c4SChristoph Hellwig out_unlock:
1196c59d87c4SChristoph Hellwig 	xfs_iunlock(ip, lockmode);
1197c59d87c4SChristoph Hellwig 	return -error;
1198c59d87c4SChristoph Hellwig }
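
/*
 * A worked example of the offset arithmetic above (illustrative
 * values only): XFS_B_TO_FSBT() truncates a byte offset down to a
 * filesystem block while XFS_B_TO_FSB() rounds up, so the range
 * [offset_fsb, end_fsb) always covers the whole request.  With
 * 4096-byte blocks (i_blkbits == 12), a mapping request at byte
 * offset 10000 for 5000 bytes yields offset_fsb == 2 (10000 >> 12)
 * and end_fsb == 4 (15000 rounded up), i.e. blocks 2 and 3 are read
 * from the extent tree in one xfs_bmapi_read() call.
 */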
1199c59d87c4SChristoph Hellwig 
1200c59d87c4SChristoph Hellwig int
1201c59d87c4SChristoph Hellwig xfs_get_blocks(
1202c59d87c4SChristoph Hellwig 	struct inode		*inode,
1203c59d87c4SChristoph Hellwig 	sector_t		iblock,
1204c59d87c4SChristoph Hellwig 	struct buffer_head	*bh_result,
1205c59d87c4SChristoph Hellwig 	int			create)
1206c59d87c4SChristoph Hellwig {
1207c59d87c4SChristoph Hellwig 	return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
1208c59d87c4SChristoph Hellwig }
1209c59d87c4SChristoph Hellwig 
1210c59d87c4SChristoph Hellwig STATIC int
1211c59d87c4SChristoph Hellwig xfs_get_blocks_direct(
1212c59d87c4SChristoph Hellwig 	struct inode		*inode,
1213c59d87c4SChristoph Hellwig 	sector_t		iblock,
1214c59d87c4SChristoph Hellwig 	struct buffer_head	*bh_result,
1215c59d87c4SChristoph Hellwig 	int			create)
1216c59d87c4SChristoph Hellwig {
1217c59d87c4SChristoph Hellwig 	return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
1218c59d87c4SChristoph Hellwig }
1219c59d87c4SChristoph Hellwig 
1220c59d87c4SChristoph Hellwig /*
1221c59d87c4SChristoph Hellwig  * Complete a direct I/O write request.
1222c59d87c4SChristoph Hellwig  *
1223c59d87c4SChristoph Hellwig  * If the private argument is non-NULL __xfs_get_blocks signals us that we
1224c59d87c4SChristoph Hellwig  * need to issue a transaction to convert the range from unwritten to written
1225c59d87c4SChristoph Hellwig  * extents.  If this is regular synchronous I/O we just call xfs_end_io
1226c59d87c4SChristoph Hellwig  * to do this and we are done.  But if this was a successful AIO
1227c59d87c4SChristoph Hellwig  * request this handler is called from interrupt context, from which we
1228c59d87c4SChristoph Hellwig  * can't start transactions.  In that case offload the I/O completion to
1229c59d87c4SChristoph Hellwig  * the workqueues we also use for buffered I/O completion.
1230c59d87c4SChristoph Hellwig  */
1231c59d87c4SChristoph Hellwig STATIC void
1232c59d87c4SChristoph Hellwig xfs_end_io_direct_write(
1233c59d87c4SChristoph Hellwig 	struct kiocb		*iocb,
1234c59d87c4SChristoph Hellwig 	loff_t			offset,
1235c59d87c4SChristoph Hellwig 	ssize_t			size,
1236c59d87c4SChristoph Hellwig 	void			*private,
1237c59d87c4SChristoph Hellwig 	int			ret,
1238c59d87c4SChristoph Hellwig 	bool			is_async)
1239c59d87c4SChristoph Hellwig {
1240c59d87c4SChristoph Hellwig 	struct xfs_ioend	*ioend = iocb->private;
1241c59d87c4SChristoph Hellwig 
1242c59d87c4SChristoph Hellwig 	/*
12432813d682SChristoph Hellwig 	 * While the generic direct I/O code updates the inode size, it does
12442813d682SChristoph Hellwig 	 * so only after the end_io handler is called, which means our
12452813d682SChristoph Hellwig 	 * end_io handler thinks the on-disk size is outside the in-core
12462813d682SChristoph Hellwig 	 * size.  To prevent this just update it a little bit earlier here.
12472813d682SChristoph Hellwig 	 */
12482813d682SChristoph Hellwig 	if (offset + size > i_size_read(ioend->io_inode))
12492813d682SChristoph Hellwig 		i_size_write(ioend->io_inode, offset + size);
12502813d682SChristoph Hellwig 
12512813d682SChristoph Hellwig 	/*
1252c59d87c4SChristoph Hellwig 	 * blockdev_direct_IO can return an error even after the I/O
1253c59d87c4SChristoph Hellwig 	 * completion handler was called.  Thus we need to protect
1254c59d87c4SChristoph Hellwig 	 * against double-freeing.
1255c59d87c4SChristoph Hellwig 	 */
1256c59d87c4SChristoph Hellwig 	iocb->private = NULL;
1257c59d87c4SChristoph Hellwig 
1258c59d87c4SChristoph Hellwig 	ioend->io_offset = offset;
1259c59d87c4SChristoph Hellwig 	ioend->io_size = size;
1260c859cdd1SChristoph Hellwig 	ioend->io_iocb = iocb;
1261c859cdd1SChristoph Hellwig 	ioend->io_result = ret;
1262c59d87c4SChristoph Hellwig 	if (private && size > 0)
1263c59d87c4SChristoph Hellwig 		ioend->io_type = IO_UNWRITTEN;
1264c59d87c4SChristoph Hellwig 
1265c59d87c4SChristoph Hellwig 	if (is_async) {
1266c859cdd1SChristoph Hellwig 		ioend->io_isasync = 1;
1267c59d87c4SChristoph Hellwig 		xfs_finish_ioend(ioend);
1268c59d87c4SChristoph Hellwig 	} else {
1269c59d87c4SChristoph Hellwig 		xfs_finish_ioend_sync(ioend);
1270c59d87c4SChristoph Hellwig 	}
1271c59d87c4SChristoph Hellwig }
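
/*
 * A minimal sketch of the completion-offload pattern described above;
 * every example_* name is hypothetical and not part of this file.
 * Work that may start transactions (unwritten extent conversion) must
 * not run in interrupt context, so an AIO completion defers it to a
 * workqueue while synchronous I/O completes it inline.
 */
struct example_ioend {
	struct work_struct	work;	/* stand-in for the real ioend */
};

static struct workqueue_struct	*example_wq;	/* hypothetical queue */

static void example_ioend_work(struct work_struct *work)
{
	/* the transaction-starting conversion would happen here */
}

static void example_end_io(struct example_ioend *ioend, bool is_async)
{
	if (is_async)
		queue_work(example_wq, &ioend->work);	/* defer to process context */
	else
		example_ioend_work(&ioend->work);	/* safe to run inline */
}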
1272c59d87c4SChristoph Hellwig 
1273c59d87c4SChristoph Hellwig STATIC ssize_t
1274c59d87c4SChristoph Hellwig xfs_vm_direct_IO(
1275c59d87c4SChristoph Hellwig 	int			rw,
1276c59d87c4SChristoph Hellwig 	struct kiocb		*iocb,
1277c59d87c4SChristoph Hellwig 	const struct iovec	*iov,
1278c59d87c4SChristoph Hellwig 	loff_t			offset,
1279c59d87c4SChristoph Hellwig 	unsigned long		nr_segs)
1280c59d87c4SChristoph Hellwig {
1281c59d87c4SChristoph Hellwig 	struct inode		*inode = iocb->ki_filp->f_mapping->host;
1282c59d87c4SChristoph Hellwig 	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
1283c59d87c4SChristoph Hellwig 	ssize_t			ret;
1284c59d87c4SChristoph Hellwig 
1285c59d87c4SChristoph Hellwig 	if (rw & WRITE) {
1286c59d87c4SChristoph Hellwig 		iocb->private = xfs_alloc_ioend(inode, IO_DIRECT);
1287c59d87c4SChristoph Hellwig 
1288c59d87c4SChristoph Hellwig 		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
1289c59d87c4SChristoph Hellwig 					    offset, nr_segs,
1290c59d87c4SChristoph Hellwig 					    xfs_get_blocks_direct,
1291c59d87c4SChristoph Hellwig 					    xfs_end_io_direct_write, NULL, 0);
1292c59d87c4SChristoph Hellwig 		if (ret != -EIOCBQUEUED && iocb->private)
1293c59d87c4SChristoph Hellwig 			xfs_destroy_ioend(iocb->private);
1294c59d87c4SChristoph Hellwig 	} else {
1295c59d87c4SChristoph Hellwig 		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
1296c59d87c4SChristoph Hellwig 					    offset, nr_segs,
1297c59d87c4SChristoph Hellwig 					    xfs_get_blocks_direct,
1298c59d87c4SChristoph Hellwig 					    NULL, NULL, 0);
1299c59d87c4SChristoph Hellwig 	}
1300c59d87c4SChristoph Hellwig 
1301c59d87c4SChristoph Hellwig 	return ret;
1302c59d87c4SChristoph Hellwig }
1303c59d87c4SChristoph Hellwig 
1304c59d87c4SChristoph Hellwig STATIC void
1305c59d87c4SChristoph Hellwig xfs_vm_write_failed(
1306c59d87c4SChristoph Hellwig 	struct address_space	*mapping,
1307c59d87c4SChristoph Hellwig 	loff_t			to)
1308c59d87c4SChristoph Hellwig {
1309c59d87c4SChristoph Hellwig 	struct inode		*inode = mapping->host;
1310c59d87c4SChristoph Hellwig 
1311c59d87c4SChristoph Hellwig 	if (to > inode->i_size) {
1312c59d87c4SChristoph Hellwig 		/*
13132813d682SChristoph Hellwig 		 * Punch out the delalloc blocks we have already allocated.
13142813d682SChristoph Hellwig 		 *
13152813d682SChristoph Hellwig 		 * Don't bother with xfs_setattr given that nothing can have
13162813d682SChristoph Hellwig 		 * made it to disk yet as the page is still locked at this
13172813d682SChristoph Hellwig 		 * point.
1318c59d87c4SChristoph Hellwig 		 */
1319c59d87c4SChristoph Hellwig 		struct xfs_inode	*ip = XFS_I(inode);
1320c59d87c4SChristoph Hellwig 		xfs_fileoff_t		start_fsb;
1321c59d87c4SChristoph Hellwig 		xfs_fileoff_t		end_fsb;
1322c59d87c4SChristoph Hellwig 		int			error;
1323c59d87c4SChristoph Hellwig 
1324c59d87c4SChristoph Hellwig 		truncate_pagecache(inode, to, inode->i_size);
1325c59d87c4SChristoph Hellwig 
1326c59d87c4SChristoph Hellwig 		/*
1327c59d87c4SChristoph Hellwig 		 * Check if there are any blocks that are outside of i_size
1328c59d87c4SChristoph Hellwig 		 * that need to be trimmed back.
1329c59d87c4SChristoph Hellwig 		 */
1330c59d87c4SChristoph Hellwig 		start_fsb = XFS_B_TO_FSB(ip->i_mount, inode->i_size) + 1;
1331c59d87c4SChristoph Hellwig 		end_fsb = XFS_B_TO_FSB(ip->i_mount, to);
1332c59d87c4SChristoph Hellwig 		if (end_fsb <= start_fsb)
1333c59d87c4SChristoph Hellwig 			return;
1334c59d87c4SChristoph Hellwig 
1335c59d87c4SChristoph Hellwig 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1336c59d87c4SChristoph Hellwig 		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
1337c59d87c4SChristoph Hellwig 							end_fsb - start_fsb);
1338c59d87c4SChristoph Hellwig 		if (error) {
1339c59d87c4SChristoph Hellwig 			/* something went wrong, just bail */
1340c59d87c4SChristoph Hellwig 			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1341c59d87c4SChristoph Hellwig 				xfs_alert(ip->i_mount,
1342c59d87c4SChristoph Hellwig 			"xfs_vm_write_failed: unable to clean up ino %lld",
1343c59d87c4SChristoph Hellwig 						ip->i_ino);
1344c59d87c4SChristoph Hellwig 			}
1345c59d87c4SChristoph Hellwig 		}
1346c59d87c4SChristoph Hellwig 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1347c59d87c4SChristoph Hellwig 	}
1348c59d87c4SChristoph Hellwig }
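
/*
 * A worked example of the trim range above (illustrative numbers,
 * assuming 4096-byte filesystem blocks): with inode->i_size == 8192
 * and a failed write that extended the request to 'to' == 32768,
 * start_fsb == XFS_B_TO_FSB(8192) + 1 == 3 and end_fsb ==
 * XFS_B_TO_FSB(32768) == 8, so the delalloc reservation over blocks
 * 3..7, which can no longer reach disk, is punched back out.
 */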
1349c59d87c4SChristoph Hellwig 
1350c59d87c4SChristoph Hellwig STATIC int
1351c59d87c4SChristoph Hellwig xfs_vm_write_begin(
1352c59d87c4SChristoph Hellwig 	struct file		*file,
1353c59d87c4SChristoph Hellwig 	struct address_space	*mapping,
1354c59d87c4SChristoph Hellwig 	loff_t			pos,
1355c59d87c4SChristoph Hellwig 	unsigned		len,
1356c59d87c4SChristoph Hellwig 	unsigned		flags,
1357c59d87c4SChristoph Hellwig 	struct page		**pagep,
1358c59d87c4SChristoph Hellwig 	void			**fsdata)
1359c59d87c4SChristoph Hellwig {
1360c59d87c4SChristoph Hellwig 	int			ret;
1361c59d87c4SChristoph Hellwig 
1362c59d87c4SChristoph Hellwig 	ret = block_write_begin(mapping, pos, len, flags | AOP_FLAG_NOFS,
1363c59d87c4SChristoph Hellwig 				pagep, xfs_get_blocks);
1364c59d87c4SChristoph Hellwig 	if (unlikely(ret))
1365c59d87c4SChristoph Hellwig 		xfs_vm_write_failed(mapping, pos + len);
1366c59d87c4SChristoph Hellwig 	return ret;
1367c59d87c4SChristoph Hellwig }
1368c59d87c4SChristoph Hellwig 
1369c59d87c4SChristoph Hellwig STATIC int
1370c59d87c4SChristoph Hellwig xfs_vm_write_end(
1371c59d87c4SChristoph Hellwig 	struct file		*file,
1372c59d87c4SChristoph Hellwig 	struct address_space	*mapping,
1373c59d87c4SChristoph Hellwig 	loff_t			pos,
1374c59d87c4SChristoph Hellwig 	unsigned		len,
1375c59d87c4SChristoph Hellwig 	unsigned		copied,
1376c59d87c4SChristoph Hellwig 	struct page		*page,
1377c59d87c4SChristoph Hellwig 	void			*fsdata)
1378c59d87c4SChristoph Hellwig {
1379c59d87c4SChristoph Hellwig 	int			ret;
1380c59d87c4SChristoph Hellwig 
1381c59d87c4SChristoph Hellwig 	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
1382c59d87c4SChristoph Hellwig 	if (unlikely(ret < len))
1383c59d87c4SChristoph Hellwig 		xfs_vm_write_failed(mapping, pos + len);
1384c59d87c4SChristoph Hellwig 	return ret;
1385c59d87c4SChristoph Hellwig }
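
/*
 * A short usage note, not from the original source: generic_write_end()
 * returns the number of bytes actually copied, so the 'ret < len' test
 * above catches short copies as well as outright failures; either way
 * xfs_vm_write_failed() trims back any delalloc reservation made for
 * the untouched tail of the request.
 */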
1386c59d87c4SChristoph Hellwig 
1387c59d87c4SChristoph Hellwig STATIC sector_t
1388c59d87c4SChristoph Hellwig xfs_vm_bmap(
1389c59d87c4SChristoph Hellwig 	struct address_space	*mapping,
1390c59d87c4SChristoph Hellwig 	sector_t		block)
1391c59d87c4SChristoph Hellwig {
1392c59d87c4SChristoph Hellwig 	struct inode		*inode = (struct inode *)mapping->host;
1393c59d87c4SChristoph Hellwig 	struct xfs_inode	*ip = XFS_I(inode);
1394c59d87c4SChristoph Hellwig 
1395c59d87c4SChristoph Hellwig 	trace_xfs_vm_bmap(XFS_I(inode));
1396c59d87c4SChristoph Hellwig 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
1397c59d87c4SChristoph Hellwig 	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
1398c59d87c4SChristoph Hellwig 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
1399c59d87c4SChristoph Hellwig 	return generic_block_bmap(mapping, block, xfs_get_blocks);
1400c59d87c4SChristoph Hellwig }
1401c59d87c4SChristoph Hellwig 
1402c59d87c4SChristoph Hellwig STATIC int
1403c59d87c4SChristoph Hellwig xfs_vm_readpage(
1404c59d87c4SChristoph Hellwig 	struct file		*unused,
1405c59d87c4SChristoph Hellwig 	struct page		*page)
1406c59d87c4SChristoph Hellwig {
1407c59d87c4SChristoph Hellwig 	return mpage_readpage(page, xfs_get_blocks);
1408c59d87c4SChristoph Hellwig }
1409c59d87c4SChristoph Hellwig 
1410c59d87c4SChristoph Hellwig STATIC int
1411c59d87c4SChristoph Hellwig xfs_vm_readpages(
1412c59d87c4SChristoph Hellwig 	struct file		*unused,
1413c59d87c4SChristoph Hellwig 	struct address_space	*mapping,
1414c59d87c4SChristoph Hellwig 	struct list_head	*pages,
1415c59d87c4SChristoph Hellwig 	unsigned		nr_pages)
1416c59d87c4SChristoph Hellwig {
1417c59d87c4SChristoph Hellwig 	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
1418c59d87c4SChristoph Hellwig }
1419c59d87c4SChristoph Hellwig 
1420c59d87c4SChristoph Hellwig const struct address_space_operations xfs_address_space_operations = {
1421c59d87c4SChristoph Hellwig 	.readpage		= xfs_vm_readpage,
1422c59d87c4SChristoph Hellwig 	.readpages		= xfs_vm_readpages,
1423c59d87c4SChristoph Hellwig 	.writepage		= xfs_vm_writepage,
1424c59d87c4SChristoph Hellwig 	.writepages		= xfs_vm_writepages,
1425c59d87c4SChristoph Hellwig 	.releasepage		= xfs_vm_releasepage,
1426c59d87c4SChristoph Hellwig 	.invalidatepage		= xfs_vm_invalidatepage,
1427c59d87c4SChristoph Hellwig 	.write_begin		= xfs_vm_write_begin,
1428c59d87c4SChristoph Hellwig 	.write_end		= xfs_vm_write_end,
1429c59d87c4SChristoph Hellwig 	.bmap			= xfs_vm_bmap,
1430c59d87c4SChristoph Hellwig 	.direct_IO		= xfs_vm_direct_IO,
1431c59d87c4SChristoph Hellwig 	.migratepage		= buffer_migrate_page,
1432c59d87c4SChristoph Hellwig 	.is_partially_uptodate  = block_is_partially_uptodate,
1433c59d87c4SChristoph Hellwig 	.error_remove_page	= generic_error_remove_page,
1434c59d87c4SChristoph Hellwig };
1435