xref: /linux/fs/xfs/xfs_aops.c (revision 4906e21545814e4129595118287a2f1415483c0b)
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

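/*
 * Walk all the buffers attached to a page and report whether any of
 * them are in delayed allocation or unwritten state.
 */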
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

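/*
 * Return the block device backing this inode's data: the realtime
 * device for realtime inodes, the data device for everything else.
 */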
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

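/*
 * Reserve a transaction up front so that the on-disk inode size can be
 * updated from I/O completion context.  The transaction is attached to
 * the ioend and is either committed or cancelled by the completion
 * handler.
 */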
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);

	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	rwsem_release(&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
		      1, _THIS_IP_);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp, 0);
}

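/*
 * Update the on-disk inode size at I/O completion time, using the
 * transaction that was reserved at submission time.
 */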
STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
	rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
			   0, 1, _THIS_IP_);

	return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * Schedule IO completion handling on the final put of an ioend.
 *
 * If there is no work to do we might as well call it a day and free the
 * ioend right now.
 */
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

		if (ioend->io_type == XFS_IO_UNWRITTEN)
			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
		else if (ioend->io_append_trans)
			queue_work(mp->m_data_workqueue, &ioend->io_work);
		else
			xfs_destroy_ioend(ioend);
	}
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	int		error = 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ioend->io_error = -EIO;
		goto done;
	}
	if (ioend->io_error)
		goto done;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == XFS_IO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize_ioend(ioend);
	} else {
		ASSERT(!xfs_ioend_is_append(ioend));
	}

done:
	if (error)
		ioend->io_error = error;
	xfs_destroy_ioend(ioend);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from being run before we have started
	 * all the I/O, and hence from calling the completion routine
	 * too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	ioend->io_offset = 0;
	ioend->io_size = 0;
	ioend->io_append_trans = NULL;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}

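/*
 * Look up the extent mapping for the block underlying @offset.  For
 * delalloc ranges without a real extent this also performs the actual
 * block allocation.  If @nonblocking is set and the ilock cannot be
 * taken immediately, -EAGAIN is returned instead of sleeping.
 */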
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type,
	int			nonblocking)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -EAGAIN;
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return error;

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return error;
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

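/*
 * Return true if @offset falls within the range covered by the cached
 * extent mapping in @imap.
 */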
STATIC int
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
}

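/*
 * Submit a bio on behalf of an ioend.  Take an extra reference on the
 * ioend for the duration of the I/O so that completion of this bio
 * cannot tear the ioend down while other bios are still in flight.
 */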
STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;
	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}

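/*
 * Allocate a bio sized to the underlying device and point it at the
 * disk block backing @bh.
 */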
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);

	ASSERT(bio->bi_private == NULL);
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	return bio;
}

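/*
 * Mark a buffer for asynchronous write-out.  The buffer must already
 * be mapped and locked, and must not be delalloc or unwritten.
 */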
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * if the page was not fully cleaned, we need to ensure that the higher
	 * layers come back to it correctly. That means we need to keep the page
	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
	 * write this page in this writeback sweep will be made.
	 */
	if (clear_dirty) {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	} else
		set_page_writeback_keepwrite(page);

	unlock_page(page);

	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we go, then we can end up with a page that only has some buffers
 * marked async write, and I/O completion can occur before we mark the
 * remaining buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 *
 * If @fail is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the ioend chain rather
 * than submit it to IO. This typically only happens on a filesystem shutdown.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	int			fail)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		/*
		 * If we are failing the IO now, just mark the ioend with an
		 * error and finish it. This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
		 * time.
		 */
		if (fail) {
			ioend->io_error = fail;
			xfs_finish_ioend(ioend);
			continue;
		}

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (xfs_bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this ioend.
 * Toss the ioend too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			/*
			 * The unwritten flag is cleared when added to the
			 * ioend. We're not submitting for I/O so mark the
			 * buffer unwritten again for next time around.
			 */
			if (ioend->io_type == XFS_IO_UNWRITTEN)
				set_buffer_unwritten(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * The ioend being built up is returned via @result.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

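/*
 * Translate the file offset of @bh into a disk block number using the
 * extent mapping in @imap and attach it to the buffer.
 */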
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

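/*
 * Map the buffer to disk and clear the delalloc and unwritten state
 * now that real blocks back it.
 */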
STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, only the first
 * buffer on the page is checked for a match.
 */
STATIC bool
xfs_check_page_type(
	struct page		*page,
	unsigned int		type,
	bool			check_all_buffers)
{
	struct buffer_head	*bh;
	struct buffer_head	*head;

	if (PageWriteback(page))
		return false;
	if (!page->mapping)
		return false;
	if (!page_has_buffers(page))
		return false;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)
				return true;
		}

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
			break;
	} while ((bh = bh->b_this_page) != head);

	return false;
}

/*
 * Allocate & map buffers for a page given the extent map and write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_check_page_type(page, (*ioendp)->io_type, false))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	/*
	 * If the current map does not span the entire page we are about to try
	 * to write, then give up. The only way we can write a page that spans
	 * multiple mappings in a single writeback iteration is via the
	 * xfs_vm_writepage() function. Data integrity writeback requires the
	 * entire page to be written in a single attempt, otherwise the part of
	 * the page we don't write here doesn't get written as part of the data
	 * integrity sync.
	 *
	 * For normal writeback, we also don't attempt to write partial pages
	 * here as it simply means that write_cache_pages() will see it under
	 * writeback and ignore the page until some point in the future, at
	 * which time this will be the only page in the file that needs
	 * writeback.  Hence for more optimal IO patterns, we should always
	 * avoid partial page writeback due to multiple mappings on a page here.
	 */
	if (!xfs_imap_valid(inode, imap, end_offset))
		goto fail_unlock_page;

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	/*
	 * The moment we find a buffer that doesn't match our current type
	 * specification or can't be written, abort the loop and start
	 * writeback. As per the above xfs_imap_valid() check, only
	 * xfs_vm_writepage() can handle partial page writeback fully - we are
	 * limited here to the buffers that are contiguous with the current
	 * ioend, and hence a buffer we can't write breaks that contiguity and
	 * we have to defer the rest of the IO to xfs_vm_writepage().
	 */
	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			break;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    buffer_mapped(bh)) {
			if (buffer_unwritten(bh))
				type = XFS_IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = XFS_IO_DELALLOC;
			else
				type = XFS_IO_OVERWRITE;

			/*
			 * imap should always be valid because of the above
			 * partial page end_offset check on the imap.
			 */
			ASSERT(xfs_imap_valid(inode, imap, offset));

			lock_buffer(bh);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			done = 1;
			break;
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by @imap and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					imap, ioendp, wbc);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

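/*
 * ->invalidatepage handler: trace the invalidation and let
 * block_invalidatepage() clean up the buffer heads.
 */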
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);
	block_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
	return;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct inode		*inode = page->mapping->host;
	struct buffer_head	*bh, *head;
	struct xfs_bmbt_irec	imap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned int		type;
	__uint64_t              end_offset;
	pgoff_t                 end_index, last_index;
	ssize_t			len;
	int			err, imap_valid = 0, uptodate = 1;
	int			count = 0;
	int			nonblocking = 0;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
		goto redirty;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;

	/*
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that end_index is an unsigned long; it would overflow
		 * if the given offset is greater than 16TB on a 32-bit system
		 * and if we do check whether the page is fully outside i_size
		 * via "if (page->index >= end_index + 1)", as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and be written out repeatedly, which would result in an
		 * infinite loop; the user program that performs this operation
		 * will hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if its
		 * offset is just equal to the EOF.
10286b7a03f0SChristoph Hellwig 		 */
10298695d27eSJie Liu 		if (page->index > end_index ||
10308695d27eSJie Liu 		    (page->index == end_index && offset_into_page == 0))
1031ff9a28f6SJan Kara 			goto redirty;
10326b7a03f0SChristoph Hellwig 
10336b7a03f0SChristoph Hellwig 		/*
10346b7a03f0SChristoph Hellwig 		 * The page straddles i_size.  It must be zeroed out on each
10356b7a03f0SChristoph Hellwig 		 * and every writepage invocation because it may be mmapped.
10366b7a03f0SChristoph Hellwig 		 * "A file is mapped in multiples of the page size.  For a file
10376b7a03f0SChristoph Hellwig 		 * that is not a multiple of the page size, the remaining
10386b7a03f0SChristoph Hellwig 		 * memory is zeroed when mapped, and writes to that region are
10396b7a03f0SChristoph Hellwig 		 * not written out to the file."
10406b7a03f0SChristoph Hellwig 		 */
10416b7a03f0SChristoph Hellwig 		zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
10428695d27eSJie Liu 
10438695d27eSJie Liu 		/* Adjust the end_offset to the end of file */
10448695d27eSJie Liu 		end_offset = offset;
1045c59d87c4SChristoph Hellwig 	}
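
	/*
	 * Worked example for the arithmetic above (illustrative values,
	 * assuming 4k pages so PAGE_CACHE_SHIFT == 12): with i_size = 10000,
	 * end_index = 10000 >> 12 = 2.  Pages 0 and 1 lie wholly inside
	 * i_size, so end_offset = (index + 1) << 12.  Page 2 straddles EOF:
	 * offset_into_page = 10000 & 4095 = 1808, bytes 1808..4095 are
	 * zeroed, and end_offset is clamped to 10000.  Page 3, or page 2 if
	 * i_size were exactly 8192 (offset_into_page == 0), is wholly
	 * beyond EOF and gets redirtied instead.
	 */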
1046c59d87c4SChristoph Hellwig 
1047c59d87c4SChristoph Hellwig 	len = 1 << inode->i_blkbits;
1048c59d87c4SChristoph Hellwig 
1049c59d87c4SChristoph Hellwig 	bh = head = page_buffers(page);
1050c59d87c4SChristoph Hellwig 	offset = page_offset(page);
10510d882a36SAlain Renaud 	type = XFS_IO_OVERWRITE;
1052c59d87c4SChristoph Hellwig 
1053c59d87c4SChristoph Hellwig 	if (wbc->sync_mode == WB_SYNC_NONE)
1054c59d87c4SChristoph Hellwig 		nonblocking = 1;
1055c59d87c4SChristoph Hellwig 
1056c59d87c4SChristoph Hellwig 	do {
1057c59d87c4SChristoph Hellwig 		int new_ioend = 0;
1058c59d87c4SChristoph Hellwig 
1059c59d87c4SChristoph Hellwig 		if (offset >= end_offset)
1060c59d87c4SChristoph Hellwig 			break;
1061c59d87c4SChristoph Hellwig 		if (!buffer_uptodate(bh))
1062c59d87c4SChristoph Hellwig 			uptodate = 0;
1063c59d87c4SChristoph Hellwig 
1064c59d87c4SChristoph Hellwig 		/*
1065c59d87c4SChristoph Hellwig 		 * set_page_dirty dirties all buffers in a page, independent
1066c59d87c4SChristoph Hellwig 		 * of their state.  The dirty state however is entirely
1067c59d87c4SChristoph Hellwig 		 * meaningless for holes (!mapped && uptodate), so skip
1068c59d87c4SChristoph Hellwig 		 * buffers covering holes here.
1069c59d87c4SChristoph Hellwig 		 */
1070c59d87c4SChristoph Hellwig 		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
1071c59d87c4SChristoph Hellwig 			imap_valid = 0;
1072c59d87c4SChristoph Hellwig 			continue;
1073c59d87c4SChristoph Hellwig 		}
1074c59d87c4SChristoph Hellwig 
1075c59d87c4SChristoph Hellwig 		if (buffer_unwritten(bh)) {
10760d882a36SAlain Renaud 			if (type != XFS_IO_UNWRITTEN) {
10770d882a36SAlain Renaud 				type = XFS_IO_UNWRITTEN;
1078c59d87c4SChristoph Hellwig 				imap_valid = 0;
1079c59d87c4SChristoph Hellwig 			}
1080c59d87c4SChristoph Hellwig 		} else if (buffer_delay(bh)) {
10810d882a36SAlain Renaud 			if (type != XFS_IO_DELALLOC) {
10820d882a36SAlain Renaud 				type = XFS_IO_DELALLOC;
1083c59d87c4SChristoph Hellwig 				imap_valid = 0;
1084c59d87c4SChristoph Hellwig 			}
1085c59d87c4SChristoph Hellwig 		} else if (buffer_uptodate(bh)) {
10860d882a36SAlain Renaud 			if (type != XFS_IO_OVERWRITE) {
10870d882a36SAlain Renaud 				type = XFS_IO_OVERWRITE;
1088c59d87c4SChristoph Hellwig 				imap_valid = 0;
1089c59d87c4SChristoph Hellwig 			}
1090c59d87c4SChristoph Hellwig 		} else {
10917d0fa3ecSAlain Renaud 			if (PageUptodate(page))
1092c59d87c4SChristoph Hellwig 				ASSERT(buffer_mapped(bh));
10937d0fa3ecSAlain Renaud 			/*
10947d0fa3ecSAlain Renaud 			 * This buffer is not uptodate and will not be
10957d0fa3ecSAlain Renaud 			 * written to disk.  Ensure that we will put any
10967d0fa3ecSAlain Renaud 			 * subsequent writeable buffers into a new
10977d0fa3ecSAlain Renaud 			 * ioend.
10987d0fa3ecSAlain Renaud 			 */
1099c59d87c4SChristoph Hellwig 			imap_valid = 0;
1100c59d87c4SChristoph Hellwig 			continue;
1101c59d87c4SChristoph Hellwig 		}
1102c59d87c4SChristoph Hellwig 
1103c59d87c4SChristoph Hellwig 		if (imap_valid)
1104c59d87c4SChristoph Hellwig 			imap_valid = xfs_imap_valid(inode, &imap, offset);
1105c59d87c4SChristoph Hellwig 		if (!imap_valid) {
1106c59d87c4SChristoph Hellwig 			/*
1107c59d87c4SChristoph Hellwig 			 * If we didn't have a valid mapping then we need to
1108c59d87c4SChristoph Hellwig 			 * put the new mapping into a separate ioend structure.
1109c59d87c4SChristoph Hellwig 			 * This ensures non-contiguous extents always have
1110c59d87c4SChristoph Hellwig 			 * separate ioends, which is particularly important
1111c59d87c4SChristoph Hellwig 			 * for unwritten extent conversion at I/O completion
1112c59d87c4SChristoph Hellwig 			 * time.
1113c59d87c4SChristoph Hellwig 			 */
1114c59d87c4SChristoph Hellwig 			new_ioend = 1;
1115c59d87c4SChristoph Hellwig 			err = xfs_map_blocks(inode, offset, &imap, type,
1116c59d87c4SChristoph Hellwig 					     nonblocking);
1117c59d87c4SChristoph Hellwig 			if (err)
1118c59d87c4SChristoph Hellwig 				goto error;
1119c59d87c4SChristoph Hellwig 			imap_valid = xfs_imap_valid(inode, &imap, offset);
1120c59d87c4SChristoph Hellwig 		}
1121c59d87c4SChristoph Hellwig 		if (imap_valid) {
1122c59d87c4SChristoph Hellwig 			lock_buffer(bh);
11230d882a36SAlain Renaud 			if (type != XFS_IO_OVERWRITE)
1124c59d87c4SChristoph Hellwig 				xfs_map_at_offset(inode, bh, &imap, offset);
1125c59d87c4SChristoph Hellwig 			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
1126c59d87c4SChristoph Hellwig 					 new_ioend);
1127c59d87c4SChristoph Hellwig 			count++;
1128c59d87c4SChristoph Hellwig 		}
1129c59d87c4SChristoph Hellwig 
1130c59d87c4SChristoph Hellwig 		if (!iohead)
1131c59d87c4SChristoph Hellwig 			iohead = ioend;
1132c59d87c4SChristoph Hellwig 
1133c59d87c4SChristoph Hellwig 	} while (offset += len, ((bh = bh->b_this_page) != head));
1134c59d87c4SChristoph Hellwig 
1135c59d87c4SChristoph Hellwig 	if (uptodate && bh == head)
1136c59d87c4SChristoph Hellwig 		SetPageUptodate(page);
1137c59d87c4SChristoph Hellwig 
1138c59d87c4SChristoph Hellwig 	xfs_start_page_writeback(page, 1, count);
1139c59d87c4SChristoph Hellwig 
11407bf7f352SDave Chinner 	/* if there is no IO to be submitted for this page, we are done */
11417bf7f352SDave Chinner 	if (!ioend)
11427bf7f352SDave Chinner 		return 0;
11437bf7f352SDave Chinner 
11447bf7f352SDave Chinner 	ASSERT(iohead);
11457bf7f352SDave Chinner 
11467bf7f352SDave Chinner 	/*
11477bf7f352SDave Chinner 	 * Any errors from this point onwards need to be reported through the IO
11487bf7f352SDave Chinner 	 * completion path as we have marked the initial page as under writeback
11497bf7f352SDave Chinner 	 * and unlocked it.
11507bf7f352SDave Chinner 	 */
11517bf7f352SDave Chinner 	if (imap_valid) {
1152c59d87c4SChristoph Hellwig 		xfs_off_t		end_index;
1153c59d87c4SChristoph Hellwig 
1154c59d87c4SChristoph Hellwig 		end_index = imap.br_startoff + imap.br_blockcount;
1155c59d87c4SChristoph Hellwig 
1156c59d87c4SChristoph Hellwig 		/* to bytes */
1157c59d87c4SChristoph Hellwig 		end_index <<= inode->i_blkbits;
1158c59d87c4SChristoph Hellwig 
1159c59d87c4SChristoph Hellwig 		/* to pages */
1160c59d87c4SChristoph Hellwig 		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;
1161c59d87c4SChristoph Hellwig 
1162c59d87c4SChristoph Hellwig 		/* check against file size */
1163c59d87c4SChristoph Hellwig 		if (end_index > last_index)
1164c59d87c4SChristoph Hellwig 			end_index = last_index;
1165c59d87c4SChristoph Hellwig 
1166c59d87c4SChristoph Hellwig 		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
1167c59d87c4SChristoph Hellwig 				  wbc, end_index);
1168c59d87c4SChristoph Hellwig 	}
1169c59d87c4SChristoph Hellwig 
11717bf7f352SDave Chinner 	/*
11727bf7f352SDave Chinner 	 * Reserve log space if we might write beyond the on-disk inode size.
11737bf7f352SDave Chinner 	 */
11747bf7f352SDave Chinner 	err = 0;
11757bf7f352SDave Chinner 	if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
11767bf7f352SDave Chinner 		err = xfs_setfilesize_trans_alloc(ioend);
11777bf7f352SDave Chinner 
11787bf7f352SDave Chinner 	xfs_submit_ioend(wbc, iohead, err);
1179c59d87c4SChristoph Hellwig 
1180c59d87c4SChristoph Hellwig 	return 0;
1181c59d87c4SChristoph Hellwig 
1182c59d87c4SChristoph Hellwig error:
1183c59d87c4SChristoph Hellwig 	if (iohead)
1184c59d87c4SChristoph Hellwig 		xfs_cancel_ioend(iohead);
1185c59d87c4SChristoph Hellwig 
1186c59d87c4SChristoph Hellwig 	if (err == -EAGAIN)
1187c59d87c4SChristoph Hellwig 		goto redirty;
1188c59d87c4SChristoph Hellwig 
1189c59d87c4SChristoph Hellwig 	xfs_aops_discard_page(page);
1190c59d87c4SChristoph Hellwig 	ClearPageUptodate(page);
1191c59d87c4SChristoph Hellwig 	unlock_page(page);
1192c59d87c4SChristoph Hellwig 	return err;
1193c59d87c4SChristoph Hellwig 
1194c59d87c4SChristoph Hellwig redirty:
1195c59d87c4SChristoph Hellwig 	redirty_page_for_writepage(wbc, page);
1196c59d87c4SChristoph Hellwig 	unlock_page(page);
1197c59d87c4SChristoph Hellwig 	return 0;
1198c59d87c4SChristoph Hellwig }
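
/*
 * Illustrative sketch only, not part of the driver and not called anywhere:
 * the circular buffer_head walk that xfs_vm_writepage() above relies on,
 * reduced to its minimal form.  The buffers on a page form a singly linked
 * ring, so the canonical idiom starts at page_buffers() and follows
 * b_this_page until it wraps back around to the head.  The helper name is
 * hypothetical.
 */
static inline int
xfs_page_has_delalloc_buffers(
	struct page		*page)
{
	struct buffer_head	*bh, *head;

	bh = head = page_buffers(page);
	do {
		if (buffer_delay(bh))
			return 1;
	} while ((bh = bh->b_this_page) != head);
	return 0;
}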
1199c59d87c4SChristoph Hellwig 
1200c59d87c4SChristoph Hellwig STATIC int
1201c59d87c4SChristoph Hellwig xfs_vm_writepages(
1202c59d87c4SChristoph Hellwig 	struct address_space	*mapping,
1203c59d87c4SChristoph Hellwig 	struct writeback_control *wbc)
1204c59d87c4SChristoph Hellwig {
1205c59d87c4SChristoph Hellwig 	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
1206c59d87c4SChristoph Hellwig 	return generic_writepages(mapping, wbc);
1207c59d87c4SChristoph Hellwig }
1208c59d87c4SChristoph Hellwig 
1209c59d87c4SChristoph Hellwig /*
1210c59d87c4SChristoph Hellwig  * Called to move a page into cleanable state - and from there
1211c59d87c4SChristoph Hellwig  * to be released. The page should already be clean. We always
1212c59d87c4SChristoph Hellwig  * have buffer heads in this call.
1213c59d87c4SChristoph Hellwig  *
1214c59d87c4SChristoph Hellwig  * Returns 1 if the page is ok to release, 0 otherwise.
1215c59d87c4SChristoph Hellwig  */
1216c59d87c4SChristoph Hellwig STATIC int
1217c59d87c4SChristoph Hellwig xfs_vm_releasepage(
1218c59d87c4SChristoph Hellwig 	struct page		*page,
1219c59d87c4SChristoph Hellwig 	gfp_t			gfp_mask)
1220c59d87c4SChristoph Hellwig {
1221c59d87c4SChristoph Hellwig 	int			delalloc, unwritten;
1222c59d87c4SChristoph Hellwig 
122334097dfeSLukas Czerner 	trace_xfs_releasepage(page->mapping->host, page, 0, 0);
1224c59d87c4SChristoph Hellwig 
1225c59d87c4SChristoph Hellwig 	xfs_count_page_state(page, &delalloc, &unwritten);
1226c59d87c4SChristoph Hellwig 
1227448011e2SChristoph Hellwig 	if (WARN_ON_ONCE(delalloc))
1228c59d87c4SChristoph Hellwig 		return 0;
1229448011e2SChristoph Hellwig 	if (WARN_ON_ONCE(unwritten))
1230c59d87c4SChristoph Hellwig 		return 0;
1231c59d87c4SChristoph Hellwig 
1232c59d87c4SChristoph Hellwig 	return try_to_free_buffers(page);
1233c59d87c4SChristoph Hellwig }
1234c59d87c4SChristoph Hellwig 
1235a719370bSDave Chinner /*
1236a06c277aSDave Chinner  * When we map a DIO buffer, we may need to attach an ioend that describes the
1237a06c277aSDave Chinner  * type of write IO we are doing. This tells the completion function which
1238a06c277aSDave Chinner  * operations it needs to perform. If the mapping is for an overwrite wholly
1239a06c277aSDave Chinner  * within the EOF then we don't need an ioend and so we don't allocate one.
1240a06c277aSDave Chinner  * This avoids the unnecessary overhead of allocating and freeing ioends for
1241a06c277aSDave Chinner  * workloads that don't require transactions on IO completion.
1242d5cc2e3fSDave Chinner  *
1243d5cc2e3fSDave Chinner  * If we get multiple mappings in a single IO, we might be mapping different
1244d5cc2e3fSDave Chinner  * types. But because the direct IO can only have a single private pointer, we
1245d5cc2e3fSDave Chinner  * need to ensure that:
1246d5cc2e3fSDave Chinner  *
1247a06c277aSDave Chinner  * a) i) the ioend spans the entire region of unwritten mappings; or
1248a06c277aSDave Chinner  *    ii) the ioend spans all the mappings that cross or are beyond EOF; and
1249d5cc2e3fSDave Chinner  * b) if it contains unwritten extents, it is *permanently* marked as such
1250d5cc2e3fSDave Chinner  *
1251d5cc2e3fSDave Chinner  * We could do this by chaining ioends like buffered IO does, but we only
1252d5cc2e3fSDave Chinner  * actually get one IO completion callback from the direct IO, and that spans
1253d5cc2e3fSDave Chinner  * the entire IO regardless of how many mappings and IOs are needed to complete
1254d5cc2e3fSDave Chinner  * the DIO. There is only going to be one reference to the ioend and its life
1255d5cc2e3fSDave Chinner  * cycle is constrained by the DIO completion code; hence we don't need
1256d5cc2e3fSDave Chinner  * reference counting here.
1257a719370bSDave Chinner  */
1258a719370bSDave Chinner static void
1259a719370bSDave Chinner xfs_map_direct(
1260a719370bSDave Chinner 	struct inode		*inode,
1261a719370bSDave Chinner 	struct buffer_head	*bh_result,
1262a719370bSDave Chinner 	struct xfs_bmbt_irec	*imap,
1263a719370bSDave Chinner 	xfs_off_t		offset)
1264a719370bSDave Chinner {
1265d5cc2e3fSDave Chinner 	struct xfs_ioend	*ioend;
1266d5cc2e3fSDave Chinner 	xfs_off_t		size = bh_result->b_size;
1267d5cc2e3fSDave Chinner 	int			type;
1268d5cc2e3fSDave Chinner 
1269d5cc2e3fSDave Chinner 	if (ISUNWRITTEN(imap))
1270d5cc2e3fSDave Chinner 		type = XFS_IO_UNWRITTEN;
1271d5cc2e3fSDave Chinner 	else
1272d5cc2e3fSDave Chinner 		type = XFS_IO_OVERWRITE;
1273d5cc2e3fSDave Chinner 
1274d5cc2e3fSDave Chinner 	trace_xfs_gbmap_direct(XFS_I(inode), offset, size, type, imap);
1275d5cc2e3fSDave Chinner 
1276d5cc2e3fSDave Chinner 	if (bh_result->b_private) {
1277d5cc2e3fSDave Chinner 		ioend = bh_result->b_private;
1278d5cc2e3fSDave Chinner 		ASSERT(ioend->io_size > 0);
1279d5cc2e3fSDave Chinner 		ASSERT(offset >= ioend->io_offset);
1280d5cc2e3fSDave Chinner 		if (offset + size > ioend->io_offset + ioend->io_size)
1281d5cc2e3fSDave Chinner 			ioend->io_size = offset - ioend->io_offset + size;
1282d5cc2e3fSDave Chinner 
1283d5cc2e3fSDave Chinner 		if (type == XFS_IO_UNWRITTEN && type != ioend->io_type)
1284d5cc2e3fSDave Chinner 			ioend->io_type = XFS_IO_UNWRITTEN;
1285d5cc2e3fSDave Chinner 
1286d5cc2e3fSDave Chinner 		trace_xfs_gbmap_direct_update(XFS_I(inode), ioend->io_offset,
1287d5cc2e3fSDave Chinner 					      ioend->io_size, ioend->io_type,
1288d5cc2e3fSDave Chinner 					      imap);
1289a06c277aSDave Chinner 	} else if (type == XFS_IO_UNWRITTEN ||
1290a06c277aSDave Chinner 		   offset + size > i_size_read(inode)) {
1291d5cc2e3fSDave Chinner 		ioend = xfs_alloc_ioend(inode, type);
1292d5cc2e3fSDave Chinner 		ioend->io_offset = offset;
1293d5cc2e3fSDave Chinner 		ioend->io_size = size;
1294a06c277aSDave Chinner 
1295d5cc2e3fSDave Chinner 		bh_result->b_private = ioend;
1296a06c277aSDave Chinner 		set_buffer_defer_completion(bh_result);
1297d5cc2e3fSDave Chinner 
1298d5cc2e3fSDave Chinner 		trace_xfs_gbmap_direct_new(XFS_I(inode), offset, size, type,
1299d5cc2e3fSDave Chinner 					   imap);
1300a06c277aSDave Chinner 	} else {
1301a06c277aSDave Chinner 		trace_xfs_gbmap_direct_none(XFS_I(inode), offset, size, type,
1302a06c277aSDave Chinner 					    imap);
1303a719370bSDave Chinner 	}
1304a719370bSDave Chinner }
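
/*
 * Worked example of the rules above (illustrative sizes): a single direct IO
 * first maps an unwritten extent at offset 0 for 8192 bytes, allocating an
 * ioend with io_offset = 0, io_size = 8192 and io_type = XFS_IO_UNWRITTEN.
 * A second mapping covers an overwrite at offset 8192 for 4096 bytes; as
 * b_private already points at the ioend, io_size is extended to
 * 8192 - 0 + 4096 = 12288, while io_type stays at XFS_IO_UNWRITTEN because
 * the unwritten state is permanent once set, exactly as rule (b) requires.
 */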
1305a719370bSDave Chinner 
13061fdca9c2SDave Chinner /*
13071fdca9c2SDave Chinner  * If this is O_DIRECT or the mpage code calling, tell them how large the
13081fdca9c2SDave Chinner  * mapping is so that we can avoid repeated get_blocks calls.
13091fdca9c2SDave Chinner  *
13101fdca9c2SDave Chinner  * If the mapping spans EOF, then we have to break the mapping up as the
13111fdca9c2SDave Chinner  * mapping for blocks beyond EOF must be marked new so that sub-block regions
13121fdca9c2SDave Chinner  * can be correctly zeroed. We can't do this for mappings within EOF unless
13131fdca9c2SDave Chinner  * the mapping was just allocated or is unwritten, otherwise the callers would
13141fdca9c2SDave Chinner  * overwrite existing data with zeros. Hence we have to split the mapping into
13151fdca9c2SDave Chinner  * a range up to and including EOF, and a second mapping for beyond EOF.
13161fdca9c2SDave Chinner  */
13171fdca9c2SDave Chinner static void
13181fdca9c2SDave Chinner xfs_map_trim_size(
13191fdca9c2SDave Chinner 	struct inode		*inode,
13201fdca9c2SDave Chinner 	sector_t		iblock,
13211fdca9c2SDave Chinner 	struct buffer_head	*bh_result,
13221fdca9c2SDave Chinner 	struct xfs_bmbt_irec	*imap,
13231fdca9c2SDave Chinner 	xfs_off_t		offset,
13241fdca9c2SDave Chinner 	ssize_t			size)
13251fdca9c2SDave Chinner {
13261fdca9c2SDave Chinner 	xfs_off_t		mapping_size;
13271fdca9c2SDave Chinner 
13281fdca9c2SDave Chinner 	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
13291fdca9c2SDave Chinner 	mapping_size <<= inode->i_blkbits;
13301fdca9c2SDave Chinner 
13311fdca9c2SDave Chinner 	ASSERT(mapping_size > 0);
13321fdca9c2SDave Chinner 	if (mapping_size > size)
13331fdca9c2SDave Chinner 		mapping_size = size;
13341fdca9c2SDave Chinner 	if (offset < i_size_read(inode) &&
13351fdca9c2SDave Chinner 	    offset + mapping_size >= i_size_read(inode)) {
13361fdca9c2SDave Chinner 		/* limit mapping to block that spans EOF */
13371fdca9c2SDave Chinner 		mapping_size = roundup_64(i_size_read(inode) - offset,
13381fdca9c2SDave Chinner 					  1 << inode->i_blkbits);
13391fdca9c2SDave Chinner 	}
13401fdca9c2SDave Chinner 	if (mapping_size > LONG_MAX)
13411fdca9c2SDave Chinner 		mapping_size = LONG_MAX;
13421fdca9c2SDave Chinner 
13431fdca9c2SDave Chinner 	bh_result->b_size = mapping_size;
13441fdca9c2SDave Chinner }
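
/*
 * Worked example (illustrative values, assuming 4k blocks so
 * i_blkbits == 12): an imap with br_startoff = 10 and br_blockcount = 10
 * queried at iblock = 12 gives mapping_size = (10 + 10 - 12) << 12 = 32768
 * bytes, which is then clamped to the requested size.  If i_size_read()
 * returns 53000 and offset = 49152, the mapping crosses EOF, so
 * mapping_size is rounded up from 53000 - 49152 = 3848 to a single 4096
 * byte block: the mapping stops at the block spanning EOF so the region
 * beyond it can be mapped separately and marked new.
 */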
13451fdca9c2SDave Chinner 
1346c59d87c4SChristoph Hellwig STATIC int
1347c59d87c4SChristoph Hellwig __xfs_get_blocks(
1348c59d87c4SChristoph Hellwig 	struct inode		*inode,
1349c59d87c4SChristoph Hellwig 	sector_t		iblock,
1350c59d87c4SChristoph Hellwig 	struct buffer_head	*bh_result,
1351c59d87c4SChristoph Hellwig 	int			create,
1352c59d87c4SChristoph Hellwig 	int			direct)
1353c59d87c4SChristoph Hellwig {
1354c59d87c4SChristoph Hellwig 	struct xfs_inode	*ip = XFS_I(inode);
1355c59d87c4SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
1356c59d87c4SChristoph Hellwig 	xfs_fileoff_t		offset_fsb, end_fsb;
1357c59d87c4SChristoph Hellwig 	int			error = 0;
1358c59d87c4SChristoph Hellwig 	int			lockmode = 0;
1359c59d87c4SChristoph Hellwig 	struct xfs_bmbt_irec	imap;
1360c59d87c4SChristoph Hellwig 	int			nimaps = 1;
1361c59d87c4SChristoph Hellwig 	xfs_off_t		offset;
1362c59d87c4SChristoph Hellwig 	ssize_t			size;
1363c59d87c4SChristoph Hellwig 	int			new = 0;
1364c59d87c4SChristoph Hellwig 
1365c59d87c4SChristoph Hellwig 	if (XFS_FORCED_SHUTDOWN(mp))
1366b474c7aeSEric Sandeen 		return -EIO;
1367c59d87c4SChristoph Hellwig 
1368c59d87c4SChristoph Hellwig 	offset = (xfs_off_t)iblock << inode->i_blkbits;
1369c59d87c4SChristoph Hellwig 	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
1370c59d87c4SChristoph Hellwig 	size = bh_result->b_size;
1371c59d87c4SChristoph Hellwig 
1372c59d87c4SChristoph Hellwig 	if (!create && direct && offset >= i_size_read(inode))
1373c59d87c4SChristoph Hellwig 		return 0;
1374c59d87c4SChristoph Hellwig 
1375507630b2SDave Chinner 	/*
1376507630b2SDave Chinner 	 * Direct I/O is usually done on preallocated files, so try getting
1377507630b2SDave Chinner 	 * a block mapping without an exclusive lock first.  For buffered
1378507630b2SDave Chinner 	 * writes we already have the exclusive iolock anyway, so avoiding
1379507630b2SDave Chinner 	 * a lock roundtrip here by taking the ilock exclusive from the
1380507630b2SDave Chinner 	 * beginning is a useful micro optimization.
1381507630b2SDave Chinner 	 */
1382507630b2SDave Chinner 	if (create && !direct) {
1383c59d87c4SChristoph Hellwig 		lockmode = XFS_ILOCK_EXCL;
1384c59d87c4SChristoph Hellwig 		xfs_ilock(ip, lockmode);
1385c59d87c4SChristoph Hellwig 	} else {
1386309ecac8SChristoph Hellwig 		lockmode = xfs_ilock_data_map_shared(ip);
1387c59d87c4SChristoph Hellwig 	}
1388c59d87c4SChristoph Hellwig 
1389d2c28191SDave Chinner 	ASSERT(offset <= mp->m_super->s_maxbytes);
1390d2c28191SDave Chinner 	if (offset + size > mp->m_super->s_maxbytes)
1391d2c28191SDave Chinner 		size = mp->m_super->s_maxbytes - offset;
1392c59d87c4SChristoph Hellwig 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
1393c59d87c4SChristoph Hellwig 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
1394c59d87c4SChristoph Hellwig 
13955c8ed202SDave Chinner 	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
13965c8ed202SDave Chinner 				&imap, &nimaps, XFS_BMAPI_ENTIRE);
1397c59d87c4SChristoph Hellwig 	if (error)
1398c59d87c4SChristoph Hellwig 		goto out_unlock;
1399c59d87c4SChristoph Hellwig 
1400c59d87c4SChristoph Hellwig 	if (create &&
1401c59d87c4SChristoph Hellwig 	    (!nimaps ||
1402c59d87c4SChristoph Hellwig 	     (imap.br_startblock == HOLESTARTBLOCK ||
1403c59d87c4SChristoph Hellwig 	      imap.br_startblock == DELAYSTARTBLOCK))) {
1404aff3a9edSDave Chinner 		if (direct || xfs_get_extsz_hint(ip)) {
1405507630b2SDave Chinner 			/*
1406507630b2SDave Chinner 			 * Drop the ilock in preparation for starting the block
1407507630b2SDave Chinner 			 * allocation transaction.  It will be retaken
1408507630b2SDave Chinner 			 * exclusively inside xfs_iomap_write_direct for the
1409507630b2SDave Chinner 			 * actual allocation.
1410507630b2SDave Chinner 			 */
1411507630b2SDave Chinner 			xfs_iunlock(ip, lockmode);
1412c59d87c4SChristoph Hellwig 			error = xfs_iomap_write_direct(ip, offset, size,
1413c59d87c4SChristoph Hellwig 						       &imap, nimaps);
1414507630b2SDave Chinner 			if (error)
14152451337dSDave Chinner 				return error;
1416d3bc815aSDave Chinner 			new = 1;
1417c59d87c4SChristoph Hellwig 		} else {
1418507630b2SDave Chinner 			/*
1419507630b2SDave Chinner 			 * Delalloc reservations do not require a transaction,
1420d3bc815aSDave Chinner 			 * we can go on without dropping the lock here. If we
1421d3bc815aSDave Chinner 			 * are allocating a new delalloc block, make sure that
1422d3bc815aSDave Chinner 			 * we set the new flag so that the buffer is marked
1423d3bc815aSDave Chinner 			 * new; then we know that it is newly allocated if
1424d3bc815aSDave Chinner 			 * the write fails.
1425507630b2SDave Chinner 			 */
1426d3bc815aSDave Chinner 			if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
1427d3bc815aSDave Chinner 				new = 1;
1428c59d87c4SChristoph Hellwig 			error = xfs_iomap_write_delay(ip, offset, size, &imap);
1429c59d87c4SChristoph Hellwig 			if (error)
1430c59d87c4SChristoph Hellwig 				goto out_unlock;
1431c59d87c4SChristoph Hellwig 
1432507630b2SDave Chinner 			xfs_iunlock(ip, lockmode);
1433507630b2SDave Chinner 		}
1434d5cc2e3fSDave Chinner 		trace_xfs_get_blocks_alloc(ip, offset, size,
1435d5cc2e3fSDave Chinner 				ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
1436d5cc2e3fSDave Chinner 						   : XFS_IO_DELALLOC, &imap);
1437c59d87c4SChristoph Hellwig 	} else if (nimaps) {
1438d5cc2e3fSDave Chinner 		trace_xfs_get_blocks_found(ip, offset, size,
1439d5cc2e3fSDave Chinner 				ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
1440d5cc2e3fSDave Chinner 						   : XFS_IO_OVERWRITE, &imap);
1441507630b2SDave Chinner 		xfs_iunlock(ip, lockmode);
1442c59d87c4SChristoph Hellwig 	} else {
1443c59d87c4SChristoph Hellwig 		trace_xfs_get_blocks_notfound(ip, offset, size);
1444c59d87c4SChristoph Hellwig 		goto out_unlock;
1445c59d87c4SChristoph Hellwig 	}
1446c59d87c4SChristoph Hellwig 
14471fdca9c2SDave Chinner 	/* trim mapping down to size requested */
14481fdca9c2SDave Chinner 	if (direct || size > (1 << inode->i_blkbits))
14491fdca9c2SDave Chinner 		xfs_map_trim_size(inode, iblock, bh_result,
14501fdca9c2SDave Chinner 				  &imap, offset, size);
14511fdca9c2SDave Chinner 
1452c59d87c4SChristoph Hellwig 	/*
1453a719370bSDave Chinner 	 * For unwritten extents do not report a disk address in the buffered
1454a719370bSDave Chinner 	 * read case (treat as if we're reading into a hole).
1455c59d87c4SChristoph Hellwig 	 */
1456a719370bSDave Chinner 	if (imap.br_startblock != HOLESTARTBLOCK &&
1457a719370bSDave Chinner 	    imap.br_startblock != DELAYSTARTBLOCK &&
1458a719370bSDave Chinner 	    (create || !ISUNWRITTEN(&imap))) {
1459c59d87c4SChristoph Hellwig 		xfs_map_buffer(inode, bh_result, &imap, offset);
1460a719370bSDave Chinner 		if (ISUNWRITTEN(&imap))
1461c59d87c4SChristoph Hellwig 			set_buffer_unwritten(bh_result);
1462a719370bSDave Chinner 		/* direct IO needs special help */
1463a719370bSDave Chinner 		if (create && direct)
1464a719370bSDave Chinner 			xfs_map_direct(inode, bh_result, &imap, offset);
1465c59d87c4SChristoph Hellwig 	}
1466c59d87c4SChristoph Hellwig 
1467c59d87c4SChristoph Hellwig 	/*
1468c59d87c4SChristoph Hellwig 	 * If this is a realtime file, data may be on a different device
1469c59d87c4SChristoph Hellwig 	 * to that pointed to from the buffer_head b_bdev currently.
1470c59d87c4SChristoph Hellwig 	 */
1471c59d87c4SChristoph Hellwig 	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
1472c59d87c4SChristoph Hellwig 
1473c59d87c4SChristoph Hellwig 	/*
1474c59d87c4SChristoph Hellwig 	 * If we previously allocated a block out beyond eof and we are now
1475c59d87c4SChristoph Hellwig 	 * coming back to use it then we will need to flag it as new even if it
1476c59d87c4SChristoph Hellwig 	 * has a disk address.
1477c59d87c4SChristoph Hellwig 	 *
1478c59d87c4SChristoph Hellwig 	 * With sub-block writes into unwritten extents we also need to mark
1479c59d87c4SChristoph Hellwig 	 * the buffer as new so that the unwritten parts of the buffer get
1480c59d87c4SChristoph Hellwig 	 * correctly zeroed.
1481c59d87c4SChristoph Hellwig 	 */
1482c59d87c4SChristoph Hellwig 	if (create &&
1483c59d87c4SChristoph Hellwig 	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
1484c59d87c4SChristoph Hellwig 	     (offset >= i_size_read(inode)) ||
1485c59d87c4SChristoph Hellwig 	     (new || ISUNWRITTEN(&imap))))
1486c59d87c4SChristoph Hellwig 		set_buffer_new(bh_result);
1487c59d87c4SChristoph Hellwig 
1488c59d87c4SChristoph Hellwig 	if (imap.br_startblock == DELAYSTARTBLOCK) {
1489c59d87c4SChristoph Hellwig 		BUG_ON(direct);
1490c59d87c4SChristoph Hellwig 		if (create) {
1491c59d87c4SChristoph Hellwig 			set_buffer_uptodate(bh_result);
1492c59d87c4SChristoph Hellwig 			set_buffer_mapped(bh_result);
1493c59d87c4SChristoph Hellwig 			set_buffer_delay(bh_result);
1494c59d87c4SChristoph Hellwig 		}
1495c59d87c4SChristoph Hellwig 	}
1496c59d87c4SChristoph Hellwig 
1497c59d87c4SChristoph Hellwig 	return 0;
1498c59d87c4SChristoph Hellwig 
1499c59d87c4SChristoph Hellwig out_unlock:
1500c59d87c4SChristoph Hellwig 	xfs_iunlock(ip, lockmode);
15012451337dSDave Chinner 	return error;
1502c59d87c4SChristoph Hellwig }
1503c59d87c4SChristoph Hellwig 
1504c59d87c4SChristoph Hellwig int
1505c59d87c4SChristoph Hellwig xfs_get_blocks(
1506c59d87c4SChristoph Hellwig 	struct inode		*inode,
1507c59d87c4SChristoph Hellwig 	sector_t		iblock,
1508c59d87c4SChristoph Hellwig 	struct buffer_head	*bh_result,
1509c59d87c4SChristoph Hellwig 	int			create)
1510c59d87c4SChristoph Hellwig {
1511c59d87c4SChristoph Hellwig 	return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
1512c59d87c4SChristoph Hellwig }
1513c59d87c4SChristoph Hellwig 
1514c59d87c4SChristoph Hellwig STATIC int
1515c59d87c4SChristoph Hellwig xfs_get_blocks_direct(
1516c59d87c4SChristoph Hellwig 	struct inode		*inode,
1517c59d87c4SChristoph Hellwig 	sector_t		iblock,
1518c59d87c4SChristoph Hellwig 	struct buffer_head	*bh_result,
1519c59d87c4SChristoph Hellwig 	int			create)
1520c59d87c4SChristoph Hellwig {
1521c59d87c4SChristoph Hellwig 	return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
1522c59d87c4SChristoph Hellwig }
1523c59d87c4SChristoph Hellwig 
1524c59d87c4SChristoph Hellwig /*
1525c59d87c4SChristoph Hellwig  * Complete a direct I/O write request.
1526c59d87c4SChristoph Hellwig  *
1527a06c277aSDave Chinner  * The ioend structure is passed from __xfs_get_blocks() to tell us what to do.
1528a06c277aSDave Chinner  * If no ioend exists (i.e. @private == NULL) then the write IO is an overwrite
1529a06c277aSDave Chinner  * wholly within the EOF and so there is nothing for us to do. Note that in this
1530a06c277aSDave Chinner  * case the completion can be called in interrupt context, whereas if we have an
1531a06c277aSDave Chinner  * ioend we will always be called in task context (i.e. from a workqueue).
1532c59d87c4SChristoph Hellwig  */
1533c59d87c4SChristoph Hellwig STATIC void
1534c59d87c4SChristoph Hellwig xfs_end_io_direct_write(
1535c59d87c4SChristoph Hellwig 	struct kiocb		*iocb,
1536c59d87c4SChristoph Hellwig 	loff_t			offset,
1537c59d87c4SChristoph Hellwig 	ssize_t			size,
15387b7a8665SChristoph Hellwig 	void			*private)
1539c59d87c4SChristoph Hellwig {
15402ba66237SChristoph Hellwig 	struct inode		*inode = file_inode(iocb->ki_filp);
15412ba66237SChristoph Hellwig 	struct xfs_inode	*ip = XFS_I(inode);
15422ba66237SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
1543d5cc2e3fSDave Chinner 	struct xfs_ioend	*ioend = private;
15442ba66237SChristoph Hellwig 
1545a06c277aSDave Chinner 	trace_xfs_gbmap_direct_endio(ip, offset, size,
1546a06c277aSDave Chinner 				     ioend ? ioend->io_type : 0, NULL);
1547a06c277aSDave Chinner 
1548a06c277aSDave Chinner 	if (!ioend) {
1549a06c277aSDave Chinner 		ASSERT(offset + size <= i_size_read(inode));
1550a06c277aSDave Chinner 		return;
1551a06c277aSDave Chinner 	}
15522ba66237SChristoph Hellwig 
15532ba66237SChristoph Hellwig 	if (XFS_FORCED_SHUTDOWN(mp))
15546dfa1b67SDave Chinner 		goto out_end_io;
1555c59d87c4SChristoph Hellwig 
1556c59d87c4SChristoph Hellwig 	/*
1557d5cc2e3fSDave Chinner 	 * dio completion end_io functions are only called on writes if more
1558d5cc2e3fSDave Chinner 	 * than 0 bytes were written.
15592813d682SChristoph Hellwig 	 */
1560d5cc2e3fSDave Chinner 	ASSERT(size > 0);
1561d5cc2e3fSDave Chinner 
1562d5cc2e3fSDave Chinner 	/*
1563d5cc2e3fSDave Chinner 	 * The ioend only maps whole blocks, while the IO may be sector aligned.
1564a06c277aSDave Chinner 	 * Hence the ioend offset/size may not match the IO offset/size exactly.
1565a06c277aSDave Chinner 	 * Because we don't map overwrites within EOF into the ioend, the offset
1566a06c277aSDave Chinner 	 * may not match, but only if the endio spans EOF.  Either way, write
1567a06c277aSDave Chinner 	 * the IO sizes into the ioend so that completion processing does the
1568a06c277aSDave Chinner 	 * right thing.
1569d5cc2e3fSDave Chinner 	 */
1570d5cc2e3fSDave Chinner 	ASSERT(offset + size <= ioend->io_offset + ioend->io_size);
1571d5cc2e3fSDave Chinner 	ioend->io_size = size;
1572d5cc2e3fSDave Chinner 	ioend->io_offset = offset;
1573c59d87c4SChristoph Hellwig 
1574c59d87c4SChristoph Hellwig 	/*
15756dfa1b67SDave Chinner 	 * The ioend tells us whether we are doing unwritten extent conversion
15766dfa1b67SDave Chinner 	 * or an append transaction that updates the on-disk file size. These
15776dfa1b67SDave Chinner 	 * cases are the only ones where we *potentially* need to update
1578a06c277aSDave Chinner 	 * the VFS inode size.
15796dfa1b67SDave Chinner 	 *
15806dfa1b67SDave Chinner 	 * We need to update the in-core inode size here so that we don't end up
1581a06c277aSDave Chinner 	 * with the on-disk inode size being outside the in-core inode size. We
1582a06c277aSDave Chinner 	 * have no other method of updating EOF for AIO, so always do it here
1583a06c277aSDave Chinner 	 * if necessary.
1584b9d59846SDave Chinner 	 *
1585b9d59846SDave Chinner 	 * We need to lock the test/set EOF update as we can be racing with
1586b9d59846SDave Chinner 	 * other IO completions here to update the EOF. Failing to serialise
1587b9d59846SDave Chinner 	 * here can result in EOF moving backwards and Bad Things Happen when
1588b9d59846SDave Chinner 	 * that occurs.
15892813d682SChristoph Hellwig 	 */
1590b9d59846SDave Chinner 	spin_lock(&ip->i_flags_lock);
15912ba66237SChristoph Hellwig 	if (offset + size > i_size_read(inode))
15922ba66237SChristoph Hellwig 		i_size_write(inode, offset + size);
1593b9d59846SDave Chinner 	spin_unlock(&ip->i_flags_lock);
15942813d682SChristoph Hellwig 
15952813d682SChristoph Hellwig 	/*
15966dfa1b67SDave Chinner 	 * If we are doing an append IO that needs to update the EOF on disk,
15976dfa1b67SDave Chinner 	 * do the transaction reserve now so we can use common end io
15986dfa1b67SDave Chinner 	 * processing. Stashing the error (if there is one) in the ioend will
15996dfa1b67SDave Chinner 	 * result in the ioend processing passing on the error where
16006dfa1b67SDave Chinner 	 * possible, as we can't return it from here.
1601c59d87c4SChristoph Hellwig 	 */
1602a06c277aSDave Chinner 	if (ioend->io_type == XFS_IO_OVERWRITE)
16036dfa1b67SDave Chinner 		ioend->io_error = xfs_setfilesize_trans_alloc(ioend);
1604c59d87c4SChristoph Hellwig 
16056dfa1b67SDave Chinner out_end_io:
16066dfa1b67SDave Chinner 	xfs_end_io(&ioend->io_work);
16072ba66237SChristoph Hellwig 	return;
16082ba66237SChristoph Hellwig }
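
/*
 * Worked example of the EOF update race described above (illustrative
 * offsets): AIO write A covers [N, N + 4k) and AIO write B covers
 * [N + 4k, N + 8k), with the in-core i_size currently at N.  If the test
 * and set were unlocked, both completions could read i_size == N, B could
 * set i_size to N + 8k, and A could then set it back to N + 4k: EOF has
 * moved backwards.  With i_flags_lock held across the test and set,
 * whichever completion runs second sees the other's update and leaves the
 * larger size in place.
 */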
1609c59d87c4SChristoph Hellwig 
1610c59d87c4SChristoph Hellwig STATIC ssize_t
1611c59d87c4SChristoph Hellwig xfs_vm_direct_IO(
1612c59d87c4SChristoph Hellwig 	struct kiocb		*iocb,
1613d8d3d94bSAl Viro 	struct iov_iter		*iter,
1614d8d3d94bSAl Viro 	loff_t			offset)
1615c59d87c4SChristoph Hellwig {
1616c59d87c4SChristoph Hellwig 	struct inode		*inode = iocb->ki_filp->f_mapping->host;
1617c59d87c4SChristoph Hellwig 	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
1618c59d87c4SChristoph Hellwig 
16196f673763SOmar Sandoval 	if (iov_iter_rw(iter) == WRITE) {
162017f8c842SOmar Sandoval 		return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
162117f8c842SOmar Sandoval 					    xfs_get_blocks_direct,
16229862f62fSChristoph Hellwig 					    xfs_end_io_direct_write, NULL,
16239862f62fSChristoph Hellwig 					    DIO_ASYNC_EXTEND);
16242ba66237SChristoph Hellwig 	}
162517f8c842SOmar Sandoval 	return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
162617f8c842SOmar Sandoval 				    xfs_get_blocks_direct, NULL, NULL, 0);
1627c59d87c4SChristoph Hellwig }
1628c59d87c4SChristoph Hellwig 
1629c59d87c4SChristoph Hellwig /*
16302813d682SChristoph Hellwig  * Punch out the delalloc blocks we have already allocated.
16312813d682SChristoph Hellwig  *
1632d3bc815aSDave Chinner  * Don't bother with xfs_setattr given that nothing can have made it to disk yet
1633d3bc815aSDave Chinner  * as the page is still locked at this point.
1634c59d87c4SChristoph Hellwig  */
1635d3bc815aSDave Chinner STATIC void
1636d3bc815aSDave Chinner xfs_vm_kill_delalloc_range(
1637d3bc815aSDave Chinner 	struct inode		*inode,
1638d3bc815aSDave Chinner 	loff_t			start,
1639d3bc815aSDave Chinner 	loff_t			end)
1640d3bc815aSDave Chinner {
1641c59d87c4SChristoph Hellwig 	struct xfs_inode	*ip = XFS_I(inode);
1642c59d87c4SChristoph Hellwig 	xfs_fileoff_t		start_fsb;
1643c59d87c4SChristoph Hellwig 	xfs_fileoff_t		end_fsb;
1644c59d87c4SChristoph Hellwig 	int			error;
1645c59d87c4SChristoph Hellwig 
1646d3bc815aSDave Chinner 	start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
1647d3bc815aSDave Chinner 	end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
1648c59d87c4SChristoph Hellwig 	if (end_fsb <= start_fsb)
1649c59d87c4SChristoph Hellwig 		return;
1650c59d87c4SChristoph Hellwig 
1651c59d87c4SChristoph Hellwig 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1652c59d87c4SChristoph Hellwig 	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
1653c59d87c4SChristoph Hellwig 						end_fsb - start_fsb);
1654c59d87c4SChristoph Hellwig 	if (error) {
1655c59d87c4SChristoph Hellwig 		/* something screwed, just bail */
1656c59d87c4SChristoph Hellwig 		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1657c59d87c4SChristoph Hellwig 			xfs_alert(ip->i_mount,
1658c59d87c4SChristoph Hellwig 		"xfs_vm_write_failed: unable to clean up ino %lld",
1659c59d87c4SChristoph Hellwig 					ip->i_ino);
1660c59d87c4SChristoph Hellwig 		}
1661c59d87c4SChristoph Hellwig 	}
1662c59d87c4SChristoph Hellwig 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1663c59d87c4SChristoph Hellwig }
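
/*
 * Worked example (illustrative values, assuming 4k filesystem blocks and
 * assuming XFS_B_TO_FSB rounds byte offsets up to whole blocks): for
 * start = 6000 and end = 10000, start_fsb = 2 and end_fsb = 3, so only
 * block 2 (bytes 8192..12287) is punched.  Rounding start up avoids
 * punching a block that still contains valid data below start.
 */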
1664d3bc815aSDave Chinner 
1665d3bc815aSDave Chinner STATIC void
1666d3bc815aSDave Chinner xfs_vm_write_failed(
1667d3bc815aSDave Chinner 	struct inode		*inode,
1668d3bc815aSDave Chinner 	struct page		*page,
1669d3bc815aSDave Chinner 	loff_t			pos,
1670d3bc815aSDave Chinner 	unsigned		len)
1671d3bc815aSDave Chinner {
167258e59854SJie Liu 	loff_t			block_offset;
1673d3bc815aSDave Chinner 	loff_t			block_start;
1674d3bc815aSDave Chinner 	loff_t			block_end;
1675d3bc815aSDave Chinner 	loff_t			from = pos & (PAGE_CACHE_SIZE - 1);
1676d3bc815aSDave Chinner 	loff_t			to = from + len;
1677d3bc815aSDave Chinner 	struct buffer_head	*bh, *head;
1678d3bc815aSDave Chinner 
167958e59854SJie Liu 	/*
168058e59854SJie Liu 	 * The request pos offset might be 32 or 64 bit; this is all fine
168158e59854SJie Liu 	 * on a 64-bit platform.  However, for a 64-bit pos request on a
168258e59854SJie Liu 	 * 32-bit platform, the high 32 bits will be masked off if we
168358e59854SJie Liu 	 * evaluate block_offset via (pos & PAGE_MASK), because PAGE_MASK
168458e59854SJie Liu 	 * is 0xfffff000 as an unsigned long.  The result is then
168558e59854SJie Liu 	 * incorrect, which would cause the following ASSERT to fail in
168658e59854SJie Liu 	 * most cases.  To avoid this mismatch problem, we evaluate the
168758e59854SJie Liu 	 * block_offset of the start of the page using shifts rather
168858e59854SJie Liu 	 * than masks.
168958e59854SJie Liu 	 */
169058e59854SJie Liu 	block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;
169158e59854SJie Liu 
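	/*
	 * Worked example (illustrative value, assuming 4k pages): with
	 * pos = 0x100001a00, just past 4GiB, a 32-bit unsigned long
	 * PAGE_MASK of 0xfffff000 zero-extends to 0x00000000fffff000, so
	 * (pos & PAGE_MASK) would yield 0x1000 and silently drop the high
	 * bit.  The shift form above gives the correct 0x100001000; with
	 * from = pos & 4095 = 0xa00, the ASSERT below holds.
	 */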
1692d3bc815aSDave Chinner 	ASSERT(block_offset + from == pos);
1693d3bc815aSDave Chinner 
1694d3bc815aSDave Chinner 	head = page_buffers(page);
1695d3bc815aSDave Chinner 	block_start = 0;
1696d3bc815aSDave Chinner 	for (bh = head; bh != head || !block_start;
1697d3bc815aSDave Chinner 	     bh = bh->b_this_page, block_start = block_end,
1698d3bc815aSDave Chinner 				   block_offset += bh->b_size) {
1699d3bc815aSDave Chinner 		block_end = block_start + bh->b_size;
1700d3bc815aSDave Chinner 
1701d3bc815aSDave Chinner 		/* skip buffers before the write */
1702d3bc815aSDave Chinner 		if (block_end <= from)
1703d3bc815aSDave Chinner 			continue;
1704d3bc815aSDave Chinner 
1705d3bc815aSDave Chinner 		/* if the buffer is after the write, we're done */
1706d3bc815aSDave Chinner 		if (block_start >= to)
1707d3bc815aSDave Chinner 			break;
1708d3bc815aSDave Chinner 
1709d3bc815aSDave Chinner 		if (!buffer_delay(bh))
1710d3bc815aSDave Chinner 			continue;
1711d3bc815aSDave Chinner 
1712d3bc815aSDave Chinner 		if (!buffer_new(bh) && block_offset < i_size_read(inode))
1713d3bc815aSDave Chinner 			continue;
1714d3bc815aSDave Chinner 
1715d3bc815aSDave Chinner 		xfs_vm_kill_delalloc_range(inode, block_offset,
1716d3bc815aSDave Chinner 					   block_offset + bh->b_size);
17174ab9ed57SDave Chinner 
17184ab9ed57SDave Chinner 		/*
17194ab9ed57SDave Chinner 		 * This buffer does not contain data anymore.  Make sure anyone
17204ab9ed57SDave Chinner 		 * who finds it knows that for certain.
17214ab9ed57SDave Chinner 		 */
17224ab9ed57SDave Chinner 		clear_buffer_delay(bh);
17234ab9ed57SDave Chinner 		clear_buffer_uptodate(bh);
17244ab9ed57SDave Chinner 		clear_buffer_mapped(bh);
17254ab9ed57SDave Chinner 		clear_buffer_new(bh);
17264ab9ed57SDave Chinner 		clear_buffer_dirty(bh);
1727c59d87c4SChristoph Hellwig 	}
1728c59d87c4SChristoph Hellwig 
1729d3bc815aSDave Chinner }
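
/*
 * Worked example for the buffer walk above (illustrative, assuming a 4k page
 * with four 1k buffers): a failed write with from = 1024 and len = 1536
 * gives to = 2560.  The buffer at [0, 1024) has block_end <= from and is
 * skipped; the buffers at [1024, 2048) and [2048, 3072) overlap the write
 * and, if delalloc and either new or beyond i_size, are punched out and have
 * their state bits cleared; the buffer at [3072, 4096) has
 * block_start >= to, so the walk stops.
 */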
1730d3bc815aSDave Chinner 
1731d3bc815aSDave Chinner /*
1732d3bc815aSDave Chinner  * This used to call block_write_begin(), but it unlocks and releases the page
1733d3bc815aSDave Chinner  * on error, and we need that page to be able to punch stale delalloc blocks out
1734d3bc815aSDave Chinner  * on failure; hence we copy-n-waste it here and call xfs_vm_write_failed() at
1735d3bc815aSDave Chinner  * the appropriate point.
1736d3bc815aSDave Chinner  */
1737c59d87c4SChristoph Hellwig STATIC int
1738c59d87c4SChristoph Hellwig xfs_vm_write_begin(
1739c59d87c4SChristoph Hellwig 	struct file		*file,
1740c59d87c4SChristoph Hellwig 	struct address_space	*mapping,
1741c59d87c4SChristoph Hellwig 	loff_t			pos,
1742c59d87c4SChristoph Hellwig 	unsigned		len,
1743c59d87c4SChristoph Hellwig 	unsigned		flags,
1744c59d87c4SChristoph Hellwig 	struct page		**pagep,
1745c59d87c4SChristoph Hellwig 	void			**fsdata)
1746c59d87c4SChristoph Hellwig {
1747d3bc815aSDave Chinner 	pgoff_t			index = pos >> PAGE_CACHE_SHIFT;
1748d3bc815aSDave Chinner 	struct page		*page;
1749d3bc815aSDave Chinner 	int			status;
1750c59d87c4SChristoph Hellwig 
1751d3bc815aSDave Chinner 	ASSERT(len <= PAGE_CACHE_SIZE);
1752d3bc815aSDave Chinner 
1753ad22c7a0SDave Chinner 	page = grab_cache_page_write_begin(mapping, index, flags);
1754d3bc815aSDave Chinner 	if (!page)
1755d3bc815aSDave Chinner 		return -ENOMEM;
1756d3bc815aSDave Chinner 
1757d3bc815aSDave Chinner 	status = __block_write_begin(page, pos, len, xfs_get_blocks);
1758d3bc815aSDave Chinner 	if (unlikely(status)) {
1759d3bc815aSDave Chinner 		struct inode	*inode = mapping->host;
176072ab70a1SDave Chinner 		size_t		isize = i_size_read(inode);
1761d3bc815aSDave Chinner 
1762d3bc815aSDave Chinner 		xfs_vm_write_failed(inode, page, pos, len);
1763d3bc815aSDave Chinner 		unlock_page(page);
1764d3bc815aSDave Chinner 
176572ab70a1SDave Chinner 		/*
176672ab70a1SDave Chinner 		 * If the write is beyond EOF, we only want to kill blocks
176772ab70a1SDave Chinner 		 * allocated in this write, not blocks that were previously
176872ab70a1SDave Chinner 		 * written successfully.
176972ab70a1SDave Chinner 		 */
177072ab70a1SDave Chinner 		if (pos + len > isize) {
177172ab70a1SDave Chinner 			ssize_t start = max_t(ssize_t, pos, isize);
177272ab70a1SDave Chinner 
177372ab70a1SDave Chinner 			truncate_pagecache_range(inode, start, pos + len);
177472ab70a1SDave Chinner 		}
1775d3bc815aSDave Chinner 
1776d3bc815aSDave Chinner 		page_cache_release(page);
1777d3bc815aSDave Chinner 		page = NULL;
1778c59d87c4SChristoph Hellwig 	}
1779c59d87c4SChristoph Hellwig 
1780d3bc815aSDave Chinner 	*pagep = page;
1781d3bc815aSDave Chinner 	return status;
1782d3bc815aSDave Chinner }
1783d3bc815aSDave Chinner 
1784d3bc815aSDave Chinner /*
1785aad3f375SDave Chinner  * On failure, we only need to kill delalloc blocks beyond EOF in the range of
1786aad3f375SDave Chinner  * this specific write because they will never be written. Previous writes
1787aad3f375SDave Chinner  * beyond EOF where block allocation succeeded do not need to be trashed, so
1788aad3f375SDave Chinner  * only new blocks from this write should be trashed. For blocks within
1789aad3f375SDave Chinner  * EOF, generic_write_end() zeros them so they are safe to leave alone and be
1790aad3f375SDave Chinner  * written with all the other valid data.
1791d3bc815aSDave Chinner  */
1792c59d87c4SChristoph Hellwig STATIC int
1793c59d87c4SChristoph Hellwig xfs_vm_write_end(
1794c59d87c4SChristoph Hellwig 	struct file		*file,
1795c59d87c4SChristoph Hellwig 	struct address_space	*mapping,
1796c59d87c4SChristoph Hellwig 	loff_t			pos,
1797c59d87c4SChristoph Hellwig 	unsigned		len,
1798c59d87c4SChristoph Hellwig 	unsigned		copied,
1799c59d87c4SChristoph Hellwig 	struct page		*page,
1800c59d87c4SChristoph Hellwig 	void			*fsdata)
1801c59d87c4SChristoph Hellwig {
1802c59d87c4SChristoph Hellwig 	int			ret;
1803c59d87c4SChristoph Hellwig 
1804d3bc815aSDave Chinner 	ASSERT(len <= PAGE_CACHE_SIZE);
1805d3bc815aSDave Chinner 
1806c59d87c4SChristoph Hellwig 	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
1807d3bc815aSDave Chinner 	if (unlikely(ret < len)) {
1808d3bc815aSDave Chinner 		struct inode	*inode = mapping->host;
1809d3bc815aSDave Chinner 		size_t		isize = i_size_read(inode);
1810d3bc815aSDave Chinner 		loff_t		to = pos + len;
1811d3bc815aSDave Chinner 
1812d3bc815aSDave Chinner 		if (to > isize) {
1813aad3f375SDave Chinner 			/* only kill blocks in this write beyond EOF */
1814aad3f375SDave Chinner 			if (pos > isize)
1815aad3f375SDave Chinner 				isize = pos;
1816d3bc815aSDave Chinner 			xfs_vm_kill_delalloc_range(inode, isize, to);
1817aad3f375SDave Chinner 			truncate_pagecache_range(inode, isize, to);
1818d3bc815aSDave Chinner 		}
1819d3bc815aSDave Chinner 	}
1820c59d87c4SChristoph Hellwig 	return ret;
1821c59d87c4SChristoph Hellwig }
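
/*
 * Worked example of the short-copy handling above (illustrative sizes): with
 * i_size = 5000, a write at pos = 8000 for len = 2000 that copies nothing
 * gives to = 10000 > isize.  Because pos > isize, isize is first bumped to
 * 8000, so only the delalloc blocks and page cache in [8000, 10000) are
 * thrown away; blocks between 5000 and 8000 allocated by earlier successful
 * writes are left untouched.
 */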
1822c59d87c4SChristoph Hellwig 
1823c59d87c4SChristoph Hellwig STATIC sector_t
1824c59d87c4SChristoph Hellwig xfs_vm_bmap(
1825c59d87c4SChristoph Hellwig 	struct address_space	*mapping,
1826c59d87c4SChristoph Hellwig 	sector_t		block)
1827c59d87c4SChristoph Hellwig {
1828c59d87c4SChristoph Hellwig 	struct inode		*inode = (struct inode *)mapping->host;
1829c59d87c4SChristoph Hellwig 	struct xfs_inode	*ip = XFS_I(inode);
1830c59d87c4SChristoph Hellwig 
1831c59d87c4SChristoph Hellwig 	trace_xfs_vm_bmap(XFS_I(inode));
1832c59d87c4SChristoph Hellwig 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
18334bc1ea6bSDave Chinner 	filemap_write_and_wait(mapping);
1834c59d87c4SChristoph Hellwig 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
1835c59d87c4SChristoph Hellwig 	return generic_block_bmap(mapping, block, xfs_get_blocks);
1836c59d87c4SChristoph Hellwig }
1837c59d87c4SChristoph Hellwig 
1838c59d87c4SChristoph Hellwig STATIC int
1839c59d87c4SChristoph Hellwig xfs_vm_readpage(
1840c59d87c4SChristoph Hellwig 	struct file		*unused,
1841c59d87c4SChristoph Hellwig 	struct page		*page)
1842c59d87c4SChristoph Hellwig {
1843c59d87c4SChristoph Hellwig 	return mpage_readpage(page, xfs_get_blocks);
1844c59d87c4SChristoph Hellwig }
1845c59d87c4SChristoph Hellwig 
1846c59d87c4SChristoph Hellwig STATIC int
1847c59d87c4SChristoph Hellwig xfs_vm_readpages(
1848c59d87c4SChristoph Hellwig 	struct file		*unused,
1849c59d87c4SChristoph Hellwig 	struct address_space	*mapping,
1850c59d87c4SChristoph Hellwig 	struct list_head	*pages,
1851c59d87c4SChristoph Hellwig 	unsigned		nr_pages)
1852c59d87c4SChristoph Hellwig {
1853c59d87c4SChristoph Hellwig 	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
1854c59d87c4SChristoph Hellwig }
1855c59d87c4SChristoph Hellwig 
185622e757a4SDave Chinner /*
185722e757a4SDave Chinner  * This is basically a copy of __set_page_dirty_buffers() with one
185822e757a4SDave Chinner  * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
185922e757a4SDave Chinner  * dirty, we'll never be able to clean them because we don't write buffers
186022e757a4SDave Chinner  * beyond EOF, and that means we can't invalidate pages that span EOF
186122e757a4SDave Chinner  * that have been marked dirty. Further, the dirty state can leak into
186222e757a4SDave Chinner  * the file interior if the file is extended, resulting in all sorts of
186322e757a4SDave Chinner  * bad things happening as the state does not match the underlying data.
186422e757a4SDave Chinner  *
186522e757a4SDave Chinner  * XXX: this really indicates that bufferheads in XFS need to die. Warts like
186622e757a4SDave Chinner  * this only exist because of bufferheads and how the generic code manages them.
186722e757a4SDave Chinner  */
186822e757a4SDave Chinner STATIC int
186922e757a4SDave Chinner xfs_vm_set_page_dirty(
187022e757a4SDave Chinner 	struct page		*page)
187122e757a4SDave Chinner {
187222e757a4SDave Chinner 	struct address_space	*mapping = page->mapping;
187322e757a4SDave Chinner 	struct inode		*inode = mapping->host;
187422e757a4SDave Chinner 	loff_t			end_offset;
187522e757a4SDave Chinner 	loff_t			offset;
187622e757a4SDave Chinner 	int			newly_dirty;
187722e757a4SDave Chinner 
187822e757a4SDave Chinner 	if (unlikely(!mapping))
187922e757a4SDave Chinner 		return !TestSetPageDirty(page);
188022e757a4SDave Chinner 
188122e757a4SDave Chinner 	end_offset = i_size_read(inode);
188222e757a4SDave Chinner 	offset = page_offset(page);
188322e757a4SDave Chinner 
188422e757a4SDave Chinner 	spin_lock(&mapping->private_lock);
188522e757a4SDave Chinner 	if (page_has_buffers(page)) {
188622e757a4SDave Chinner 		struct buffer_head *head = page_buffers(page);
188722e757a4SDave Chinner 		struct buffer_head *bh = head;
188822e757a4SDave Chinner 
188922e757a4SDave Chinner 		do {
189022e757a4SDave Chinner 			if (offset < end_offset)
189122e757a4SDave Chinner 				set_buffer_dirty(bh);
189222e757a4SDave Chinner 			bh = bh->b_this_page;
189322e757a4SDave Chinner 			offset += 1 << inode->i_blkbits;
189422e757a4SDave Chinner 		} while (bh != head);
189522e757a4SDave Chinner 	}
189622e757a4SDave Chinner 	newly_dirty = !TestSetPageDirty(page);
189722e757a4SDave Chinner 	spin_unlock(&mapping->private_lock);
189822e757a4SDave Chinner 
189922e757a4SDave Chinner 	if (newly_dirty) {
190022e757a4SDave Chinner 		/* sigh - __set_page_dirty() is static, so copy it here, too */
190122e757a4SDave Chinner 		unsigned long flags;
190222e757a4SDave Chinner 
190322e757a4SDave Chinner 		spin_lock_irqsave(&mapping->tree_lock, flags);
190422e757a4SDave Chinner 		if (page->mapping) {	/* Race with truncate? */
190522e757a4SDave Chinner 			WARN_ON_ONCE(!PageUptodate(page));
190622e757a4SDave Chinner 			account_page_dirtied(page, mapping);
190722e757a4SDave Chinner 			radix_tree_tag_set(&mapping->page_tree,
190822e757a4SDave Chinner 					page_index(page), PAGECACHE_TAG_DIRTY);
190922e757a4SDave Chinner 		}
191022e757a4SDave Chinner 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
191122e757a4SDave Chinner 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
191222e757a4SDave Chinner 	}
191322e757a4SDave Chinner 	return newly_dirty;
191422e757a4SDave Chinner }
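
/*
 * Worked example of the EOF check above (illustrative, assuming a 4k page
 * with 1k buffers): for the page at index 2, offset starts at 8192.  With
 * i_size = 10000, the buffers at offsets 8192 and 9216 lie below end_offset
 * and are dirtied, while the buffers at 10240 and 11264 stay clean and are
 * therefore never submitted for writeback, which is exactly the leak this
 * function exists to prevent.
 */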
191522e757a4SDave Chinner 
1916c59d87c4SChristoph Hellwig const struct address_space_operations xfs_address_space_operations = {
1917c59d87c4SChristoph Hellwig 	.readpage		= xfs_vm_readpage,
1918c59d87c4SChristoph Hellwig 	.readpages		= xfs_vm_readpages,
1919c59d87c4SChristoph Hellwig 	.writepage		= xfs_vm_writepage,
1920c59d87c4SChristoph Hellwig 	.writepages		= xfs_vm_writepages,
192122e757a4SDave Chinner 	.set_page_dirty		= xfs_vm_set_page_dirty,
1922c59d87c4SChristoph Hellwig 	.releasepage		= xfs_vm_releasepage,
1923c59d87c4SChristoph Hellwig 	.invalidatepage		= xfs_vm_invalidatepage,
1924c59d87c4SChristoph Hellwig 	.write_begin		= xfs_vm_write_begin,
1925c59d87c4SChristoph Hellwig 	.write_end		= xfs_vm_write_end,
1926c59d87c4SChristoph Hellwig 	.bmap			= xfs_vm_bmap,
1927c59d87c4SChristoph Hellwig 	.direct_IO		= xfs_vm_direct_IO,
1928c59d87c4SChristoph Hellwig 	.migratepage		= buffer_migrate_page,
1929c59d87c4SChristoph Hellwig 	.is_partially_uptodate  = block_is_partially_uptodate,
1930c59d87c4SChristoph Hellwig 	.error_remove_page	= generic_error_remove_page,
1931c59d87c4SChristoph Hellwig };
1932