xref: /linux/fs/xfs/xfs_aops.c (revision 3994fc48957520df061990ed22fff96023cfd953)
10b61f8a4SDave Chinner // SPDX-License-Identifier: GPL-2.0
2c59d87c4SChristoph Hellwig /*
3c59d87c4SChristoph Hellwig  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
498c1a7c0SChristoph Hellwig  * Copyright (c) 2016-2018 Christoph Hellwig.
5c59d87c4SChristoph Hellwig  * All Rights Reserved.
6c59d87c4SChristoph Hellwig  */
7c59d87c4SChristoph Hellwig #include "xfs.h"
870a9883cSDave Chinner #include "xfs_shared.h"
9239880efSDave Chinner #include "xfs_format.h"
10239880efSDave Chinner #include "xfs_log_format.h"
11239880efSDave Chinner #include "xfs_trans_resv.h"
12c59d87c4SChristoph Hellwig #include "xfs_mount.h"
13c59d87c4SChristoph Hellwig #include "xfs_inode.h"
14239880efSDave Chinner #include "xfs_trans.h"
15281627dfSChristoph Hellwig #include "xfs_inode_item.h"
16c59d87c4SChristoph Hellwig #include "xfs_alloc.h"
17c59d87c4SChristoph Hellwig #include "xfs_error.h"
18c59d87c4SChristoph Hellwig #include "xfs_iomap.h"
19c59d87c4SChristoph Hellwig #include "xfs_trace.h"
20c59d87c4SChristoph Hellwig #include "xfs_bmap.h"
2168988114SDave Chinner #include "xfs_bmap_util.h"
22a4fbe6abSDave Chinner #include "xfs_bmap_btree.h"
23ef473667SDarrick J. Wong #include "xfs_reflink.h"
24c59d87c4SChristoph Hellwig #include <linux/writeback.h>
25c59d87c4SChristoph Hellwig 
26fbcc0256SDave Chinner /*
27fbcc0256SDave Chinner  * Structure owned by writepages, passed to individual writepage calls.
28fbcc0256SDave Chinner  */
29fbcc0256SDave Chinner struct xfs_writepage_ctx {
30fbcc0256SDave Chinner 	struct xfs_bmbt_irec    imap;
31be225fecSChristoph Hellwig 	int			fork;
32d9252d52SBrian Foster 	unsigned int		data_seq;
33e666aa37SChristoph Hellwig 	unsigned int		cow_seq;
34fbcc0256SDave Chinner 	struct xfs_ioend	*ioend;
35fbcc0256SDave Chinner };
36fbcc0256SDave Chinner 
3720a90f58SRoss Zwisler struct block_device *
38c59d87c4SChristoph Hellwig xfs_find_bdev_for_inode(
39c59d87c4SChristoph Hellwig 	struct inode		*inode)
40c59d87c4SChristoph Hellwig {
41c59d87c4SChristoph Hellwig 	struct xfs_inode	*ip = XFS_I(inode);
42c59d87c4SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
43c59d87c4SChristoph Hellwig 
44c59d87c4SChristoph Hellwig 	if (XFS_IS_REALTIME_INODE(ip))
45c59d87c4SChristoph Hellwig 		return mp->m_rtdev_targp->bt_bdev;
46c59d87c4SChristoph Hellwig 	else
47c59d87c4SChristoph Hellwig 		return mp->m_ddev_targp->bt_bdev;
48c59d87c4SChristoph Hellwig }
49c59d87c4SChristoph Hellwig 
50486aff5eSDan Williams struct dax_device *
51486aff5eSDan Williams xfs_find_daxdev_for_inode(
52486aff5eSDan Williams 	struct inode		*inode)
53486aff5eSDan Williams {
54486aff5eSDan Williams 	struct xfs_inode	*ip = XFS_I(inode);
55486aff5eSDan Williams 	struct xfs_mount	*mp = ip->i_mount;
56486aff5eSDan Williams 
57486aff5eSDan Williams 	if (XFS_IS_REALTIME_INODE(ip))
58486aff5eSDan Williams 		return mp->m_rtdev_targp->bt_daxdev;
59486aff5eSDan Williams 	else
60486aff5eSDan Williams 		return mp->m_ddev_targp->bt_daxdev;
61486aff5eSDan Williams }
62486aff5eSDan Williams 
63ac8ee546SChristoph Hellwig static void
64ac8ee546SChristoph Hellwig xfs_finish_page_writeback(
65ac8ee546SChristoph Hellwig 	struct inode		*inode,
66ac8ee546SChristoph Hellwig 	struct bio_vec	*bvec,
67ac8ee546SChristoph Hellwig 	int			error)
68ac8ee546SChristoph Hellwig {
6982cb1417SChristoph Hellwig 	struct iomap_page	*iop = to_iomap_page(bvec->bv_page);
7082cb1417SChristoph Hellwig 
71ac8ee546SChristoph Hellwig 	if (error) {
72ac8ee546SChristoph Hellwig 		SetPageError(bvec->bv_page);
73ac8ee546SChristoph Hellwig 		mapping_set_error(inode->i_mapping, -EIO);
74ac8ee546SChristoph Hellwig 	}
75ac8ee546SChristoph Hellwig 
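	/*
	 * Each bio_vec covering this page took a reference on the iomap_page
	 * write_count when it was added in xfs_add_to_ioend().  Drop that
	 * reference here and only end page writeback once the last segment
	 * for the page has completed.  Block size == page size filesystems
	 * have no iomap_page and end writeback immediately.
	 */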
7682cb1417SChristoph Hellwig 	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
7782cb1417SChristoph Hellwig 	ASSERT(!iop || atomic_read(&iop->write_count) > 0);
7837992c18SDave Chinner 
7982cb1417SChristoph Hellwig 	if (!iop || atomic_dec_and_test(&iop->write_count))
808353a814SChristoph Hellwig 		end_page_writeback(bvec->bv_page);
8137992c18SDave Chinner }
8237992c18SDave Chinner 
8337992c18SDave Chinner /*
8437992c18SDave Chinner  * We're now finished for good with this ioend structure.  Update the page
8537992c18SDave Chinner  * state, release holds on bios, and finally free up memory.  Do not use the
8637992c18SDave Chinner  * ioend after this.
87c59d87c4SChristoph Hellwig  */
88c59d87c4SChristoph Hellwig STATIC void
89c59d87c4SChristoph Hellwig xfs_destroy_ioend(
900e51a8e1SChristoph Hellwig 	struct xfs_ioend	*ioend,
910e51a8e1SChristoph Hellwig 	int			error)
92c59d87c4SChristoph Hellwig {
9337992c18SDave Chinner 	struct inode		*inode = ioend->io_inode;
948353a814SChristoph Hellwig 	struct bio		*bio = &ioend->io_inline_bio;
958353a814SChristoph Hellwig 	struct bio		*last = ioend->io_bio, *next;
968353a814SChristoph Hellwig 	u64			start = bio->bi_iter.bi_sector;
978353a814SChristoph Hellwig 	bool			quiet = bio_flagged(bio, BIO_QUIET);
98c59d87c4SChristoph Hellwig 
990e51a8e1SChristoph Hellwig 	for (bio = &ioend->io_inline_bio; bio; bio = next) {
10037992c18SDave Chinner 		struct bio_vec	*bvec;
10137992c18SDave Chinner 		int		i;
1026dc4f100SMing Lei 		struct bvec_iter_all iter_all;
10337992c18SDave Chinner 
1040e51a8e1SChristoph Hellwig 		/*
1050e51a8e1SChristoph Hellwig 		 * For the last bio, bi_private points to the ioend, so we
1060e51a8e1SChristoph Hellwig 		 * need to explicitly end the iteration here.
1070e51a8e1SChristoph Hellwig 		 */
1080e51a8e1SChristoph Hellwig 		if (bio == last)
1090e51a8e1SChristoph Hellwig 			next = NULL;
1100e51a8e1SChristoph Hellwig 		else
11137992c18SDave Chinner 			next = bio->bi_private;
11237992c18SDave Chinner 
11337992c18SDave Chinner 		/* walk each page on bio, ending page IO on them */
1146dc4f100SMing Lei 		bio_for_each_segment_all(bvec, bio, i, iter_all)
11537992c18SDave Chinner 			xfs_finish_page_writeback(inode, bvec, error);
11637992c18SDave Chinner 		bio_put(bio);
117c59d87c4SChristoph Hellwig 	}
1188353a814SChristoph Hellwig 
1198353a814SChristoph Hellwig 	if (unlikely(error && !quiet)) {
1208353a814SChristoph Hellwig 		xfs_err_ratelimited(XFS_I(inode)->i_mount,
1218353a814SChristoph Hellwig 			"writeback error on sector %llu", start);
1228353a814SChristoph Hellwig 	}
123c59d87c4SChristoph Hellwig }
124c59d87c4SChristoph Hellwig 
125c59d87c4SChristoph Hellwig /*
126fc0063c4SChristoph Hellwig  * Fast and loose check if this write could update the on-disk inode size.
127fc0063c4SChristoph Hellwig  */
128fc0063c4SChristoph Hellwig static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
129fc0063c4SChristoph Hellwig {
130fc0063c4SChristoph Hellwig 	return ioend->io_offset + ioend->io_size >
131fc0063c4SChristoph Hellwig 		XFS_I(ioend->io_inode)->i_d.di_size;
132fc0063c4SChristoph Hellwig }
133fc0063c4SChristoph Hellwig 
134281627dfSChristoph Hellwig STATIC int
135281627dfSChristoph Hellwig xfs_setfilesize_trans_alloc(
136281627dfSChristoph Hellwig 	struct xfs_ioend	*ioend)
137281627dfSChristoph Hellwig {
138281627dfSChristoph Hellwig 	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
139281627dfSChristoph Hellwig 	struct xfs_trans	*tp;
140281627dfSChristoph Hellwig 	int			error;
141281627dfSChristoph Hellwig 
1424df0f7f1SDave Chinner 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0,
1434df0f7f1SDave Chinner 				XFS_TRANS_NOFS, &tp);
144253f4911SChristoph Hellwig 	if (error)
145281627dfSChristoph Hellwig 		return error;
146281627dfSChristoph Hellwig 
147281627dfSChristoph Hellwig 	ioend->io_append_trans = tp;
148281627dfSChristoph Hellwig 
149281627dfSChristoph Hellwig 	/*
150437a255aSDave Chinner 	 * We may pass freeze protection with a transaction.  So tell lockdep
151d9457dc0SJan Kara 	 * we released it.
152d9457dc0SJan Kara 	 */
153bee9182dSOleg Nesterov 	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
154d9457dc0SJan Kara 	/*
155281627dfSChristoph Hellwig 	 * We hand off the transaction to the completion thread now, so
156281627dfSChristoph Hellwig 	 * clear the flag here.
157281627dfSChristoph Hellwig 	 */
1589070733bSMichal Hocko 	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
159281627dfSChristoph Hellwig 	return 0;
160281627dfSChristoph Hellwig }
161281627dfSChristoph Hellwig 
162fc0063c4SChristoph Hellwig /*
1632813d682SChristoph Hellwig  * Update on-disk file size now that data has been written to disk.
164c59d87c4SChristoph Hellwig  */
165281627dfSChristoph Hellwig STATIC int
166e372843aSChristoph Hellwig __xfs_setfilesize(
1672ba66237SChristoph Hellwig 	struct xfs_inode	*ip,
1682ba66237SChristoph Hellwig 	struct xfs_trans	*tp,
1692ba66237SChristoph Hellwig 	xfs_off_t		offset,
1702ba66237SChristoph Hellwig 	size_t			size)
171c59d87c4SChristoph Hellwig {
172c59d87c4SChristoph Hellwig 	xfs_fsize_t		isize;
173c59d87c4SChristoph Hellwig 
174aa6bf01dSChristoph Hellwig 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1752ba66237SChristoph Hellwig 	isize = xfs_new_eof(ip, offset + size);
176281627dfSChristoph Hellwig 	if (!isize) {
177281627dfSChristoph Hellwig 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1784906e215SChristoph Hellwig 		xfs_trans_cancel(tp);
179281627dfSChristoph Hellwig 		return 0;
180c59d87c4SChristoph Hellwig 	}
181c59d87c4SChristoph Hellwig 
1822ba66237SChristoph Hellwig 	trace_xfs_setfilesize(ip, offset, size);
183281627dfSChristoph Hellwig 
184281627dfSChristoph Hellwig 	ip->i_d.di_size = isize;
185281627dfSChristoph Hellwig 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
186281627dfSChristoph Hellwig 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
187281627dfSChristoph Hellwig 
18870393313SChristoph Hellwig 	return xfs_trans_commit(tp);
189c59d87c4SChristoph Hellwig }
190c59d87c4SChristoph Hellwig 
191e372843aSChristoph Hellwig int
192e372843aSChristoph Hellwig xfs_setfilesize(
193e372843aSChristoph Hellwig 	struct xfs_inode	*ip,
194e372843aSChristoph Hellwig 	xfs_off_t		offset,
195e372843aSChristoph Hellwig 	size_t			size)
196e372843aSChristoph Hellwig {
197e372843aSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
198e372843aSChristoph Hellwig 	struct xfs_trans	*tp;
199e372843aSChristoph Hellwig 	int			error;
200e372843aSChristoph Hellwig 
201e372843aSChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
202e372843aSChristoph Hellwig 	if (error)
203e372843aSChristoph Hellwig 		return error;
204e372843aSChristoph Hellwig 
205e372843aSChristoph Hellwig 	return __xfs_setfilesize(ip, tp, offset, size);
206e372843aSChristoph Hellwig }
207e372843aSChristoph Hellwig 
2082ba66237SChristoph Hellwig STATIC int
2092ba66237SChristoph Hellwig xfs_setfilesize_ioend(
2100e51a8e1SChristoph Hellwig 	struct xfs_ioend	*ioend,
2110e51a8e1SChristoph Hellwig 	int			error)
2122ba66237SChristoph Hellwig {
2132ba66237SChristoph Hellwig 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
2142ba66237SChristoph Hellwig 	struct xfs_trans	*tp = ioend->io_append_trans;
2152ba66237SChristoph Hellwig 
2162ba66237SChristoph Hellwig 	/*
2172ba66237SChristoph Hellwig 	 * The transaction may have been allocated in the I/O submission thread,
2182ba66237SChristoph Hellwig 	 * thus we need to mark ourselves as being in a transaction manually.
2192ba66237SChristoph Hellwig 	 * Similarly for freeze protection.
2202ba66237SChristoph Hellwig 	 */
2219070733bSMichal Hocko 	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
222bee9182dSOleg Nesterov 	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);
2232ba66237SChristoph Hellwig 
2245cb13dcdSZhaohongjiang 	/* we abort the update if there was an IO error */
2250e51a8e1SChristoph Hellwig 	if (error) {
2265cb13dcdSZhaohongjiang 		xfs_trans_cancel(tp);
2270e51a8e1SChristoph Hellwig 		return error;
2285cb13dcdSZhaohongjiang 	}
2295cb13dcdSZhaohongjiang 
230e372843aSChristoph Hellwig 	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
2312ba66237SChristoph Hellwig }
2322ba66237SChristoph Hellwig 
233c59d87c4SChristoph Hellwig /*
234c59d87c4SChristoph Hellwig  * IO write completion.
235c59d87c4SChristoph Hellwig  */
236c59d87c4SChristoph Hellwig STATIC void
237cb357bf3SDarrick J. Wong xfs_end_ioend(
238cb357bf3SDarrick J. Wong 	struct xfs_ioend	*ioend)
239c59d87c4SChristoph Hellwig {
240*3994fc48SDarrick J. Wong 	struct list_head	ioend_list;
241c59d87c4SChristoph Hellwig 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
242787eb485SChristoph Hellwig 	xfs_off_t		offset = ioend->io_offset;
243787eb485SChristoph Hellwig 	size_t			size = ioend->io_size;
2444e4cbee9SChristoph Hellwig 	int			error;
245c59d87c4SChristoph Hellwig 
246af055e37SBrian Foster 	/*
247787eb485SChristoph Hellwig 	 * Just clean up the in-memory structures if the fs has been shut down.
248af055e37SBrian Foster 	 */
249787eb485SChristoph Hellwig 	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
2500e51a8e1SChristoph Hellwig 		error = -EIO;
25143caeb18SDarrick J. Wong 		goto done;
25243caeb18SDarrick J. Wong 	}
25343caeb18SDarrick J. Wong 
25443caeb18SDarrick J. Wong 	/*
255787eb485SChristoph Hellwig 	 * Clean up any COW blocks on an I/O error.
256c59d87c4SChristoph Hellwig 	 */
2574e4cbee9SChristoph Hellwig 	error = blk_status_to_errno(ioend->io_bio->bi_status);
258787eb485SChristoph Hellwig 	if (unlikely(error)) {
259be225fecSChristoph Hellwig 		if (ioend->io_fork == XFS_COW_FORK)
260787eb485SChristoph Hellwig 			xfs_reflink_cancel_cow_range(ip, offset, size, true);
2615cb13dcdSZhaohongjiang 		goto done;
262787eb485SChristoph Hellwig 	}
263787eb485SChristoph Hellwig 
264787eb485SChristoph Hellwig 	/*
265787eb485SChristoph Hellwig 	 * Success: commit the COW or unwritten blocks if needed.
266787eb485SChristoph Hellwig 	 */
267be225fecSChristoph Hellwig 	if (ioend->io_fork == XFS_COW_FORK)
268787eb485SChristoph Hellwig 		error = xfs_reflink_end_cow(ip, offset, size);
269be225fecSChristoph Hellwig 	else if (ioend->io_state == XFS_EXT_UNWRITTEN)
270ee70daabSEryu Guan 		error = xfs_iomap_write_unwritten(ip, offset, size, false);
271be225fecSChristoph Hellwig 	else
272787eb485SChristoph Hellwig 		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
27384803fb7SChristoph Hellwig 
27404f658eeSChristoph Hellwig done:
275787eb485SChristoph Hellwig 	if (ioend->io_append_trans)
276787eb485SChristoph Hellwig 		error = xfs_setfilesize_ioend(ioend, error);
277*3994fc48SDarrick J. Wong 	list_replace_init(&ioend->io_list, &ioend_list);
2780e51a8e1SChristoph Hellwig 	xfs_destroy_ioend(ioend, error);
279*3994fc48SDarrick J. Wong 
280*3994fc48SDarrick J. Wong 	while (!list_empty(&ioend_list)) {
281*3994fc48SDarrick J. Wong 		ioend = list_first_entry(&ioend_list, struct xfs_ioend,
282*3994fc48SDarrick J. Wong 				io_list);
283*3994fc48SDarrick J. Wong 		list_del_init(&ioend->io_list);
284*3994fc48SDarrick J. Wong 		xfs_destroy_ioend(ioend, error);
285*3994fc48SDarrick J. Wong 	}
286*3994fc48SDarrick J. Wong }
287*3994fc48SDarrick J. Wong 
288*3994fc48SDarrick J. Wong /*
289*3994fc48SDarrick J. Wong  * We can merge two adjacent ioends if they have the same set of work to do.
290*3994fc48SDarrick J. Wong  */
291*3994fc48SDarrick J. Wong static bool
292*3994fc48SDarrick J. Wong xfs_ioend_can_merge(
293*3994fc48SDarrick J. Wong 	struct xfs_ioend	*ioend,
294*3994fc48SDarrick J. Wong 	int			ioend_error,
295*3994fc48SDarrick J. Wong 	struct xfs_ioend	*next)
296*3994fc48SDarrick J. Wong {
297*3994fc48SDarrick J. Wong 	int			next_error;
298*3994fc48SDarrick J. Wong 
299*3994fc48SDarrick J. Wong 	next_error = blk_status_to_errno(next->io_bio->bi_status);
300*3994fc48SDarrick J. Wong 	if (ioend_error != next_error)
301*3994fc48SDarrick J. Wong 		return false;
302*3994fc48SDarrick J. Wong 	if ((ioend->io_fork == XFS_COW_FORK) ^ (next->io_fork == XFS_COW_FORK))
303*3994fc48SDarrick J. Wong 		return false;
304*3994fc48SDarrick J. Wong 	if ((ioend->io_state == XFS_EXT_UNWRITTEN) ^
305*3994fc48SDarrick J. Wong 	    (next->io_state == XFS_EXT_UNWRITTEN))
306*3994fc48SDarrick J. Wong 		return false;
307*3994fc48SDarrick J. Wong 	if (ioend->io_offset + ioend->io_size != next->io_offset)
308*3994fc48SDarrick J. Wong 		return false;
309*3994fc48SDarrick J. Wong 	if (xfs_ioend_is_append(ioend) != xfs_ioend_is_append(next))
310*3994fc48SDarrick J. Wong 		return false;
311*3994fc48SDarrick J. Wong 	return true;
312*3994fc48SDarrick J. Wong }
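
/*
 * For example, two contiguous unwritten data fork ioends with the same bio
 * status and the same append behaviour collapse into one completion, while a
 * data fork ioend followed by a COW fork ioend stays separate even if the
 * ranges are adjacent.
 */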
313*3994fc48SDarrick J. Wong 
314*3994fc48SDarrick J. Wong /* Try to merge adjacent completions. */
315*3994fc48SDarrick J. Wong STATIC void
316*3994fc48SDarrick J. Wong xfs_ioend_try_merge(
317*3994fc48SDarrick J. Wong 	struct xfs_ioend	*ioend,
318*3994fc48SDarrick J. Wong 	struct list_head	*more_ioends)
319*3994fc48SDarrick J. Wong {
320*3994fc48SDarrick J. Wong 	struct xfs_ioend	*next_ioend;
321*3994fc48SDarrick J. Wong 	int			ioend_error;
322*3994fc48SDarrick J. Wong 	int			error;
323*3994fc48SDarrick J. Wong 
324*3994fc48SDarrick J. Wong 	if (list_empty(more_ioends))
325*3994fc48SDarrick J. Wong 		return;
326*3994fc48SDarrick J. Wong 
327*3994fc48SDarrick J. Wong 	ioend_error = blk_status_to_errno(ioend->io_bio->bi_status);
328*3994fc48SDarrick J. Wong 
329*3994fc48SDarrick J. Wong 	while (!list_empty(more_ioends)) {
330*3994fc48SDarrick J. Wong 		next_ioend = list_first_entry(more_ioends, struct xfs_ioend,
331*3994fc48SDarrick J. Wong 				io_list);
332*3994fc48SDarrick J. Wong 		if (!xfs_ioend_can_merge(ioend, ioend_error, next_ioend))
333*3994fc48SDarrick J. Wong 			break;
334*3994fc48SDarrick J. Wong 		list_move_tail(&next_ioend->io_list, &ioend->io_list);
335*3994fc48SDarrick J. Wong 		ioend->io_size += next_ioend->io_size;
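		/*
		 * The surviving ioend now covers the merged range, so the
		 * size update queued on next_ioend is redundant.  Passing a
		 * non-zero error makes xfs_setfilesize_ioend() cancel that
		 * transaction and hand the value straight back, which the
		 * ASSERT below verifies.
		 */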
336*3994fc48SDarrick J. Wong 		if (ioend->io_append_trans) {
337*3994fc48SDarrick J. Wong 			error = xfs_setfilesize_ioend(next_ioend, 1);
338*3994fc48SDarrick J. Wong 			ASSERT(error == 1);
339*3994fc48SDarrick J. Wong 		}
340*3994fc48SDarrick J. Wong 	}
341*3994fc48SDarrick J. Wong }
342*3994fc48SDarrick J. Wong 
343*3994fc48SDarrick J. Wong /* list_sort compare function for ioends */
344*3994fc48SDarrick J. Wong static int
345*3994fc48SDarrick J. Wong xfs_ioend_compare(
346*3994fc48SDarrick J. Wong 	void			*priv,
347*3994fc48SDarrick J. Wong 	struct list_head	*a,
348*3994fc48SDarrick J. Wong 	struct list_head	*b)
349*3994fc48SDarrick J. Wong {
350*3994fc48SDarrick J. Wong 	struct xfs_ioend	*ia;
351*3994fc48SDarrick J. Wong 	struct xfs_ioend	*ib;
352*3994fc48SDarrick J. Wong 
353*3994fc48SDarrick J. Wong 	ia = container_of(a, struct xfs_ioend, io_list);
354*3994fc48SDarrick J. Wong 	ib = container_of(b, struct xfs_ioend, io_list);
355*3994fc48SDarrick J. Wong 	if (ia->io_offset < ib->io_offset)
356*3994fc48SDarrick J. Wong 		return -1;
357*3994fc48SDarrick J. Wong 	else if (ia->io_offset > ib->io_offset)
358*3994fc48SDarrick J. Wong 		return 1;
359*3994fc48SDarrick J. Wong 	return 0;
360c59d87c4SChristoph Hellwig }
361c59d87c4SChristoph Hellwig 
362cb357bf3SDarrick J. Wong /* Finish all pending io completions. */
363cb357bf3SDarrick J. Wong void
364cb357bf3SDarrick J. Wong xfs_end_io(
365cb357bf3SDarrick J. Wong 	struct work_struct	*work)
366cb357bf3SDarrick J. Wong {
367cb357bf3SDarrick J. Wong 	struct xfs_inode	*ip;
368cb357bf3SDarrick J. Wong 	struct xfs_ioend	*ioend;
369cb357bf3SDarrick J. Wong 	struct list_head	completion_list;
370cb357bf3SDarrick J. Wong 	unsigned long		flags;
371cb357bf3SDarrick J. Wong 
372cb357bf3SDarrick J. Wong 	ip = container_of(work, struct xfs_inode, i_ioend_work);
373cb357bf3SDarrick J. Wong 
374cb357bf3SDarrick J. Wong 	spin_lock_irqsave(&ip->i_ioend_lock, flags);
375cb357bf3SDarrick J. Wong 	list_replace_init(&ip->i_ioend_list, &completion_list);
376cb357bf3SDarrick J. Wong 	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
377cb357bf3SDarrick J. Wong 
378*3994fc48SDarrick J. Wong 	list_sort(NULL, &completion_list, xfs_ioend_compare);
379*3994fc48SDarrick J. Wong 
380cb357bf3SDarrick J. Wong 	while (!list_empty(&completion_list)) {
381cb357bf3SDarrick J. Wong 		ioend = list_first_entry(&completion_list, struct xfs_ioend,
382cb357bf3SDarrick J. Wong 				io_list);
383cb357bf3SDarrick J. Wong 		list_del_init(&ioend->io_list);
384*3994fc48SDarrick J. Wong 		xfs_ioend_try_merge(ioend, &completion_list);
385cb357bf3SDarrick J. Wong 		xfs_end_ioend(ioend);
386cb357bf3SDarrick J. Wong 	}
387cb357bf3SDarrick J. Wong }
388cb357bf3SDarrick J. Wong 
3890e51a8e1SChristoph Hellwig STATIC void
3900e51a8e1SChristoph Hellwig xfs_end_bio(
3910e51a8e1SChristoph Hellwig 	struct bio		*bio)
392c59d87c4SChristoph Hellwig {
3930e51a8e1SChristoph Hellwig 	struct xfs_ioend	*ioend = bio->bi_private;
394cb357bf3SDarrick J. Wong 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
395cb357bf3SDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
396cb357bf3SDarrick J. Wong 	unsigned long		flags;
397c59d87c4SChristoph Hellwig 
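	/*
	 * COW remapping, unwritten extent conversion and on-disk size updates
	 * all require transactions and hence process context, so defer those
	 * ioends to the per-inode completion list and workqueue.  Everything
	 * else can be torn down directly from bio completion context.
	 */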
398be225fecSChristoph Hellwig 	if (ioend->io_fork == XFS_COW_FORK ||
399cb357bf3SDarrick J. Wong 	    ioend->io_state == XFS_EXT_UNWRITTEN ||
400cb357bf3SDarrick J. Wong 	    ioend->io_append_trans != NULL) {
401cb357bf3SDarrick J. Wong 		spin_lock_irqsave(&ip->i_ioend_lock, flags);
402cb357bf3SDarrick J. Wong 		if (list_empty(&ip->i_ioend_list))
403cb357bf3SDarrick J. Wong 			WARN_ON_ONCE(!queue_work(mp->m_unwritten_workqueue,
404cb357bf3SDarrick J. Wong 						 &ip->i_ioend_work));
405cb357bf3SDarrick J. Wong 		list_add_tail(&ioend->io_list, &ip->i_ioend_list);
406cb357bf3SDarrick J. Wong 		spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
407cb357bf3SDarrick J. Wong 	} else
4084e4cbee9SChristoph Hellwig 		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
409c59d87c4SChristoph Hellwig }
410c59d87c4SChristoph Hellwig 
411d9252d52SBrian Foster /*
412d9252d52SBrian Foster  * Fast revalidation of the cached writeback mapping. Return true if the current
413d9252d52SBrian Foster  * mapping is valid, false otherwise.
414d9252d52SBrian Foster  */
415d9252d52SBrian Foster static bool
416d9252d52SBrian Foster xfs_imap_valid(
417d9252d52SBrian Foster 	struct xfs_writepage_ctx	*wpc,
418d9252d52SBrian Foster 	struct xfs_inode		*ip,
419d9252d52SBrian Foster 	xfs_fileoff_t			offset_fsb)
420d9252d52SBrian Foster {
421d9252d52SBrian Foster 	if (offset_fsb < wpc->imap.br_startoff ||
422d9252d52SBrian Foster 	    offset_fsb >= wpc->imap.br_startoff + wpc->imap.br_blockcount)
423d9252d52SBrian Foster 		return false;
424d9252d52SBrian Foster 	/*
425d9252d52SBrian Foster 	 * If this is a COW mapping, it is sufficient to check that the mapping
426d9252d52SBrian Foster 	 * covers the offset. Be careful to check this first because the caller
427d9252d52SBrian Foster 	 * can revalidate a COW mapping without updating the data seqno.
428d9252d52SBrian Foster 	 */
429be225fecSChristoph Hellwig 	if (wpc->fork == XFS_COW_FORK)
430d9252d52SBrian Foster 		return true;
431d9252d52SBrian Foster 
432d9252d52SBrian Foster 	/*
433d9252d52SBrian Foster 	 * This is not a COW mapping. Check the sequence number of the data fork
434d9252d52SBrian Foster 	 * because concurrent changes could have invalidated the extent. Check
435d9252d52SBrian Foster 	 * the COW fork because concurrent changes since the last time we
436d9252d52SBrian Foster 	 * checked (and found nothing at this offset) could have added
437d9252d52SBrian Foster 	 * overlapping blocks.
438d9252d52SBrian Foster 	 */
439d9252d52SBrian Foster 	if (wpc->data_seq != READ_ONCE(ip->i_df.if_seq))
440d9252d52SBrian Foster 		return false;
441d9252d52SBrian Foster 	if (xfs_inode_has_cow_data(ip) &&
442d9252d52SBrian Foster 	    wpc->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
443d9252d52SBrian Foster 		return false;
444d9252d52SBrian Foster 	return true;
445d9252d52SBrian Foster }
446d9252d52SBrian Foster 
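/*
 * For example, if wpc caches a data fork mapping with data_seq 5 and a
 * concurrent truncate or hole punch changes the data fork extent list,
 * if_seq moves on, the next xfs_imap_valid() call returns false, and
 * xfs_map_blocks() performs a fresh lookup.
 */
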
4474ad765edSChristoph Hellwig  * Pass in a delalloc extent and convert it to real extents, returning the real
4484ad765edSChristoph Hellwig  * Pass in a dellalloc extent and convert it to real extents, return the real
4494ad765edSChristoph Hellwig  * extent that maps offset_fsb in wpc->imap.
4504ad765edSChristoph Hellwig  *
4514ad765edSChristoph Hellwig  * The current page is held locked so nothing could have removed the block
4527588cbeeSChristoph Hellwig  * backing offset_fsb, although it could have moved from the COW to the data
4537588cbeeSChristoph Hellwig  * fork by another thread.
4544ad765edSChristoph Hellwig  */
4554ad765edSChristoph Hellwig static int
4564ad765edSChristoph Hellwig xfs_convert_blocks(
4574ad765edSChristoph Hellwig 	struct xfs_writepage_ctx *wpc,
4584ad765edSChristoph Hellwig 	struct xfs_inode	*ip,
4594ad765edSChristoph Hellwig 	xfs_fileoff_t		offset_fsb)
4604ad765edSChristoph Hellwig {
4614ad765edSChristoph Hellwig 	int			error;
4624ad765edSChristoph Hellwig 
4634ad765edSChristoph Hellwig 	/*
4644ad765edSChristoph Hellwig 	 * Attempt to allocate whatever delalloc extent currently backs
4654ad765edSChristoph Hellwig 	 * offset_fsb and put the result into wpc->imap.  Allocate in a loop
4664ad765edSChristoph Hellwig 	 * because it may take several attempts to allocate real blocks for a
4674ad765edSChristoph Hellwig 	 * contiguous delalloc extent if free space is sufficiently fragmented.
4684ad765edSChristoph Hellwig 	 */
4694ad765edSChristoph Hellwig 	do {
4704ad765edSChristoph Hellwig 		error = xfs_bmapi_convert_delalloc(ip, wpc->fork, offset_fsb,
4714ad765edSChristoph Hellwig 				&wpc->imap, wpc->fork == XFS_COW_FORK ?
4724ad765edSChristoph Hellwig 					&wpc->cow_seq : &wpc->data_seq);
4734ad765edSChristoph Hellwig 		if (error)
4744ad765edSChristoph Hellwig 			return error;
4754ad765edSChristoph Hellwig 	} while (wpc->imap.br_startoff + wpc->imap.br_blockcount <= offset_fsb);
4764ad765edSChristoph Hellwig 
4774ad765edSChristoph Hellwig 	return 0;
4784ad765edSChristoph Hellwig }
4794ad765edSChristoph Hellwig 
480c59d87c4SChristoph Hellwig STATIC int
481c59d87c4SChristoph Hellwig xfs_map_blocks(
4825c665e5bSChristoph Hellwig 	struct xfs_writepage_ctx *wpc,
483c59d87c4SChristoph Hellwig 	struct inode		*inode,
4845c665e5bSChristoph Hellwig 	loff_t			offset)
485c59d87c4SChristoph Hellwig {
486c59d87c4SChristoph Hellwig 	struct xfs_inode	*ip = XFS_I(inode);
487c59d87c4SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
48893407472SFabian Frederick 	ssize_t			count = i_blocksize(inode);
489b4e29032SChristoph Hellwig 	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
490b4e29032SChristoph Hellwig 	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
491e666aa37SChristoph Hellwig 	xfs_fileoff_t		cow_fsb = NULLFILEOFF;
4925c665e5bSChristoph Hellwig 	struct xfs_bmbt_irec	imap;
493060d4eaaSChristoph Hellwig 	struct xfs_iext_cursor	icur;
4947588cbeeSChristoph Hellwig 	int			retries = 0;
495c59d87c4SChristoph Hellwig 	int			error = 0;
496c59d87c4SChristoph Hellwig 
497d9252d52SBrian Foster 	if (XFS_FORCED_SHUTDOWN(mp))
498d9252d52SBrian Foster 		return -EIO;
499d9252d52SBrian Foster 
500889c65b3SChristoph Hellwig 	/*
501889c65b3SChristoph Hellwig 	 * COW fork blocks can overlap data fork blocks even if the blocks
502889c65b3SChristoph Hellwig 	 * aren't shared.  COW I/O always takes precedence, so we must always
503889c65b3SChristoph Hellwig 	 * check for overlap on reflink inodes unless the mapping is already a
504e666aa37SChristoph Hellwig 	 * COW one, or the COW fork hasn't changed from the last time we looked
505e666aa37SChristoph Hellwig 	 * at it.
506e666aa37SChristoph Hellwig 	 *
507e666aa37SChristoph Hellwig 	 * It's safe to check the COW fork if_seq here without the ILOCK because
508e666aa37SChristoph Hellwig 	 * we've indirectly protected against concurrent updates: writeback has
509e666aa37SChristoph Hellwig 	 * the page locked, which prevents concurrent invalidations by reflink
510e666aa37SChristoph Hellwig 	 * and directio and prevents concurrent buffered writes to the same
511e666aa37SChristoph Hellwig 	 * page.  Changes to if_seq always happen under i_lock, which protects
512e666aa37SChristoph Hellwig 	 * against concurrent updates and provides a memory barrier on the way
513e666aa37SChristoph Hellwig 	 * out that ensures that we always see the current value.
514889c65b3SChristoph Hellwig 	 */
515d9252d52SBrian Foster 	if (xfs_imap_valid(wpc, ip, offset_fsb))
516889c65b3SChristoph Hellwig 		return 0;
517889c65b3SChristoph Hellwig 
518889c65b3SChristoph Hellwig 	/*
519889c65b3SChristoph Hellwig 	 * If we don't have a valid map, now it's time to get a new one for this
520889c65b3SChristoph Hellwig 	 * offset.  This will convert delayed allocations (including COW ones)
521889c65b3SChristoph Hellwig 	 * into real extents.  If we return without a valid map, it means we
522889c65b3SChristoph Hellwig 	 * landed in a hole and we skip the block.
523889c65b3SChristoph Hellwig 	 */
5247588cbeeSChristoph Hellwig retry:
525c59d87c4SChristoph Hellwig 	xfs_ilock(ip, XFS_ILOCK_SHARED);
526c59d87c4SChristoph Hellwig 	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
527c59d87c4SChristoph Hellwig 	       (ip->i_df.if_flags & XFS_IFEXTENTS));
528060d4eaaSChristoph Hellwig 
529060d4eaaSChristoph Hellwig 	/*
530060d4eaaSChristoph Hellwig 	 * Check if this offset is covered by a COW extent, and if so use
531060d4eaaSChristoph Hellwig 	 * it directly instead of looking up anything in the data fork.
532060d4eaaSChristoph Hellwig 	 */
53351d62690SChristoph Hellwig 	if (xfs_inode_has_cow_data(ip) &&
534e666aa37SChristoph Hellwig 	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
535e666aa37SChristoph Hellwig 		cow_fsb = imap.br_startoff;
536e666aa37SChristoph Hellwig 	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
5372ba090d5SChristoph Hellwig 		wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
5385c665e5bSChristoph Hellwig 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
539be225fecSChristoph Hellwig 
540be225fecSChristoph Hellwig 		wpc->fork = XFS_COW_FORK;
5415c665e5bSChristoph Hellwig 		goto allocate_blocks;
5425c665e5bSChristoph Hellwig 	}
5435c665e5bSChristoph Hellwig 
5445c665e5bSChristoph Hellwig 	/*
545d9252d52SBrian Foster 	 * No COW extent overlap. Revalidate now that we may have updated
546d9252d52SBrian Foster 	 * ->cow_seq. If the data mapping is still valid, we're done.
5475c665e5bSChristoph Hellwig 	 */
548d9252d52SBrian Foster 	if (xfs_imap_valid(wpc, ip, offset_fsb)) {
5495c665e5bSChristoph Hellwig 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
5505c665e5bSChristoph Hellwig 		return 0;
5515c665e5bSChristoph Hellwig 	}
5525c665e5bSChristoph Hellwig 
5535c665e5bSChristoph Hellwig 	/*
5545c665e5bSChristoph Hellwig 	 * If we don't have a valid map, now it's time to get a new one for this
5555c665e5bSChristoph Hellwig 	 * offset.  This will convert delayed allocations (including COW ones)
5565c665e5bSChristoph Hellwig 	 * into real extents.
5575c665e5bSChristoph Hellwig 	 */
5583345746eSChristoph Hellwig 	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
5593345746eSChristoph Hellwig 		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
560d9252d52SBrian Foster 	wpc->data_seq = READ_ONCE(ip->i_df.if_seq);
561c59d87c4SChristoph Hellwig 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
562c59d87c4SChristoph Hellwig 
563be225fecSChristoph Hellwig 	wpc->fork = XFS_DATA_FORK;
564be225fecSChristoph Hellwig 
56512df89f2SChristoph Hellwig 	/* landed in a hole or beyond EOF? */
5663345746eSChristoph Hellwig 	if (imap.br_startoff > offset_fsb) {
5673345746eSChristoph Hellwig 		imap.br_blockcount = imap.br_startoff - offset_fsb;
5685c665e5bSChristoph Hellwig 		imap.br_startoff = offset_fsb;
5695c665e5bSChristoph Hellwig 		imap.br_startblock = HOLESTARTBLOCK;
570be225fecSChristoph Hellwig 		imap.br_state = XFS_EXT_NORM;
57112df89f2SChristoph Hellwig 	}
57212df89f2SChristoph Hellwig 
573e666aa37SChristoph Hellwig 	/*
57412df89f2SChristoph Hellwig 	 * Truncate to the next COW extent if there is one.  This is the only
57512df89f2SChristoph Hellwig 	 * opportunity to do this because we can skip COW fork lookups for the
57612df89f2SChristoph Hellwig 	 * subsequent blocks in the mapping; however, the requirement to treat
57712df89f2SChristoph Hellwig 	 * the COW range separately remains.
578e666aa37SChristoph Hellwig 	 */
579e666aa37SChristoph Hellwig 	if (cow_fsb != NULLFILEOFF &&
580e666aa37SChristoph Hellwig 	    cow_fsb < imap.br_startoff + imap.br_blockcount)
581e666aa37SChristoph Hellwig 		imap.br_blockcount = cow_fsb - imap.br_startoff;
582e666aa37SChristoph Hellwig 
583be225fecSChristoph Hellwig 	/* got a delalloc extent? */
58412df89f2SChristoph Hellwig 	if (imap.br_startblock != HOLESTARTBLOCK &&
58512df89f2SChristoph Hellwig 	    isnullstartblock(imap.br_startblock))
5865c665e5bSChristoph Hellwig 		goto allocate_blocks;
587e2f6ad46SDave Chinner 
5885c665e5bSChristoph Hellwig 	wpc->imap = imap;
589be225fecSChristoph Hellwig 	trace_xfs_map_blocks_found(ip, offset, count, wpc->fork, &imap);
5905c665e5bSChristoph Hellwig 	return 0;
5915c665e5bSChristoph Hellwig allocate_blocks:
5924ad765edSChristoph Hellwig 	error = xfs_convert_blocks(wpc, ip, offset_fsb);
5937588cbeeSChristoph Hellwig 	if (error) {
5947588cbeeSChristoph Hellwig 		/*
5957588cbeeSChristoph Hellwig 		 * If we failed to find the extent in the COW fork we might have
5967588cbeeSChristoph Hellwig 		 * raced with a COW to data fork conversion or truncate.
5977588cbeeSChristoph Hellwig 		 * Restart the lookup to catch the extent in the data fork for
5987588cbeeSChristoph Hellwig 		 * the former case, but prevent additional retries to avoid
5997588cbeeSChristoph Hellwig 		 * looping forever for the latter case.
6007588cbeeSChristoph Hellwig 		 */
6017588cbeeSChristoph Hellwig 		if (error == -EAGAIN && wpc->fork == XFS_COW_FORK && !retries++)
6027588cbeeSChristoph Hellwig 			goto retry;
6037588cbeeSChristoph Hellwig 		ASSERT(error != -EAGAIN);
6045c665e5bSChristoph Hellwig 		return error;
6057588cbeeSChristoph Hellwig 	}
6064ad765edSChristoph Hellwig 
6074ad765edSChristoph Hellwig 	/*
6084ad765edSChristoph Hellwig 	 * Due to merging, the returned real extent might be larger than the
6094ad765edSChristoph Hellwig 	 * original delalloc one.  Trim the returned extent to the next COW
6104ad765edSChristoph Hellwig 	 * boundary again to force a re-lookup.
6114ad765edSChristoph Hellwig 	 */
6124ad765edSChristoph Hellwig 	if (wpc->fork != XFS_COW_FORK && cow_fsb != NULLFILEOFF &&
6134ad765edSChristoph Hellwig 	    cow_fsb < wpc->imap.br_startoff + wpc->imap.br_blockcount)
6144ad765edSChristoph Hellwig 		wpc->imap.br_blockcount = cow_fsb - wpc->imap.br_startoff;
6154ad765edSChristoph Hellwig 
6164ad765edSChristoph Hellwig 	ASSERT(wpc->imap.br_startoff <= offset_fsb);
6174ad765edSChristoph Hellwig 	ASSERT(wpc->imap.br_startoff + wpc->imap.br_blockcount > offset_fsb);
618be225fecSChristoph Hellwig 	trace_xfs_map_blocks_alloc(ip, offset, count, wpc->fork, &imap);
619c59d87c4SChristoph Hellwig 	return 0;
620c59d87c4SChristoph Hellwig }
621c59d87c4SChristoph Hellwig 
622c59d87c4SChristoph Hellwig /*
623bb18782aSDave Chinner  * Submit the bio for an ioend. We are passed an ioend with a bio attached to
624bb18782aSDave Chinner  * it, and we submit that bio. The ioend may be used for multiple bio
625bb18782aSDave Chinner  * submissions, so we only want to allocate an append transaction for the ioend
626bb18782aSDave Chinner  * once. In the case of multiple bio submission, each bio will take an IO
627bb18782aSDave Chinner  * reference to the ioend to ensure that the ioend completion is only done once
628bb18782aSDave Chinner  * all bios have been submitted and the ioend is really done.
6297bf7f352SDave Chinner  *
6307bf7f352SDave Chinner  * If @status is non-zero, it means that we have a situation where some part of
6317bf7f352SDave Chinner  * the submission process has failed after we have marked pages for writeback
632bb18782aSDave Chinner  * and unlocked them. In this situation, we need to fail the bio and ioend
633bb18782aSDave Chinner  * rather than submit it to IO. This typically only happens on a filesystem
634bb18782aSDave Chinner  * shutdown.
635c59d87c4SChristoph Hellwig  */
636e10de372SDave Chinner STATIC int
637c59d87c4SChristoph Hellwig xfs_submit_ioend(
638c59d87c4SChristoph Hellwig 	struct writeback_control *wbc,
6390e51a8e1SChristoph Hellwig 	struct xfs_ioend	*ioend,
640e10de372SDave Chinner 	int			status)
641c59d87c4SChristoph Hellwig {
6425eda4300SDarrick J. Wong 	/* Convert CoW extents to regular */
643be225fecSChristoph Hellwig 	if (!status && ioend->io_fork == XFS_COW_FORK) {
6444a2d01b0SDave Chinner 		/*
6454a2d01b0SDave Chinner 		 * Yuk. This can do memory allocation, but is not a
6464a2d01b0SDave Chinner 		 * transactional operation so everything is done in GFP_KERNEL
6474a2d01b0SDave Chinner 		 * context. That can deadlock, because we hold pages in
6484a2d01b0SDave Chinner 		 * writeback state and GFP_KERNEL allocations can block on them.
6494a2d01b0SDave Chinner 		 * Hence we must operate in nofs conditions here.
6504a2d01b0SDave Chinner 		 */
6514a2d01b0SDave Chinner 		unsigned nofs_flag;
6524a2d01b0SDave Chinner 
6534a2d01b0SDave Chinner 		nofs_flag = memalloc_nofs_save();
6545eda4300SDarrick J. Wong 		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
6555eda4300SDarrick J. Wong 				ioend->io_offset, ioend->io_size);
6564a2d01b0SDave Chinner 		memalloc_nofs_restore(nofs_flag);
6575eda4300SDarrick J. Wong 	}
6585eda4300SDarrick J. Wong 
659e10de372SDave Chinner 	/* Reserve log space if we might write beyond the on-disk inode size. */
660e10de372SDave Chinner 	if (!status &&
661be225fecSChristoph Hellwig 	    (ioend->io_fork == XFS_COW_FORK ||
662be225fecSChristoph Hellwig 	     ioend->io_state != XFS_EXT_UNWRITTEN) &&
663bb18782aSDave Chinner 	    xfs_ioend_is_append(ioend) &&
664bb18782aSDave Chinner 	    !ioend->io_append_trans)
665e10de372SDave Chinner 		status = xfs_setfilesize_trans_alloc(ioend);
666bb18782aSDave Chinner 
6670e51a8e1SChristoph Hellwig 	ioend->io_bio->bi_private = ioend;
6680e51a8e1SChristoph Hellwig 	ioend->io_bio->bi_end_io = xfs_end_bio;
6697637241eSJens Axboe 	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
67070fd7614SChristoph Hellwig 
6717bf7f352SDave Chinner 	/*
6727bf7f352SDave Chinner 	 * If we are failing the IO now, just mark the ioend with an
6737bf7f352SDave Chinner 	 * error and finish it. This will run IO completion immediately
6747bf7f352SDave Chinner 	 * as there is only one reference to the ioend at this point in
6757bf7f352SDave Chinner 	 * time.
6767bf7f352SDave Chinner 	 */
677e10de372SDave Chinner 	if (status) {
6784e4cbee9SChristoph Hellwig 		ioend->io_bio->bi_status = errno_to_blk_status(status);
6790e51a8e1SChristoph Hellwig 		bio_endio(ioend->io_bio);
680e10de372SDave Chinner 		return status;
6817bf7f352SDave Chinner 	}
6827bf7f352SDave Chinner 
68331d7d58dSJens Axboe 	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
6844e49ea4aSMike Christie 	submit_bio(ioend->io_bio);
685e10de372SDave Chinner 	return 0;
686c59d87c4SChristoph Hellwig }
687c59d87c4SChristoph Hellwig 
6880e51a8e1SChristoph Hellwig static struct xfs_ioend *
6890e51a8e1SChristoph Hellwig xfs_alloc_ioend(
6900e51a8e1SChristoph Hellwig 	struct inode		*inode,
691be225fecSChristoph Hellwig 	int			fork,
692be225fecSChristoph Hellwig 	xfs_exntst_t		state,
6930e51a8e1SChristoph Hellwig 	xfs_off_t		offset,
6943faed667SChristoph Hellwig 	struct block_device	*bdev,
6953faed667SChristoph Hellwig 	sector_t		sector)
6960e51a8e1SChristoph Hellwig {
6970e51a8e1SChristoph Hellwig 	struct xfs_ioend	*ioend;
6980e51a8e1SChristoph Hellwig 	struct bio		*bio;
6990e51a8e1SChristoph Hellwig 
700e292d7bcSKent Overstreet 	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
7013faed667SChristoph Hellwig 	bio_set_dev(bio, bdev);
7023faed667SChristoph Hellwig 	bio->bi_iter.bi_sector = sector;
7030e51a8e1SChristoph Hellwig 
7040e51a8e1SChristoph Hellwig 	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
7050e51a8e1SChristoph Hellwig 	INIT_LIST_HEAD(&ioend->io_list);
706be225fecSChristoph Hellwig 	ioend->io_fork = fork;
707be225fecSChristoph Hellwig 	ioend->io_state = state;
7080e51a8e1SChristoph Hellwig 	ioend->io_inode = inode;
7090e51a8e1SChristoph Hellwig 	ioend->io_size = 0;
7100e51a8e1SChristoph Hellwig 	ioend->io_offset = offset;
7110e51a8e1SChristoph Hellwig 	ioend->io_append_trans = NULL;
7120e51a8e1SChristoph Hellwig 	ioend->io_bio = bio;
7130e51a8e1SChristoph Hellwig 	return ioend;
7140e51a8e1SChristoph Hellwig }
7150e51a8e1SChristoph Hellwig 
7160e51a8e1SChristoph Hellwig /*
7170e51a8e1SChristoph Hellwig  * Allocate a new bio, and chain the old bio to the new one.
7180e51a8e1SChristoph Hellwig  *
7190e51a8e1SChristoph Hellwig  * Note that we have to perform the chaining in this unintuitive order
7200e51a8e1SChristoph Hellwig  * so that the bi_private linkage is set up in the right direction for the
7210e51a8e1SChristoph Hellwig  * traversal in xfs_destroy_ioend().
7220e51a8e1SChristoph Hellwig  */
7230e51a8e1SChristoph Hellwig static void
7240e51a8e1SChristoph Hellwig xfs_chain_bio(
7250e51a8e1SChristoph Hellwig 	struct xfs_ioend	*ioend,
7260e51a8e1SChristoph Hellwig 	struct writeback_control *wbc,
7273faed667SChristoph Hellwig 	struct block_device	*bdev,
7283faed667SChristoph Hellwig 	sector_t		sector)
7290e51a8e1SChristoph Hellwig {
7300e51a8e1SChristoph Hellwig 	struct bio *new;
7310e51a8e1SChristoph Hellwig 
7320e51a8e1SChristoph Hellwig 	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
7333faed667SChristoph Hellwig 	bio_set_dev(new, bdev);
7343faed667SChristoph Hellwig 	new->bi_iter.bi_sector = sector;
7350e51a8e1SChristoph Hellwig 	bio_chain(ioend->io_bio, new);
7360e51a8e1SChristoph Hellwig 	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
7377637241eSJens Axboe 	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
73831d7d58dSJens Axboe 	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
7394e49ea4aSMike Christie 	submit_bio(ioend->io_bio);
7400e51a8e1SChristoph Hellwig 	ioend->io_bio = new;
7410e51a8e1SChristoph Hellwig }
7420e51a8e1SChristoph Hellwig 
743c59d87c4SChristoph Hellwig /*
7443faed667SChristoph Hellwig  * Test to see if we have an existing ioend structure that we could append to
7453faed667SChristoph Hellwig  * first, otherwise finish off the current ioend and start another.
746c59d87c4SChristoph Hellwig  */
747c59d87c4SChristoph Hellwig STATIC void
748c59d87c4SChristoph Hellwig xfs_add_to_ioend(
749c59d87c4SChristoph Hellwig 	struct inode		*inode,
750c59d87c4SChristoph Hellwig 	xfs_off_t		offset,
7513faed667SChristoph Hellwig 	struct page		*page,
75282cb1417SChristoph Hellwig 	struct iomap_page	*iop,
753e10de372SDave Chinner 	struct xfs_writepage_ctx *wpc,
754bb18782aSDave Chinner 	struct writeback_control *wbc,
755e10de372SDave Chinner 	struct list_head	*iolist)
756c59d87c4SChristoph Hellwig {
7573faed667SChristoph Hellwig 	struct xfs_inode	*ip = XFS_I(inode);
7583faed667SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
7593faed667SChristoph Hellwig 	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
7603faed667SChristoph Hellwig 	unsigned		len = i_blocksize(inode);
7613faed667SChristoph Hellwig 	unsigned		poff = offset & (PAGE_SIZE - 1);
7623faed667SChristoph Hellwig 	sector_t		sector;
7633faed667SChristoph Hellwig 
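	/*
	 * Translate the file offset into a disk sector: take the disk address
	 * of the extent's start block and add the byte offset into the extent
	 * in 512-byte sector units.  For example, with 4k filesystem blocks a
	 * write three blocks into the mapping lands (3 * 4096) >> 9 = 24
	 * sectors past the extent's start sector.
	 */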
7643faed667SChristoph Hellwig 	sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
7653faed667SChristoph Hellwig 		((offset - XFS_FSB_TO_B(mp, wpc->imap.br_startoff)) >> 9);
7663faed667SChristoph Hellwig 
767be225fecSChristoph Hellwig 	if (!wpc->ioend ||
768be225fecSChristoph Hellwig 	    wpc->fork != wpc->ioend->io_fork ||
769be225fecSChristoph Hellwig 	    wpc->imap.br_state != wpc->ioend->io_state ||
7703faed667SChristoph Hellwig 	    sector != bio_end_sector(wpc->ioend->io_bio) ||
7710df61da8SDarrick J. Wong 	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
772e10de372SDave Chinner 		if (wpc->ioend)
773e10de372SDave Chinner 			list_add(&wpc->ioend->io_list, iolist);
774be225fecSChristoph Hellwig 		wpc->ioend = xfs_alloc_ioend(inode, wpc->fork,
775be225fecSChristoph Hellwig 				wpc->imap.br_state, offset, bdev, sector);
776c59d87c4SChristoph Hellwig 	}
777c59d87c4SChristoph Hellwig 
77807173c3eSMing Lei 	if (!__bio_try_merge_page(wpc->ioend->io_bio, page, len, poff, true)) {
77982cb1417SChristoph Hellwig 		if (iop)
78082cb1417SChristoph Hellwig 			atomic_inc(&iop->write_count);
78182cb1417SChristoph Hellwig 		if (bio_full(wpc->ioend->io_bio))
7823faed667SChristoph Hellwig 			xfs_chain_bio(wpc->ioend, wbc, bdev, sector);
78307173c3eSMing Lei 		bio_add_page(wpc->ioend->io_bio, page, len, poff);
78482cb1417SChristoph Hellwig 	}
785bb18782aSDave Chinner 
7863faed667SChristoph Hellwig 	wpc->ioend->io_size += len;
787c59d87c4SChristoph Hellwig }
788c59d87c4SChristoph Hellwig 
789c59d87c4SChristoph Hellwig STATIC void
790c59d87c4SChristoph Hellwig xfs_vm_invalidatepage(
791c59d87c4SChristoph Hellwig 	struct page		*page,
792d47992f8SLukas Czerner 	unsigned int		offset,
793d47992f8SLukas Czerner 	unsigned int		length)
794c59d87c4SChristoph Hellwig {
79582cb1417SChristoph Hellwig 	trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
79682cb1417SChristoph Hellwig 	iomap_invalidatepage(page, offset, length);
797c59d87c4SChristoph Hellwig }
798c59d87c4SChristoph Hellwig 
799c59d87c4SChristoph Hellwig /*
80082cb1417SChristoph Hellwig  * If the page has delalloc blocks on it, we need to punch them out before we
801c59d87c4SChristoph Hellwig  * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
80282cb1417SChristoph Hellwig  * inode that can trip up a later direct I/O read operation on the same region.
803c59d87c4SChristoph Hellwig  *
80482cb1417SChristoph Hellwig  * We prevent this by truncating away the delalloc regions on the page.  Because
80482cb1417SChristoph Hellwig  * they are delalloc, we can do this without needing a transaction. Indeed, if
80682cb1417SChristoph Hellwig  * we get ENOSPC errors, we have to be able to do this truncation without a
80782cb1417SChristoph Hellwig  * transaction as there is no space left for block reservation (typically why we
80782cb1417SChristoph Hellwig  * see an ENOSPC in writeback).
809c59d87c4SChristoph Hellwig  */
810c59d87c4SChristoph Hellwig STATIC void
811c59d87c4SChristoph Hellwig xfs_aops_discard_page(
812c59d87c4SChristoph Hellwig 	struct page		*page)
813c59d87c4SChristoph Hellwig {
814c59d87c4SChristoph Hellwig 	struct inode		*inode = page->mapping->host;
815c59d87c4SChristoph Hellwig 	struct xfs_inode	*ip = XFS_I(inode);
81603625721SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
817c59d87c4SChristoph Hellwig 	loff_t			offset = page_offset(page);
81803625721SChristoph Hellwig 	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, offset);
81903625721SChristoph Hellwig 	int			error;
820c59d87c4SChristoph Hellwig 
82103625721SChristoph Hellwig 	if (XFS_FORCED_SHUTDOWN(mp))
822c59d87c4SChristoph Hellwig 		goto out_invalidate;
823c59d87c4SChristoph Hellwig 
82403625721SChristoph Hellwig 	xfs_alert(mp,
825c9690043SDarrick J. Wong 		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
826c59d87c4SChristoph Hellwig 			page, ip->i_ino, offset);
827c59d87c4SChristoph Hellwig 
82803625721SChristoph Hellwig 	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
82903625721SChristoph Hellwig 			PAGE_SIZE / i_blocksize(inode));
83003625721SChristoph Hellwig 	if (error && !XFS_FORCED_SHUTDOWN(mp))
83103625721SChristoph Hellwig 		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
832c59d87c4SChristoph Hellwig out_invalidate:
83309cbfeafSKirill A. Shutemov 	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
834c59d87c4SChristoph Hellwig }
835c59d87c4SChristoph Hellwig 
836c59d87c4SChristoph Hellwig /*
837e10de372SDave Chinner  * We implement an immediate ioend submission policy here to avoid needing to
838e10de372SDave Chinner  * chain multiple ioends and hence nest mempool allocations which can violate
839e10de372SDave Chinner  * forward progress guarantees we need to provide. The current ioend we are
84082cb1417SChristoph Hellwig  * adding blocks to is cached on the writepage context, and if the new block
841e10de372SDave Chinner  * does not append to the cached ioend, it will create a new ioend and cache that
842e10de372SDave Chinner  * instead.
843e10de372SDave Chinner  *
844e10de372SDave Chinner  * If a new ioend is created and cached, the old ioend is returned and queued
845e10de372SDave Chinner  * locally for submission once the entire page is processed or an error has been
846e10de372SDave Chinner  * detected.  While ioends are submitted immediately after they are completed,
847e10de372SDave Chinner  * batching optimisations are provided by higher level block plugging.
848e10de372SDave Chinner  *
849e10de372SDave Chinner  * At the end of a writeback pass, there will be a cached ioend remaining on the
850e10de372SDave Chinner  * writepage context that the caller will need to submit.
851e10de372SDave Chinner  */
852bfce7d2eSDave Chinner static int
853bfce7d2eSDave Chinner xfs_writepage_map(
854bfce7d2eSDave Chinner 	struct xfs_writepage_ctx *wpc,
855e10de372SDave Chinner 	struct writeback_control *wbc,
856bfce7d2eSDave Chinner 	struct inode		*inode,
857bfce7d2eSDave Chinner 	struct page		*page,
858c8ce540dSDarrick J. Wong 	uint64_t		end_offset)
859bfce7d2eSDave Chinner {
860e10de372SDave Chinner 	LIST_HEAD(submit_list);
86182cb1417SChristoph Hellwig 	struct iomap_page	*iop = to_iomap_page(page);
86282cb1417SChristoph Hellwig 	unsigned		len = i_blocksize(inode);
863e10de372SDave Chinner 	struct xfs_ioend	*ioend, *next;
8646a4c9501SChristoph Hellwig 	uint64_t		file_offset;	/* file offset of page */
86582cb1417SChristoph Hellwig 	int			error = 0, count = 0, i;
866bfce7d2eSDave Chinner 
86782cb1417SChristoph Hellwig 	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
86882cb1417SChristoph Hellwig 	ASSERT(!iop || atomic_read(&iop->write_count) == 0);
869ac8ee546SChristoph Hellwig 
870e2f6ad46SDave Chinner 	/*
87182cb1417SChristoph Hellwig 	 * Walk through the page to find areas to write back. If we run off the
87282cb1417SChristoph Hellwig 	 * end of the current map or find the current map invalid, grab a new
87382cb1417SChristoph Hellwig 	 * one.
874e2f6ad46SDave Chinner 	 */
87582cb1417SChristoph Hellwig 	for (i = 0, file_offset = page_offset(page);
87682cb1417SChristoph Hellwig 	     i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
87782cb1417SChristoph Hellwig 	     i++, file_offset += len) {
87882cb1417SChristoph Hellwig 		if (iop && !test_bit(i, iop->uptodate))
879bfce7d2eSDave Chinner 			continue;
880bfce7d2eSDave Chinner 
8816a4c9501SChristoph Hellwig 		error = xfs_map_blocks(wpc, inode, file_offset);
882bfce7d2eSDave Chinner 		if (error)
883889c65b3SChristoph Hellwig 			break;
884be225fecSChristoph Hellwig 		if (wpc->imap.br_startblock == HOLESTARTBLOCK)
885ac8ee546SChristoph Hellwig 			continue;
88682cb1417SChristoph Hellwig 		xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
8873faed667SChristoph Hellwig 				 &submit_list);
888bfce7d2eSDave Chinner 		count++;
889e2f6ad46SDave Chinner 	}
890bfce7d2eSDave Chinner 
891e10de372SDave Chinner 	ASSERT(wpc->ioend || list_empty(&submit_list));
8921b65d3ddSChristoph Hellwig 	ASSERT(PageLocked(page));
8931b65d3ddSChristoph Hellwig 	ASSERT(!PageWriteback(page));
894bfce7d2eSDave Chinner 
895bfce7d2eSDave Chinner 	/*
89682cb1417SChristoph Hellwig 	 * On error, we have to fail the ioend here because we may have set
89782cb1417SChristoph Hellwig 	 * pages under writeback; we have to make sure we run IO completion to
89882cb1417SChristoph Hellwig 	 * mark the error state of the IO appropriately, so we can't cancel the
89982cb1417SChristoph Hellwig 	 * ioend directly here.  That means we have to mark this page as under
90082cb1417SChristoph Hellwig 	 * writeback if we included any blocks from it in the ioend chain so
90182cb1417SChristoph Hellwig 	 * that completion treats it correctly.
902bfce7d2eSDave Chinner 	 *
903e10de372SDave Chinner 	 * If we didn't include the page in the ioend, then on error we can
904e10de372SDave Chinner 	 * simply discard and unlock it as there are no other users of the page
90582cb1417SChristoph Hellwig 	 * now.  The caller will still need to trigger submission of outstanding
90682cb1417SChristoph Hellwig 	 * ioends on the writepage context so they are treated correctly on
90782cb1417SChristoph Hellwig 	 * error.
908bfce7d2eSDave Chinner 	 */
9098e1f065bSChristoph Hellwig 	if (unlikely(error)) {
9108e1f065bSChristoph Hellwig 		if (!count) {
9118e1f065bSChristoph Hellwig 			xfs_aops_discard_page(page);
9128e1f065bSChristoph Hellwig 			ClearPageUptodate(page);
9138e1f065bSChristoph Hellwig 			unlock_page(page);
9148e1f065bSChristoph Hellwig 			goto done;
9158e1f065bSChristoph Hellwig 		}
9168e1f065bSChristoph Hellwig 
9171b65d3ddSChristoph Hellwig 		/*
9181b65d3ddSChristoph Hellwig 		 * If the page was not fully cleaned, we need to ensure that the
9191b65d3ddSChristoph Hellwig 		 * higher layers come back to it correctly.  That means we need
9201b65d3ddSChristoph Hellwig 		 * to keep the page dirty, and for WB_SYNC_ALL writeback we need
9211b65d3ddSChristoph Hellwig 		 * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
9221b65d3ddSChristoph Hellwig 		 * so another attempt to write this page in this writeback sweep
9231b65d3ddSChristoph Hellwig 		 * will be made.
9241b65d3ddSChristoph Hellwig 		 */
9251b65d3ddSChristoph Hellwig 		set_page_writeback_keepwrite(page);
9261b65d3ddSChristoph Hellwig 	} else {
9271b65d3ddSChristoph Hellwig 		clear_page_dirty_for_io(page);
9281b65d3ddSChristoph Hellwig 		set_page_writeback(page);
9291b65d3ddSChristoph Hellwig 	}
9308e1f065bSChristoph Hellwig 
9311b65d3ddSChristoph Hellwig 	unlock_page(page);
932e10de372SDave Chinner 
933e10de372SDave Chinner 	/*
934e10de372SDave Chinner 	 * Preserve the original error if there was one, otherwise catch
935e10de372SDave Chinner 	 * submission errors here and propagate into subsequent ioend
936e10de372SDave Chinner 	 * submissions.
937e10de372SDave Chinner 	 */
938e10de372SDave Chinner 	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
939e10de372SDave Chinner 		int error2;
940e10de372SDave Chinner 
941e10de372SDave Chinner 		list_del_init(&ioend->io_list);
942e10de372SDave Chinner 		error2 = xfs_submit_ioend(wbc, ioend, error);
943e10de372SDave Chinner 		if (error2 && !error)
944e10de372SDave Chinner 			error = error2;
945e10de372SDave Chinner 	}
946e10de372SDave Chinner 
9478e1f065bSChristoph Hellwig 	/*
94882cb1417SChristoph Hellwig 	 * We can end up here with no error and nothing to write only if we race
94982cb1417SChristoph Hellwig 	 * with a partial page truncate on a sub-page block sized filesystem.
9508e1f065bSChristoph Hellwig 	 */
9518e1f065bSChristoph Hellwig 	if (!count)
9528e1f065bSChristoph Hellwig 		end_page_writeback(page);
9538e1f065bSChristoph Hellwig done:
954bfce7d2eSDave Chinner 	mapping_set_error(page->mapping, error);
955bfce7d2eSDave Chinner 	return error;
956bfce7d2eSDave Chinner }
957bfce7d2eSDave Chinner 
958c59d87c4SChristoph Hellwig /*
959c59d87c4SChristoph Hellwig  * Write out a dirty page.
960c59d87c4SChristoph Hellwig  *
961c59d87c4SChristoph Hellwig  * For delalloc space on the page we need to allocate space and flush it.
962c59d87c4SChristoph Hellwig  * For unwritten space on the page we need to start the conversion to
963c59d87c4SChristoph Hellwig  * regular allocated space.
964c59d87c4SChristoph Hellwig  */
965c59d87c4SChristoph Hellwig STATIC int
966fbcc0256SDave Chinner xfs_do_writepage(
967c59d87c4SChristoph Hellwig 	struct page		*page,
968fbcc0256SDave Chinner 	struct writeback_control *wbc,
969fbcc0256SDave Chinner 	void			*data)
970c59d87c4SChristoph Hellwig {
971fbcc0256SDave Chinner 	struct xfs_writepage_ctx *wpc = data;
972c59d87c4SChristoph Hellwig 	struct inode		*inode = page->mapping->host;
973c59d87c4SChristoph Hellwig 	loff_t			offset;
974c8ce540dSDarrick J. Wong 	uint64_t              end_offset;
975ad68972aSDave Chinner 	pgoff_t                 end_index;
976c59d87c4SChristoph Hellwig 
97734097dfeSLukas Czerner 	trace_xfs_writepage(inode, page, 0, 0);
978c59d87c4SChristoph Hellwig 
979c59d87c4SChristoph Hellwig 	/*
980c59d87c4SChristoph Hellwig 	 * Refuse to write the page out if we are called from reclaim context.
981c59d87c4SChristoph Hellwig 	 *
982c59d87c4SChristoph Hellwig 	 * This avoids stack overflows when called from deeply used stacks in
983c59d87c4SChristoph Hellwig 	 * random callers for direct reclaim or memcg reclaim.  We explicitly
984c59d87c4SChristoph Hellwig 	 * allow reclaim from kswapd as the stack usage there is relatively low.
985c59d87c4SChristoph Hellwig 	 *
98694054fa3SMel Gorman 	 * This should never happen except in the case of a VM regression so
98794054fa3SMel Gorman 	 * warn about it.
988c59d87c4SChristoph Hellwig 	 */
98994054fa3SMel Gorman 	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
99094054fa3SMel Gorman 			PF_MEMALLOC))
991c59d87c4SChristoph Hellwig 		goto redirty;
992c59d87c4SChristoph Hellwig 
993c59d87c4SChristoph Hellwig 	/*
994c59d87c4SChristoph Hellwig 	 * Given that we do not allow direct reclaim to call us, we should
995c59d87c4SChristoph Hellwig 	 * never be called while in a filesystem transaction.
996c59d87c4SChristoph Hellwig 	 */
9979070733bSMichal Hocko 	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
998c59d87c4SChristoph Hellwig 		goto redirty;
999c59d87c4SChristoph Hellwig 
10008695d27eSJie Liu 	/*
1001ad68972aSDave Chinner 	 * Is this page beyond the end of the file?
1002ad68972aSDave Chinner 	 *
10038695d27eSJie Liu 	 * If the page index is less than end_index, adjust end_offset to
10048695d27eSJie Liu 	 * the highest offset that this page should represent.
10058695d27eSJie Liu 	 * -----------------------------------------------------
10068695d27eSJie Liu 	 * |			file mapping	       | <EOF> |
10078695d27eSJie Liu 	 * -----------------------------------------------------
10088695d27eSJie Liu 	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
10098695d27eSJie Liu 	 * ^--------------------------------^----------|--------
10108695d27eSJie Liu 	 * |     desired writeback range    |      see else    |
10118695d27eSJie Liu 	 * ---------------------------------^------------------|
10128695d27eSJie Liu 	 */
1013ad68972aSDave Chinner 	offset = i_size_read(inode);
101409cbfeafSKirill A. Shutemov 	end_index = offset >> PAGE_SHIFT;
10158695d27eSJie Liu 	if (page->index < end_index)
101609cbfeafSKirill A. Shutemov 		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
10178695d27eSJie Liu 	else {
10188695d27eSJie Liu 		/*
10198695d27eSJie Liu 		 * Check whether the page to write out is beyond or straddles
10208695d27eSJie Liu 		 * i_size.
10218695d27eSJie Liu 		 * -------------------------------------------------------
10228695d27eSJie Liu 		 * |		file mapping		        | <EOF>  |
10238695d27eSJie Liu 		 * -------------------------------------------------------
10248695d27eSJie Liu 		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
10258695d27eSJie Liu 		 * ^--------------------------------^-----------|---------
10268695d27eSJie Liu 		 * |				    |      Straddles     |
10278695d27eSJie Liu 		 * ---------------------------------^-----------|--------|
10288695d27eSJie Liu 		 */
102909cbfeafSKirill A. Shutemov 		unsigned offset_into_page = offset & (PAGE_SIZE - 1);
10306b7a03f0SChristoph Hellwig 
10316b7a03f0SChristoph Hellwig 		/*
1032ff9a28f6SJan Kara 		 * Skip the page if it is fully outside i_size, e.g. due to a
1033ff9a28f6SJan Kara 		 * truncate operation that is in progress. We must redirty the
1034ff9a28f6SJan Kara 		 * page so that reclaim stops reclaiming it. Otherwise
1035ff9a28f6SJan Kara 		 * xfs_vm_releasepage() is called on it and gets confused.
10368695d27eSJie Liu 		 *
10378695d27eSJie Liu 		 * Note that end_index is an unsigned long.  If the given
10388695d27eSJie Liu 		 * offset is greater than 16TB on a 32-bit system and we
10398695d27eSJie Liu 		 * checked whether the page is fully outside i_size via
10408695d27eSJie Liu 		 * "if (page->index >= end_index + 1)", "end_index + 1"
10418695d27eSJie Liu 		 * would overflow and evaluate to 0.  The page would then
10428695d27eSJie Liu 		 * be redirtied and written out repeatedly, which would
10438695d27eSJie Liu 		 * result in an infinite loop and the user program performing
10448695d27eSJie Liu 		 * this operation would hang.  Instead, we verify this
10458695d27eSJie Liu 		 * situation by checking whether the page to write is totally
10468695d27eSJie Liu 		 * beyond i_size or whether its offset is just equal to the EOF.
10476b7a03f0SChristoph Hellwig 		 */
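		/*
		 * Concretely, with 4k pages on a 32-bit system an i_size just
		 * under 16TB (e.g. 16TB - 1) gives end_index = 0xffffffff, so
		 * that hypothetical "end_index + 1" would wrap to 0, every
		 * page would appear to be beyond EOF, and each one would be
		 * redirtied forever.
		 */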
10488695d27eSJie Liu 		if (page->index > end_index ||
10498695d27eSJie Liu 		    (page->index == end_index && offset_into_page == 0))
1050ff9a28f6SJan Kara 			goto redirty;
10516b7a03f0SChristoph Hellwig 
10526b7a03f0SChristoph Hellwig 		/*
10536b7a03f0SChristoph Hellwig 		 * The page straddles i_size.  It must be zeroed out on each
10546b7a03f0SChristoph Hellwig 		 * and every writepage invocation because it may be mmapped.
10556b7a03f0SChristoph Hellwig 		 * "A file is mapped in multiples of the page size.  For a file
10566b7a03f0SChristoph Hellwig 		 * that is not a multiple of the page size, the remaining
10576b7a03f0SChristoph Hellwig 		 * memory is zeroed when mapped, and writes to that region are
10586b7a03f0SChristoph Hellwig 		 * not written out to the file."
10596b7a03f0SChristoph Hellwig 		 */
106009cbfeafSKirill A. Shutemov 		zero_user_segment(page, offset_into_page, PAGE_SIZE);
10618695d27eSJie Liu 
10628695d27eSJie Liu 		/* Adjust the end_offset to the end of file */
10638695d27eSJie Liu 		end_offset = offset;
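
		/*
		 * For example, with 4k pages and i_size = 5000, page index 1
		 * straddles EOF: offset_into_page = 5000 & 4095 = 904, bytes
		 * 904..4095 of the page are zeroed, and end_offset becomes
		 * 5000 so only the first 904 bytes of this page are written
		 * back.
		 */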
1064c59d87c4SChristoph Hellwig 	}
1065c59d87c4SChristoph Hellwig 
10662d5f4b5bSDarrick J. Wong 	return xfs_writepage_map(wpc, wbc, inode, page, end_offset);
1067c59d87c4SChristoph Hellwig 
1068c59d87c4SChristoph Hellwig redirty:
1069c59d87c4SChristoph Hellwig 	redirty_page_for_writepage(wbc, page);
1070c59d87c4SChristoph Hellwig 	unlock_page(page);
1071c59d87c4SChristoph Hellwig 	return 0;
1072c59d87c4SChristoph Hellwig }
1073c59d87c4SChristoph Hellwig 
1074c59d87c4SChristoph Hellwig STATIC int
1075fbcc0256SDave Chinner xfs_vm_writepage(
1076fbcc0256SDave Chinner 	struct page		*page,
1077fbcc0256SDave Chinner 	struct writeback_control *wbc)
1078fbcc0256SDave Chinner {
1079be225fecSChristoph Hellwig 	struct xfs_writepage_ctx wpc = { };
1080fbcc0256SDave Chinner 	int			ret;
1081fbcc0256SDave Chinner 
1082fbcc0256SDave Chinner 	ret = xfs_do_writepage(page, wbc, &wpc);
1083e10de372SDave Chinner 	if (wpc.ioend)
1084e10de372SDave Chinner 		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
1085e10de372SDave Chinner 	return ret;
1086fbcc0256SDave Chinner }
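
/*
 * The single-page ->writepage path above builds its writepage context on the
 * stack, so whatever ioend was constructed for this page is submitted before
 * returning; there is no chance to merge with neighbouring pages as there is
 * in ->writepages below.
 */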
1087fbcc0256SDave Chinner 
1088fbcc0256SDave Chinner STATIC int
1089c59d87c4SChristoph Hellwig xfs_vm_writepages(
1090c59d87c4SChristoph Hellwig 	struct address_space	*mapping,
1091c59d87c4SChristoph Hellwig 	struct writeback_control *wbc)
1092c59d87c4SChristoph Hellwig {
1093be225fecSChristoph Hellwig 	struct xfs_writepage_ctx wpc = { };
1094fbcc0256SDave Chinner 	int			ret;
1095fbcc0256SDave Chinner 
1096c59d87c4SChristoph Hellwig 	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
1097fbcc0256SDave Chinner 	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
1098e10de372SDave Chinner 	if (wpc.ioend)
1099e10de372SDave Chinner 		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
1100e10de372SDave Chinner 	return ret;
1101c59d87c4SChristoph Hellwig }
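
/*
 * The payoff of the shared context in ->writepages: the same struct
 * xfs_writepage_ctx is handed to every xfs_do_writepage() call made by
 * write_cache_pages(), so a mapping cached in wpc.imap can be reused for
 * later pages and physically contiguous blocks can end up in the same
 * wpc.ioend, which is submitted once after the last page has been mapped.
 */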
1102c59d87c4SChristoph Hellwig 
11036e2608dfSDan Williams STATIC int
11046e2608dfSDan Williams xfs_dax_writepages(
11056e2608dfSDan Williams 	struct address_space	*mapping,
11066e2608dfSDan Williams 	struct writeback_control *wbc)
11076e2608dfSDan Williams {
11086e2608dfSDan Williams 	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
11096e2608dfSDan Williams 	return dax_writeback_mapping_range(mapping,
11106e2608dfSDan Williams 			xfs_find_bdev_for_inode(mapping->host), wbc);
11116e2608dfSDan Williams }
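
/*
 * For DAX inodes there are no page cache pages to clean;
 * dax_writeback_mapping_range() is expected to flush any dirty DAX mappings
 * for this inode through to persistent storage rather than building bios the
 * way the buffered path above does.
 */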
11126e2608dfSDan Williams 
1113c59d87c4SChristoph Hellwig STATIC int
1114c59d87c4SChristoph Hellwig xfs_vm_releasepage(
1115c59d87c4SChristoph Hellwig 	struct page		*page,
1116c59d87c4SChristoph Hellwig 	gfp_t			gfp_mask)
1117c59d87c4SChristoph Hellwig {
111834097dfeSLukas Czerner 	trace_xfs_releasepage(page->mapping->host, page, 0, 0);
111982cb1417SChristoph Hellwig 	return iomap_releasepage(page, gfp_mask);
1120c59d87c4SChristoph Hellwig }
1121c59d87c4SChristoph Hellwig 
1122c59d87c4SChristoph Hellwig STATIC sector_t
1123c59d87c4SChristoph Hellwig xfs_vm_bmap(
1124c59d87c4SChristoph Hellwig 	struct address_space	*mapping,
1125c59d87c4SChristoph Hellwig 	sector_t		block)
1126c59d87c4SChristoph Hellwig {
1127b84e7722SChristoph Hellwig 	struct xfs_inode	*ip = XFS_I(mapping->host);
1128c59d87c4SChristoph Hellwig 
1129b84e7722SChristoph Hellwig 	trace_xfs_vm_bmap(ip);
1130db1327b1SDarrick J. Wong 
1131db1327b1SDarrick J. Wong 	/*
1132db1327b1SDarrick J. Wong 	 * The swap code (ab-)uses ->bmap to get a block mapping and then
1133793057e1SIngo Molnar 	 * bypasses the file system for actual I/O.  We really can't allow
1134db1327b1SDarrick J. Wong 	 * that on reflink inodes, so we have to skip out here.  And yes,
1135eb5e248dSDarrick J. Wong 	 * 0 is the magic code for a bmap error.
1136eb5e248dSDarrick J. Wong 	 *
1137eb5e248dSDarrick J. Wong 	 * Since we don't pass back blockdev info, we can't return bmap
1138eb5e248dSDarrick J. Wong 	 * information for rt files either.
1139db1327b1SDarrick J. Wong 	 */
114066ae56a5SChristoph Hellwig 	if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
1141db1327b1SDarrick J. Wong 		return 0;
1142b84e7722SChristoph Hellwig 	return iomap_bmap(mapping, block, &xfs_iomap_ops);
1143c59d87c4SChristoph Hellwig }
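
/*
 * For instance, an FIBMAP ioctl on a reflinked or realtime file reaches this
 * function through the generic bmap() helper and gets back 0, i.e. "no
 * mapping", rather than a raw block number that would be unsafe to use
 * directly.
 */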
1144c59d87c4SChristoph Hellwig 
1145c59d87c4SChristoph Hellwig STATIC int
1146c59d87c4SChristoph Hellwig xfs_vm_readpage(
1147c59d87c4SChristoph Hellwig 	struct file		*unused,
1148c59d87c4SChristoph Hellwig 	struct page		*page)
1149c59d87c4SChristoph Hellwig {
1150121e213eSDave Chinner 	trace_xfs_vm_readpage(page->mapping->host, 1);
11518b2e77c1SChristoph Hellwig 	return iomap_readpage(page, &xfs_iomap_ops);
1152c59d87c4SChristoph Hellwig }
1153c59d87c4SChristoph Hellwig 
1154c59d87c4SChristoph Hellwig STATIC int
1155c59d87c4SChristoph Hellwig xfs_vm_readpages(
1156c59d87c4SChristoph Hellwig 	struct file		*unused,
1157c59d87c4SChristoph Hellwig 	struct address_space	*mapping,
1158c59d87c4SChristoph Hellwig 	struct list_head	*pages,
1159c59d87c4SChristoph Hellwig 	unsigned		nr_pages)
1160c59d87c4SChristoph Hellwig {
1161121e213eSDave Chinner 	trace_xfs_vm_readpages(mapping->host, nr_pages);
11628b2e77c1SChristoph Hellwig 	return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
116322e757a4SDave Chinner }
116422e757a4SDave Chinner 
116567482129SDarrick J. Wong static int
116667482129SDarrick J. Wong xfs_iomap_swapfile_activate(
116767482129SDarrick J. Wong 	struct swap_info_struct		*sis,
116867482129SDarrick J. Wong 	struct file			*swap_file,
116967482129SDarrick J. Wong 	sector_t			*span)
117067482129SDarrick J. Wong {
117167482129SDarrick J. Wong 	sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file));
117267482129SDarrick J. Wong 	return iomap_swapfile_activate(sis, swap_file, span, &xfs_iomap_ops);
117367482129SDarrick J. Wong }
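
/*
 * The helper above only fills in the correct block device (data or realtime)
 * for the swap file; iomap_swapfile_activate() is then expected to walk the
 * file's extents via xfs_iomap_ops and register them as swap extents.
 */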
117467482129SDarrick J. Wong 
1175c59d87c4SChristoph Hellwig const struct address_space_operations xfs_address_space_operations = {
1176c59d87c4SChristoph Hellwig 	.readpage		= xfs_vm_readpage,
1177c59d87c4SChristoph Hellwig 	.readpages		= xfs_vm_readpages,
1178c59d87c4SChristoph Hellwig 	.writepage		= xfs_vm_writepage,
1179c59d87c4SChristoph Hellwig 	.writepages		= xfs_vm_writepages,
118082cb1417SChristoph Hellwig 	.set_page_dirty		= iomap_set_page_dirty,
1181c59d87c4SChristoph Hellwig 	.releasepage		= xfs_vm_releasepage,
1182c59d87c4SChristoph Hellwig 	.invalidatepage		= xfs_vm_invalidatepage,
1183c59d87c4SChristoph Hellwig 	.bmap			= xfs_vm_bmap,
11846e2608dfSDan Williams 	.direct_IO		= noop_direct_IO,
118582cb1417SChristoph Hellwig 	.migratepage		= iomap_migrate_page,
118682cb1417SChristoph Hellwig 	.is_partially_uptodate  = iomap_is_partially_uptodate,
1187c59d87c4SChristoph Hellwig 	.error_remove_page	= generic_error_remove_page,
118867482129SDarrick J. Wong 	.swap_activate		= xfs_iomap_swapfile_activate,
1189c59d87c4SChristoph Hellwig };
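
/*
 * These are the address_space_operations used for regular (non-DAX) XFS
 * inodes; the DAX variant below is much smaller because DAX inodes never
 * cache file data in page cache pages, so only the flush, direct I/O stub,
 * no-op page ops and swap activation hooks remain.
 */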
11906e2608dfSDan Williams 
11916e2608dfSDan Williams const struct address_space_operations xfs_dax_aops = {
11926e2608dfSDan Williams 	.writepages		= xfs_dax_writepages,
11936e2608dfSDan Williams 	.direct_IO		= noop_direct_IO,
11946e2608dfSDan Williams 	.set_page_dirty		= noop_set_page_dirty,
11956e2608dfSDan Williams 	.invalidatepage		= noop_invalidatepage,
119667482129SDarrick J. Wong 	.swap_activate		= xfs_iomap_swapfile_activate,
11976e2608dfSDan Williams };
1198