// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/writeback.h>

/*
 * structure owned by writepages passed to individual writepage calls
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec    imap;
	int			fork;
	unsigned int		data_seq;
	unsigned int		cow_seq;
	struct xfs_ioend	*ioend;
};

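/*
 * Return the block device backing an inode: the realtime device for
 * realtime inodes, otherwise the main data device.
 */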
struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

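/*
 * Return the DAX device backing an inode, again preferring the realtime
 * device for realtime inodes.
 */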
struct dax_device *
xfs_find_daxdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_daxdev;
	else
		return mp->m_ddev_targp->bt_daxdev;
}

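/*
 * Per-segment writeback completion.  For block size smaller than the page
 * size, the attached iomap_page write_count tracks the number of in-flight
 * bio segments targeting the page; only end page writeback once the last
 * segment has completed.
 */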
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	struct iomap_page	*iop = to_iomap_page(bvec->bv_page);

	if (error) {
		SetPageError(bvec->bv_page);
		mapping_set_error(inode->i_mapping, -EIO);
	}

	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
	ASSERT(!iop || atomic_read(&iop->write_count) > 0);

	if (!iop || atomic_dec_and_test(&iop->write_count))
		end_page_writeback(bvec->bv_page);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*bio = &ioend->io_inline_bio;
	struct bio		*last = ioend->io_bio, *next;
	u64			start = bio->bi_iter.bi_sector;
	bool			quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		int		i;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, i)
			xfs_finish_page_writeback(inode, bvec, error);
		bio_put(bio);
	}

	if (unlikely(error && !quiet)) {
		xfs_err_ratelimited(XFS_I(inode)->i_mount,
			"writeback error on sector %llu", start);
	}
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

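/*
 * Reserve log space for a possible on-disk inode size update before I/O
 * is submitted.  The transaction, along with the freeze protection it
 * carries, is handed over to the I/O completion worker.
 */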
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0,
				XFS_TRANS_NOFS, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

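/*
 * Complete the deferred file size update for an ioend, consuming the
 * transaction that was reserved at submission time.
 */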
STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error;

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		if (ioend->io_fork == XFS_COW_FORK)
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	if (ioend->io_fork == XFS_COW_FORK)
		error = xfs_reflink_end_cow(ip, offset, size);
	else if (ioend->io_state == XFS_EXT_UNWRITTEN)
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
	else
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);

done:
	if (ioend->io_append_trans)
		error = xfs_setfilesize_ioend(ioend, error);
	xfs_destroy_ioend(ioend, error);
}

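/*
 * Bio completion handler.  COW remapping, unwritten extent conversion and
 * deferred size updates all need transaction context, which we cannot get
 * here, so punt those ioends to a workqueue; everything else can be torn
 * down immediately.
 */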
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

	if (ioend->io_fork == XFS_COW_FORK ||
	    ioend->io_state == XFS_EXT_UNWRITTEN)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

/*
 * Fast revalidation of the cached writeback mapping. Return true if the
 * current mapping is valid, false otherwise.
 */
static bool
xfs_imap_valid(
	struct xfs_writepage_ctx	*wpc,
	struct xfs_inode		*ip,
	xfs_fileoff_t			offset_fsb)
{
	if (offset_fsb < wpc->imap.br_startoff ||
	    offset_fsb >= wpc->imap.br_startoff + wpc->imap.br_blockcount)
		return false;
	/*
	 * If this is a COW mapping, it is sufficient to check that the mapping
	 * covers the offset. Be careful to check this first because the caller
	 * can revalidate a COW mapping without updating the data seqno.
	 */
	if (wpc->fork == XFS_COW_FORK)
		return true;

	/*
	 * This is not a COW mapping. Check the sequence number of the data fork
	 * because concurrent changes could have invalidated the extent. Check
	 * the COW fork because concurrent changes since the last time we
	 * checked (and found nothing at this offset) could have added
	 * overlapping blocks.
	 */
	if (wpc->data_seq != READ_ONCE(ip->i_df.if_seq))
		return false;
	if (xfs_inode_has_cow_data(ip) &&
	    wpc->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
		return false;
	return true;
}

/*
 * Pass in a delalloc extent and convert it to real extents; return the real
 * extent that maps offset_fsb in wpc->imap.
 *
 * The current page is held locked so nothing could have removed the block
 * backing offset_fsb, although it could have moved from the COW to the data
 * fork by another thread.
 */
static int
xfs_convert_blocks(
	struct xfs_writepage_ctx *wpc,
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb)
{
	int			error;

	/*
	 * Attempt to allocate whatever delalloc extent currently backs
	 * offset_fsb and put the result into wpc->imap.  Allocate in a loop
	 * because it may take several attempts to allocate real blocks for a
	 * contiguous delalloc extent if free space is sufficiently fragmented.
	 */
	do {
		error = xfs_bmapi_convert_delalloc(ip, wpc->fork, offset_fsb,
				&wpc->imap, wpc->fork == XFS_COW_FORK ?
					&wpc->cow_seq : &wpc->data_seq);
		if (error)
			return error;
	} while (wpc->imap.br_startoff + wpc->imap.br_blockcount <= offset_fsb);

	return 0;
}

STATIC int
xfs_map_blocks(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_fileoff_t		cow_fsb = NULLFILEOFF;
	struct xfs_bmbt_irec	imap;
	struct xfs_iext_cursor	icur;
	int			retries = 0;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * COW fork blocks can overlap data fork blocks even if the blocks
	 * aren't shared.  COW I/O always takes precedence, so we must always
	 * check for overlap on reflink inodes unless the mapping is already a
	 * COW one, or the COW fork hasn't changed from the last time we looked
	 * at it.
	 *
	 * It's safe to check the COW fork if_seq here without the ILOCK because
	 * we've indirectly protected against concurrent updates: writeback has
	 * the page locked, which prevents concurrent invalidations by reflink
	 * and directio and prevents concurrent buffered writes to the same
	 * page.  Changes to if_seq always happen under i_lock, which protects
	 * against concurrent updates and provides a memory barrier on the way
	 * out that ensures that we always see the current value.
	 */
	if (xfs_imap_valid(wpc, ip, offset_fsb))
		return 0;

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset.  This will convert delayed allocations (including COW ones)
	 * into real extents.  If we return without a valid map, it means we
	 * landed in a hole and we skip the block.
	 */
retry:
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));

	/*
	 * Check if this offset is covered by a COW extent, and if so use
	 * it directly instead of looking up anything in the data fork.
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
		cow_fsb = imap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);

		wpc->fork = XFS_COW_FORK;
		goto allocate_blocks;
	}

	/*
	 * No COW extent overlap. Revalidate now that we may have updated
	 * ->cow_seq. If the data mapping is still valid, we're done.
	 */
	if (xfs_imap_valid(wpc, ip, offset_fsb)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return 0;
	}

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset.  This will convert delayed allocations (including COW ones)
	 * into real extents.
	 */
	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
	wpc->data_seq = READ_ONCE(ip->i_df.if_seq);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	wpc->fork = XFS_DATA_FORK;

	if (imap.br_startoff > offset_fsb) {
		/* landed in a hole or beyond EOF */
		imap.br_blockcount = imap.br_startoff - offset_fsb;
		imap.br_startoff = offset_fsb;
		imap.br_startblock = HOLESTARTBLOCK;
		imap.br_state = XFS_EXT_NORM;
	} else {
		/*
		 * Truncate to the next COW extent if there is one.  This is the
		 * only opportunity to do this because we can skip COW fork
		 * lookups for the subsequent blocks in the mapping; however,
		 * the requirement to treat the COW range separately remains.
		 */
		if (cow_fsb != NULLFILEOFF &&
		    cow_fsb < imap.br_startoff + imap.br_blockcount)
			imap.br_blockcount = cow_fsb - imap.br_startoff;

		/* got a delalloc extent? */
		if (isnullstartblock(imap.br_startblock))
			goto allocate_blocks;
	}

	wpc->imap = imap;
	trace_xfs_map_blocks_found(ip, offset, count, wpc->fork, &imap);
	return 0;
allocate_blocks:
	error = xfs_convert_blocks(wpc, ip, offset_fsb);
	if (error) {
		/*
		 * If we failed to find the extent in the COW fork we might have
		 * raced with a COW to data fork conversion or truncate.
		 * Restart the lookup to catch the extent in the data fork for
		 * the former case, but prevent additional retries to avoid
		 * looping forever for the latter case.
		 */
		if (error == -EAGAIN && wpc->fork == XFS_COW_FORK && !retries++)
			goto retry;
		ASSERT(error != -EAGAIN);
		return error;
	}

	/*
	 * Due to merging, the returned real extent might be larger than the
	 * original delalloc one.  Trim the returned extent to the next COW
	 * boundary again to force a re-lookup.
	 */
	if (wpc->fork != XFS_COW_FORK && cow_fsb != NULLFILEOFF &&
	    cow_fsb < wpc->imap.br_startoff + wpc->imap.br_blockcount)
		wpc->imap.br_blockcount = cow_fsb - wpc->imap.br_startoff;

	ASSERT(wpc->imap.br_startoff <= offset_fsb);
	ASSERT(wpc->imap.br_startoff + wpc->imap.br_blockcount > offset_fsb);
	trace_xfs_map_blocks_alloc(ip, offset, count, wpc->fork, &imap);
	return 0;
}

/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once. In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
 *
 * If @status is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio and ioend
 * rather than submit it to IO. This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	/* Convert CoW extents to regular */
	if (!status && ioend->io_fork == XFS_COW_FORK) {
		/*
		 * Yuk. This can do memory allocation, but is not a
		 * transactional operation so everything is done in GFP_KERNEL
		 * context. That can deadlock, because we hold pages in
		 * writeback state and GFP_KERNEL allocations can block on them.
		 * Hence we must operate in nofs conditions here.
		 */
		unsigned nofs_flag;

		nofs_flag = memalloc_nofs_save();
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
		memalloc_nofs_restore(nofs_flag);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    (ioend->io_fork == XFS_COW_FORK ||
	     ioend->io_state != XFS_EXT_UNWRITTEN) &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it. This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_status = errno_to_blk_status(status);
		bio_endio(ioend->io_bio);
		return status;
	}

	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	return 0;
}

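/*
 * Allocate a new ioend for the given fork/extent state and file offset.
 * The ioend is embedded in its initial bio (io_inline_bio), so a single
 * mempool-backed allocation covers both structures.
 */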
static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	int			fork,
	xfs_exntst_t		state,
	xfs_off_t		offset,
	struct block_device	*bdev,
	sector_t		sector)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_fork = fork;
	ioend->io_state = state;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	INIT_WORK(&ioend->io_work, xfs_end_io);
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
	struct xfs_ioend	*ioend,
	struct writeback_control *wbc,
	struct block_device	*bdev,
	sector_t		sector)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	bio_set_dev(new, bdev);
	new->bi_iter.bi_sector = sector;
	bio_chain(ioend->io_bio, new);
	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	ioend->io_bio = new;
}

/*
 * Test to see if we have an existing ioend structure that we could append to
 * first, otherwise finish off the current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	xfs_off_t		offset,
	struct page		*page,
	struct iomap_page	*iop,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	unsigned		len = i_blocksize(inode);
	unsigned		poff = offset & (PAGE_SIZE - 1);
	sector_t		sector;

	sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
		((offset - XFS_FSB_TO_B(mp, wpc->imap.br_startoff)) >> 9);

	if (!wpc->ioend ||
	    wpc->fork != wpc->ioend->io_fork ||
	    wpc->imap.br_state != wpc->ioend->io_state ||
	    sector != bio_end_sector(wpc->ioend->io_bio) ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->fork,
				wpc->imap.br_state, offset, bdev, sector);
	}

	if (!__bio_try_merge_page(wpc->ioend->io_bio, page, len, poff)) {
		if (iop)
			atomic_inc(&iop->write_count);
		if (bio_full(wpc->ioend->io_bio))
			xfs_chain_bio(wpc->ioend, wbc, bdev, sector);
		__bio_add_page(wpc->ioend->io_bio, page, len, poff);
	}

	wpc->ioend->io_size += len;
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
	iomap_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc blocks on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip up a later direct I/O read operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the page.  Because
 * they are delalloc, we can do this without needing a transaction. Indeed - if
 * we get ENOSPC errors, we have to be able to do this truncation without a
 * transaction as there is no space left for block reservation (typically why
 * we see an ENOSPC in writeback).
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			offset = page_offset(page);
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, offset);
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_invalidate;

	xfs_alert(mp,
		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
			PAGE_SIZE / i_blocksize(inode));
	if (error && !XFS_FORCED_SHUTDOWN(mp))
		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding blocks to is cached on the writepage context, and if the new block
 * does not append to the cached ioend it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected.  While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct iomap_page	*iop = to_iomap_page(page);
	unsigned		len = i_blocksize(inode);
	struct xfs_ioend	*ioend, *next;
	uint64_t		file_offset;	/* file offset of page */
	int			error = 0, count = 0, i;

	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
	ASSERT(!iop || atomic_read(&iop->write_count) == 0);

	/*
	 * Walk through the page to find areas to write back. If we run off the
	 * end of the current map or find the current map invalid, grab a new
	 * one.
	 */
	for (i = 0, file_offset = page_offset(page);
	     i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
	     i++, file_offset += len) {
		if (iop && !test_bit(i, iop->uptodate))
			continue;

		error = xfs_map_blocks(wpc, inode, file_offset);
		if (error)
			break;
		if (wpc->imap.br_startblock == HOLESTARTBLOCK)
			continue;
		xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
				 &submit_list);
		count++;
	}

	ASSERT(wpc->ioend || list_empty(&submit_list));
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * On error, we have to fail the ioend here because we may have set
	 * pages under writeback, we have to make sure we run IO completion to
	 * mark the error state of the IO appropriately, so we can't cancel the
	 * ioend directly here.  That means we have to mark this page as under
	 * writeback if we included any blocks from it in the ioend chain so
	 * that completion treats it correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * now.  The caller will still need to trigger submission of outstanding
	 * ioends on the writepage context so they are treated correctly on
	 * error.
	 */
	if (unlikely(error)) {
		if (!count) {
			xfs_aops_discard_page(page);
			ClearPageUptodate(page);
			unlock_page(page);
			goto done;
		}

		/*
		 * If the page was not fully cleaned, we need to ensure that the
		 * higher layers come back to it correctly.  That means we need
		 * to keep the page dirty, and for WB_SYNC_ALL writeback we need
		 * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
		 * so another attempt to write this page in this writeback sweep
		 * will be made.
		 */
		set_page_writeback_keepwrite(page);
	} else {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	}

	unlock_page(page);

	/*
	 * Preserve the original error if there was one, otherwise catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
		int error2;

		list_del_init(&ioend->io_list);
		error2 = xfs_submit_ioend(wbc, ioend, error);
		if (error2 && !error)
			error = error2;
	}

	/*
	 * We can end up here with no error and nothing to write only if we race
	 * with a partial page truncate on a sub-page block sized filesystem.
	 */
	if (!count)
		end_page_writeback(page);
done:
	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * If the page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on a 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and be written out repeatedly, which would result in an
		 * infinite loop; the user program that performs this operation
		 * will hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

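/*
 * ->writepage entry point: write a single page with a private writepage
 * context, then submit any ioend that is still cached on it.
 */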
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

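/*
 * ->writepages entry point: iterate the dirty pages with a shared
 * writepage context so the cached mapping and ioend carry over between
 * pages, then submit the final cached ioend.
 */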
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

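/*
 * DAX inodes bypass the page cache for data, so writeback here just
 * flushes dirty DAX mappings on the backing device.
 */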
STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_find_bdev_for_inode(mapping->host), wbc);
}

STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	trace_xfs_releasepage(page->mapping->host, page, 0, 0);
	return iomap_releasepage(page, gfp_mask);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_vm_bmap(ip);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;
	return iomap_bmap(mapping, block, &xfs_iomap_ops);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return iomap_readpage(page, &xfs_iomap_ops);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
}

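/*
 * Activate a swap file by mapping its extents through iomap and pointing
 * the swap code at the backing block device.
 */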
static int
xfs_iomap_swapfile_activate(
	struct swap_info_struct		*sis,
	struct file			*swap_file,
	sector_t			*span)
{
	sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file));
	return iomap_swapfile_activate(sis, swap_file, span, &xfs_iomap_ops);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= iomap_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= noop_direct_IO,
	.migratepage		= iomap_migrate_page,
	.is_partially_uptodate  = iomap_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= xfs_iomap_swapfile_activate,
};

const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
	.swap_activate		= xfs_iomap_swapfile_activate,
};