/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include <linux/aio.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}
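/*
 * Note that the check above reads i_d.di_size without holding the ilock,
 * which is why it is "fast and loose": it can race with a concurrent size
 * update.  A false positive only costs an unnecessary transaction
 * allocation in xfs_setfilesize_trans_alloc(); the authoritative
 * comparison is redone under XFS_ILOCK_EXCL in xfs_setfilesize().  For
 * example, with an on-disk size of 4096, an ioend with io_offset == 4096
 * and io_size == 4096 is treated as appending.
 */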
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);

	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	rwsem_release(&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
		      1, _THIS_IP_);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	return 0;
}
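/*
 * The lockdep release above pairs with the rwsem_acquire_read() in
 * xfs_setfilesize_ioend(), and the PF_FSTRANS clear pairs with the
 * current_set_flags_nested() there: the transaction (and the freeze
 * protection it carries) is allocated in the submission context but
 * committed from a workqueue, so both annotations have to be handed over
 * manually across the thread boundary.
 */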
/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp, 0);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp, 0);
}

STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
	rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
			   0, 1, _THIS_IP_);

	return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}
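/*
 * xfs_new_eof() is defined outside this file (in xfs_inode.h at the time
 * of writing); roughly, it clamps offset + size to the in-core i_size and
 * returns the result only when it exceeds the current on-disk di_size,
 * returning 0 otherwise.  The "!isize" branch above is therefore the
 * "this write does not move the on-disk EOF after all" case, and the
 * reserved transaction is simply cancelled.
 */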
/*
 * Schedule IO completion handling on the final put of an ioend.
 *
 * If there is no work to do we might as well call it a day and free the
 * ioend right now.
 */
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

		if (ioend->io_type == XFS_IO_UNWRITTEN)
			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
		else if (ioend->io_append_trans)
			queue_work(mp->m_data_workqueue, &ioend->io_work);
		else
			xfs_destroy_ioend(ioend);
	}
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	int		error = 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ioend->io_error = -EIO;
		goto done;
	}
	if (ioend->io_error)
		goto done;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == XFS_IO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize_ioend(ioend);
	} else {
		ASSERT(!xfs_ioend_is_append(ioend));
	}

done:
	if (error)
		ioend->io_error = error;
	xfs_destroy_ioend(ioend);
}
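/*
 * Putting the two functions above together, the completion path for a
 * size-extending overwrite looks like:
 *
 *   xfs_end_bio() -> xfs_finish_ioend() (last reference dropped)
 *     -> queue_work(mp->m_data_workqueue, ...)
 *       -> xfs_end_io() -> xfs_setfilesize_ioend() -> xfs_destroy_ioend()
 *
 * Only unwritten extent conversion and on-disk size updates are deferred
 * to a workqueue; everything else is torn down directly from bio
 * completion (typically interrupt) context.
 */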
/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, so that the I/O completion callback
	 * cannot call the completion routine too early, i.e. before we have
	 * started all the I/O.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	ioend->io_offset = 0;
	ioend->io_size = 0;
	ioend->io_append_trans = NULL;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}
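/*
 * io_remaining is a plain submission-side reference count: each bio built
 * for the ioend takes a reference in xfs_submit_ioend_bio(), each bio
 * completion drops one in xfs_end_bio(), and the initial reference set
 * here is dropped by xfs_submit_ioend() once all bios have been issued.
 * Completion processing therefore cannot start until submission is done,
 * no matter how quickly the individual bios complete.
 */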
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type,
	int			nonblocking)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -EAGAIN;
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return error;

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return error;
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

STATIC int
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}
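/*
 * Note the unit conversion in xfs_imap_valid(): br_startoff and
 * br_blockcount are in filesystem blocks, so the byte offset is shifted
 * down by i_blkbits first.  E.g. with 4k blocks (i_blkbits == 12), byte
 * offset 20480 becomes block 5, which is covered by a mapping with
 * br_startoff == 4 and br_blockcount == 3 (blocks 4-6) but not by one
 * with br_startoff == 4 and br_blockcount == 1.
 */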
/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
}

STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;
	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}

STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);

	ASSERT(bio->bi_private == NULL);
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	return bio;
}
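/*
 * bi_sector is in 512-byte units while b_blocknr is in units of the buffer
 * size, hence the "* (bh->b_size >> 9)" above: with 4k buffers, block
 * number 10 starts at sector 80.
 */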
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * if the page was not fully cleaned, we need to ensure that the higher
	 * layers come back to it correctly. That means we need to keep the page
	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
	 * write this page in this writeback sweep will be made.
	 */
	if (clear_dirty) {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	} else
		set_page_writeback_keepwrite(page);

	unlock_page(page);

	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we go, then we can end up with a page that only has some buffers
 * marked async write, and I/O completion can occur before we mark the other
 * buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 *
 * If @fail is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the ioend chain rather
 * than submit it to IO. This typically only happens on a filesystem shutdown.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	int			fail)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		/*
		 * If we are failing the IO now, just mark the ioend with an
		 * error and finish it. This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
		 * time.
		 */
		if (fail) {
			ioend->io_error = fail;
			xfs_finish_ioend(ioend);
			continue;
		}

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (xfs_bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}
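/*
 * The retry label above implements a simple bio-building loop: keep adding
 * buffers to the current bio while they are physically contiguous
 * (b_blocknr == lastblock + 1) and the bio has room; otherwise submit what
 * we have and start a new bio for the current buffer.  A single ioend can
 * therefore produce several bios, which is why io_remaining is a counter
 * rather than a flag.
 */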
/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			/*
			 * The unwritten flag is cleared when added to the
			 * ioend. We're not submitting for I/O so mark the
			 * buffer unwritten again for next time around.
			 */
			if (ioend->io_type == XFS_IO_UNWRITTEN)
				set_buffer_unwritten(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * *result is updated to point at the ioend the buffer was added to.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}
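/*
 * The b_blocknr computation in xfs_map_buffer() moves between three units:
 * the mapping's start is converted from filesystem blocks to a 512-byte
 * daddr (xfs_fsb_to_db), shifted back down to filesystem-block granularity
 * (i_blkbits - BBSHIFT, BBSHIFT being 9), and then offset by how many
 * blocks into the mapping this buffer sits.  For 4k blocks, a daddr of 800
 * yields block 100, and a buffer 8k into the mapping lands on block 102.
 */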
/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
	struct page		*page,
	unsigned int		type,
	bool			check_all_buffers)
{
	struct buffer_head	*bh;
	struct buffer_head	*head;

	if (PageWriteback(page))
		return false;
	if (!page->mapping)
		return false;
	if (!page_has_buffers(page))
		return false;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)
				return true;
		}

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
			break;
	} while ((bh = bh->b_this_page) != head);

	return false;
}

/*
 * Allocate & map buffers for page given the extent map. Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only, for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_check_page_type(page, (*ioendp)->io_type, false))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	/*
	 * If the current map does not span the entire page we are about to try
	 * to write, then give up. The only way we can write a page that spans
	 * multiple mappings in a single writeback iteration is via the
	 * xfs_vm_writepage() function. Data integrity writeback requires the
	 * entire page to be written in a single attempt, otherwise the part of
	 * the page we don't write here doesn't get written as part of the data
	 * integrity sync.
	 *
	 * For normal writeback, we also don't attempt to write partial pages
	 * here as it simply means that write_cache_pages() will see it under
	 * writeback and ignore the page until some point in the future, at
	 * which time this will be the only page in the file that needs
	 * writeback.  Hence for more optimal IO patterns, we should always
	 * avoid partial page writeback due to multiple mappings on a page here.
	 */
	if (!xfs_imap_valid(inode, imap, end_offset))
		goto fail_unlock_page;

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	/*
	 * The moment we find a buffer that doesn't match our current type
	 * specification or can't be written, abort the loop and start
	 * writeback. As per the above xfs_imap_valid() check, only
	 * xfs_vm_writepage() can handle partial page writeback fully - we are
	 * limited here to the buffers that are contiguous with the current
	 * ioend, and hence a buffer we can't write breaks that contiguity and
	 * we have to defer the rest of the IO to xfs_vm_writepage().
	 */
	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			break;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    buffer_mapped(bh)) {
			if (buffer_unwritten(bh))
				type = XFS_IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = XFS_IO_DELALLOC;
			else
				type = XFS_IO_OVERWRITE;

			/*
			 * imap should always be valid because of the above
			 * partial page end_offset check on the imap.
			 */
			ASSERT(xfs_imap_valid(inode, imap, offset));

			lock_buffer(bh);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			done = 1;
			break;
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}
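/*
 * A worked example of the page_dirty derivation above: with 4k pages and
 * 1k blocks, a file size of 10240 bytes puts end_offset at 10240 on the
 * third page (index 2), so the masked value is 2048, which rounds up to
 * itself and page_dirty is 2 -- only the two buffers before EOF need
 * cleaning.  On a fully-covered page the masked value is 0, p_offset
 * falls back to PAGE_CACHE_SIZE and page_dirty is 4.
 */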
/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
						imap, ioendp, wbc);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);
	block_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see a ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
	return;
}
/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct inode		*inode = page->mapping->host;
	struct buffer_head	*bh, *head;
	struct xfs_bmbt_irec	imap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index;
	ssize_t			len;
	int			err, imap_valid = 0, uptodate = 1;
	int			count = 0;
	int			nonblocking = 0;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
		goto redirty;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;

	/*
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		    | <EOF>    |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N | Beyond |
		 * ^--------------------------------^----------|---------
		 * |				    | Straddles |
		 * ---------------------------------^----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and be written out repeatedly which would result in an
		 * infinite loop, the user program that perform this operation
		 * will hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if it's
		 * offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}
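
	/*
	 * Concretely, with 4k pages (PAGE_CACHE_SHIFT == 12) on a 32-bit
	 * machine, an i_size just below 16TB gives end_index == 0xFFFFFFFF,
	 * so "end_index + 1" wraps to 0 in an unsigned long and a
	 * "page->index >= end_index + 1" style check would never fire for
	 * beyond-EOF pages.  That is why the code above compares against
	 * end_index itself and handles the offset_into_page == 0 case
	 * separately.
	 */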
For a file 10386b7a03f0SChristoph Hellwig * that is not a multiple of the page size, the remaining 10396b7a03f0SChristoph Hellwig * memory is zeroed when mapped, and writes to that region are 10406b7a03f0SChristoph Hellwig * not written out to the file." 10416b7a03f0SChristoph Hellwig */ 10426b7a03f0SChristoph Hellwig zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE); 10438695d27eSJie Liu 10448695d27eSJie Liu /* Adjust the end_offset to the end of file */ 10458695d27eSJie Liu end_offset = offset; 1046c59d87c4SChristoph Hellwig } 1047c59d87c4SChristoph Hellwig 1048c59d87c4SChristoph Hellwig len = 1 << inode->i_blkbits; 1049c59d87c4SChristoph Hellwig 1050c59d87c4SChristoph Hellwig bh = head = page_buffers(page); 1051c59d87c4SChristoph Hellwig offset = page_offset(page); 10520d882a36SAlain Renaud type = XFS_IO_OVERWRITE; 1053c59d87c4SChristoph Hellwig 1054c59d87c4SChristoph Hellwig if (wbc->sync_mode == WB_SYNC_NONE) 1055c59d87c4SChristoph Hellwig nonblocking = 1; 1056c59d87c4SChristoph Hellwig 1057c59d87c4SChristoph Hellwig do { 1058c59d87c4SChristoph Hellwig int new_ioend = 0; 1059c59d87c4SChristoph Hellwig 1060c59d87c4SChristoph Hellwig if (offset >= end_offset) 1061c59d87c4SChristoph Hellwig break; 1062c59d87c4SChristoph Hellwig if (!buffer_uptodate(bh)) 1063c59d87c4SChristoph Hellwig uptodate = 0; 1064c59d87c4SChristoph Hellwig 1065c59d87c4SChristoph Hellwig /* 1066c59d87c4SChristoph Hellwig * set_page_dirty dirties all buffers in a page, independent 1067c59d87c4SChristoph Hellwig * of their state. The dirty state however is entirely 1068c59d87c4SChristoph Hellwig * meaningless for holes (!mapped && uptodate), so skip 1069c59d87c4SChristoph Hellwig * buffers covering holes here. 1070c59d87c4SChristoph Hellwig */ 1071c59d87c4SChristoph Hellwig if (!buffer_mapped(bh) && buffer_uptodate(bh)) { 1072c59d87c4SChristoph Hellwig imap_valid = 0; 1073c59d87c4SChristoph Hellwig continue; 1074c59d87c4SChristoph Hellwig } 1075c59d87c4SChristoph Hellwig 1076c59d87c4SChristoph Hellwig if (buffer_unwritten(bh)) { 10770d882a36SAlain Renaud if (type != XFS_IO_UNWRITTEN) { 10780d882a36SAlain Renaud type = XFS_IO_UNWRITTEN; 1079c59d87c4SChristoph Hellwig imap_valid = 0; 1080c59d87c4SChristoph Hellwig } 1081c59d87c4SChristoph Hellwig } else if (buffer_delay(bh)) { 10820d882a36SAlain Renaud if (type != XFS_IO_DELALLOC) { 10830d882a36SAlain Renaud type = XFS_IO_DELALLOC; 1084c59d87c4SChristoph Hellwig imap_valid = 0; 1085c59d87c4SChristoph Hellwig } 1086c59d87c4SChristoph Hellwig } else if (buffer_uptodate(bh)) { 10870d882a36SAlain Renaud if (type != XFS_IO_OVERWRITE) { 10880d882a36SAlain Renaud type = XFS_IO_OVERWRITE; 1089c59d87c4SChristoph Hellwig imap_valid = 0; 1090c59d87c4SChristoph Hellwig } 1091c59d87c4SChristoph Hellwig } else { 10927d0fa3ecSAlain Renaud if (PageUptodate(page)) 1093c59d87c4SChristoph Hellwig ASSERT(buffer_mapped(bh)); 10947d0fa3ecSAlain Renaud /* 10957d0fa3ecSAlain Renaud * This buffer is not uptodate and will not be 10967d0fa3ecSAlain Renaud * written to disk. Ensure that we will put any 10977d0fa3ecSAlain Renaud * subsequent writeable buffers into a new 10987d0fa3ecSAlain Renaud * ioend. 
10997d0fa3ecSAlain Renaud */ 1100c59d87c4SChristoph Hellwig imap_valid = 0; 1101c59d87c4SChristoph Hellwig continue; 1102c59d87c4SChristoph Hellwig } 1103c59d87c4SChristoph Hellwig 1104c59d87c4SChristoph Hellwig if (imap_valid) 1105c59d87c4SChristoph Hellwig imap_valid = xfs_imap_valid(inode, &imap, offset); 1106c59d87c4SChristoph Hellwig if (!imap_valid) { 1107c59d87c4SChristoph Hellwig /* 1108c59d87c4SChristoph Hellwig * If we didn't have a valid mapping then we need to 1109c59d87c4SChristoph Hellwig * put the new mapping into a separate ioend structure. 1110c59d87c4SChristoph Hellwig * This ensures non-contiguous extents always have 1111c59d87c4SChristoph Hellwig * separate ioends, which is particularly important 1112c59d87c4SChristoph Hellwig * for unwritten extent conversion at I/O completion 1113c59d87c4SChristoph Hellwig * time. 1114c59d87c4SChristoph Hellwig */ 1115c59d87c4SChristoph Hellwig new_ioend = 1; 1116c59d87c4SChristoph Hellwig err = xfs_map_blocks(inode, offset, &imap, type, 1117c59d87c4SChristoph Hellwig nonblocking); 1118c59d87c4SChristoph Hellwig if (err) 1119c59d87c4SChristoph Hellwig goto error; 1120c59d87c4SChristoph Hellwig imap_valid = xfs_imap_valid(inode, &imap, offset); 1121c59d87c4SChristoph Hellwig } 1122c59d87c4SChristoph Hellwig if (imap_valid) { 1123c59d87c4SChristoph Hellwig lock_buffer(bh); 11240d882a36SAlain Renaud if (type != XFS_IO_OVERWRITE) 1125c59d87c4SChristoph Hellwig xfs_map_at_offset(inode, bh, &imap, offset); 1126c59d87c4SChristoph Hellwig xfs_add_to_ioend(inode, bh, offset, type, &ioend, 1127c59d87c4SChristoph Hellwig new_ioend); 1128c59d87c4SChristoph Hellwig count++; 1129c59d87c4SChristoph Hellwig } 1130c59d87c4SChristoph Hellwig 1131c59d87c4SChristoph Hellwig if (!iohead) 1132c59d87c4SChristoph Hellwig iohead = ioend; 1133c59d87c4SChristoph Hellwig 1134c59d87c4SChristoph Hellwig } while (offset += len, ((bh = bh->b_this_page) != head)); 1135c59d87c4SChristoph Hellwig 1136c59d87c4SChristoph Hellwig if (uptodate && bh == head) 1137c59d87c4SChristoph Hellwig SetPageUptodate(page); 1138c59d87c4SChristoph Hellwig 1139c59d87c4SChristoph Hellwig xfs_start_page_writeback(page, 1, count); 1140c59d87c4SChristoph Hellwig 11417bf7f352SDave Chinner /* if there is no IO to be submitted for this page, we are done */ 11427bf7f352SDave Chinner if (!ioend) 11437bf7f352SDave Chinner return 0; 11447bf7f352SDave Chinner 11457bf7f352SDave Chinner ASSERT(iohead); 11467bf7f352SDave Chinner 11477bf7f352SDave Chinner /* 11487bf7f352SDave Chinner * Any errors from this point onwards need to be reported through the IO 11497bf7f352SDave Chinner * completion path as we have marked the initial page as under writeback 11507bf7f352SDave Chinner * and unlocked it.
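 *
 * (Aside, with illustrative numbers: the clustering below converts the
 * mapped extent to a page index.  With 4k blocks and 4k pages, an imap
 * of br_startoff = 8 and br_blockcount = 16 gives
 *
 *	end_index = 24 blocks = 24 << 12 = 98304 bytes
 *	(98304 - 1) >> PAGE_CACHE_SHIFT = page 23
 *
 * so xfs_cluster_write() walks pages up to index 23, clamped to
 * last_index.)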
11517bf7f352SDave Chinner */ 11527bf7f352SDave Chinner if (imap_valid) { 1153c59d87c4SChristoph Hellwig xfs_off_t end_index; 1154c59d87c4SChristoph Hellwig 1155c59d87c4SChristoph Hellwig end_index = imap.br_startoff + imap.br_blockcount; 1156c59d87c4SChristoph Hellwig 1157c59d87c4SChristoph Hellwig /* to bytes */ 1158c59d87c4SChristoph Hellwig end_index <<= inode->i_blkbits; 1159c59d87c4SChristoph Hellwig 1160c59d87c4SChristoph Hellwig /* to pages */ 1161c59d87c4SChristoph Hellwig end_index = (end_index - 1) >> PAGE_CACHE_SHIFT; 1162c59d87c4SChristoph Hellwig 1163c59d87c4SChristoph Hellwig /* check against file size */ 1164c59d87c4SChristoph Hellwig if (end_index > last_index) 1165c59d87c4SChristoph Hellwig end_index = last_index; 1166c59d87c4SChristoph Hellwig 1167c59d87c4SChristoph Hellwig xfs_cluster_write(inode, page->index + 1, &imap, &ioend, 1168c59d87c4SChristoph Hellwig wbc, end_index); 1169c59d87c4SChristoph Hellwig } 1170c59d87c4SChristoph Hellwig 1171281627dfSChristoph Hellwig 11727bf7f352SDave Chinner /* 11737bf7f352SDave Chinner * Reserve log space if we might write beyond the on-disk inode size. 11747bf7f352SDave Chinner */ 11757bf7f352SDave Chinner err = 0; 11767bf7f352SDave Chinner if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend)) 11777bf7f352SDave Chinner err = xfs_setfilesize_trans_alloc(ioend); 11787bf7f352SDave Chinner 11797bf7f352SDave Chinner xfs_submit_ioend(wbc, iohead, err); 1180c59d87c4SChristoph Hellwig 1181c59d87c4SChristoph Hellwig return 0; 1182c59d87c4SChristoph Hellwig 1183c59d87c4SChristoph Hellwig error: 1184c59d87c4SChristoph Hellwig if (iohead) 1185c59d87c4SChristoph Hellwig xfs_cancel_ioend(iohead); 1186c59d87c4SChristoph Hellwig 1187c59d87c4SChristoph Hellwig if (err == -EAGAIN) 1188c59d87c4SChristoph Hellwig goto redirty; 1189c59d87c4SChristoph Hellwig 1190c59d87c4SChristoph Hellwig xfs_aops_discard_page(page); 1191c59d87c4SChristoph Hellwig ClearPageUptodate(page); 1192c59d87c4SChristoph Hellwig unlock_page(page); 1193c59d87c4SChristoph Hellwig return err; 1194c59d87c4SChristoph Hellwig 1195c59d87c4SChristoph Hellwig redirty: 1196c59d87c4SChristoph Hellwig redirty_page_for_writepage(wbc, page); 1197c59d87c4SChristoph Hellwig unlock_page(page); 1198c59d87c4SChristoph Hellwig return 0; 1199c59d87c4SChristoph Hellwig } 1200c59d87c4SChristoph Hellwig 1201c59d87c4SChristoph Hellwig STATIC int 1202c59d87c4SChristoph Hellwig xfs_vm_writepages( 1203c59d87c4SChristoph Hellwig struct address_space *mapping, 1204c59d87c4SChristoph Hellwig struct writeback_control *wbc) 1205c59d87c4SChristoph Hellwig { 1206c59d87c4SChristoph Hellwig xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED); 1207c59d87c4SChristoph Hellwig return generic_writepages(mapping, wbc); 1208c59d87c4SChristoph Hellwig } 1209c59d87c4SChristoph Hellwig 1210c59d87c4SChristoph Hellwig /* 1211c59d87c4SChristoph Hellwig * Called to move a page into cleanable state - and from there 1212c59d87c4SChristoph Hellwig * to be released. The page should already be clean. We always 1213c59d87c4SChristoph Hellwig * have buffer heads in this call. 1214c59d87c4SChristoph Hellwig * 1215c59d87c4SChristoph Hellwig * Returns 1 if the page is ok to release, 0 otherwise. 
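 *
 * In sketch form (illustrative, equivalent to the body below): count
 * the page state, refuse to release pages with delalloc or unwritten
 * buffers, and otherwise hand off to try_to_free_buffers():
 *
 *	xfs_count_page_state(page, &delalloc, &unwritten);
 *	if (delalloc || unwritten)
 *		return 0;
 *	return try_to_free_buffers(page);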
1216c59d87c4SChristoph Hellwig */ 1217c59d87c4SChristoph Hellwig STATIC int 1218c59d87c4SChristoph Hellwig xfs_vm_releasepage( 1219c59d87c4SChristoph Hellwig struct page *page, 1220c59d87c4SChristoph Hellwig gfp_t gfp_mask) 1221c59d87c4SChristoph Hellwig { 1222c59d87c4SChristoph Hellwig int delalloc, unwritten; 1223c59d87c4SChristoph Hellwig 122434097dfeSLukas Czerner trace_xfs_releasepage(page->mapping->host, page, 0, 0); 1225c59d87c4SChristoph Hellwig 1226c59d87c4SChristoph Hellwig xfs_count_page_state(page, &delalloc, &unwritten); 1227c59d87c4SChristoph Hellwig 1228448011e2SChristoph Hellwig if (WARN_ON_ONCE(delalloc)) 1229c59d87c4SChristoph Hellwig return 0; 1230448011e2SChristoph Hellwig if (WARN_ON_ONCE(unwritten)) 1231c59d87c4SChristoph Hellwig return 0; 1232c59d87c4SChristoph Hellwig 1233c59d87c4SChristoph Hellwig return try_to_free_buffers(page); 1234c59d87c4SChristoph Hellwig } 1235c59d87c4SChristoph Hellwig 1236c59d87c4SChristoph Hellwig STATIC int 1237c59d87c4SChristoph Hellwig __xfs_get_blocks( 1238c59d87c4SChristoph Hellwig struct inode *inode, 1239c59d87c4SChristoph Hellwig sector_t iblock, 1240c59d87c4SChristoph Hellwig struct buffer_head *bh_result, 1241c59d87c4SChristoph Hellwig int create, 1242c59d87c4SChristoph Hellwig int direct) 1243c59d87c4SChristoph Hellwig { 1244c59d87c4SChristoph Hellwig struct xfs_inode *ip = XFS_I(inode); 1245c59d87c4SChristoph Hellwig struct xfs_mount *mp = ip->i_mount; 1246c59d87c4SChristoph Hellwig xfs_fileoff_t offset_fsb, end_fsb; 1247c59d87c4SChristoph Hellwig int error = 0; 1248c59d87c4SChristoph Hellwig int lockmode = 0; 1249c59d87c4SChristoph Hellwig struct xfs_bmbt_irec imap; 1250c59d87c4SChristoph Hellwig int nimaps = 1; 1251c59d87c4SChristoph Hellwig xfs_off_t offset; 1252c59d87c4SChristoph Hellwig ssize_t size; 1253c59d87c4SChristoph Hellwig int new = 0; 1254c59d87c4SChristoph Hellwig 1255c59d87c4SChristoph Hellwig if (XFS_FORCED_SHUTDOWN(mp)) 1256b474c7aeSEric Sandeen return -EIO; 1257c59d87c4SChristoph Hellwig 1258c59d87c4SChristoph Hellwig offset = (xfs_off_t)iblock << inode->i_blkbits; 1259c59d87c4SChristoph Hellwig ASSERT(bh_result->b_size >= (1 << inode->i_blkbits)); 1260c59d87c4SChristoph Hellwig size = bh_result->b_size; 1261c59d87c4SChristoph Hellwig 1262c59d87c4SChristoph Hellwig if (!create && direct && offset >= i_size_read(inode)) 1263c59d87c4SChristoph Hellwig return 0; 1264c59d87c4SChristoph Hellwig 1265507630b2SDave Chinner /* 1266507630b2SDave Chinner * Direct I/O is usually done on preallocated files, so try getting 1267507630b2SDave Chinner * a block mapping without an exclusive lock first. For buffered 1268507630b2SDave Chinner * writes we already have the exclusive iolock anyway, so avoiding 1269507630b2SDave Chinner * a lock roundtrip here by taking the ilock exclusive from the 1270507630b2SDave Chinner * beginning is a useful micro optimization. 
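 *
 * A minimal sketch of that lock choice (the helper name is
 * hypothetical, not part of this file):
 *
 *	static int xfs_get_blocks_lock_mode(struct xfs_inode *ip,
 *					    int create, int direct)
 *	{
 *		if (create && !direct) {
 *			xfs_ilock(ip, XFS_ILOCK_EXCL);
 *			return XFS_ILOCK_EXCL;
 *		}
 *		return xfs_ilock_data_map_shared(ip);
 *	}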
1271507630b2SDave Chinner */ 1272507630b2SDave Chinner if (create && !direct) { 1273c59d87c4SChristoph Hellwig lockmode = XFS_ILOCK_EXCL; 1274c59d87c4SChristoph Hellwig xfs_ilock(ip, lockmode); 1275c59d87c4SChristoph Hellwig } else { 1276309ecac8SChristoph Hellwig lockmode = xfs_ilock_data_map_shared(ip); 1277c59d87c4SChristoph Hellwig } 1278c59d87c4SChristoph Hellwig 1279d2c28191SDave Chinner ASSERT(offset <= mp->m_super->s_maxbytes); 1280d2c28191SDave Chinner if (offset + size > mp->m_super->s_maxbytes) 1281d2c28191SDave Chinner size = mp->m_super->s_maxbytes - offset; 1282c59d87c4SChristoph Hellwig end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size); 1283c59d87c4SChristoph Hellwig offset_fsb = XFS_B_TO_FSBT(mp, offset); 1284c59d87c4SChristoph Hellwig 12855c8ed202SDave Chinner error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, 12865c8ed202SDave Chinner &imap, &nimaps, XFS_BMAPI_ENTIRE); 1287c59d87c4SChristoph Hellwig if (error) 1288c59d87c4SChristoph Hellwig goto out_unlock; 1289c59d87c4SChristoph Hellwig 1290c59d87c4SChristoph Hellwig if (create && 1291c59d87c4SChristoph Hellwig (!nimaps || 1292c59d87c4SChristoph Hellwig (imap.br_startblock == HOLESTARTBLOCK || 1293c59d87c4SChristoph Hellwig imap.br_startblock == DELAYSTARTBLOCK))) { 1294aff3a9edSDave Chinner if (direct || xfs_get_extsz_hint(ip)) { 1295507630b2SDave Chinner /* 1296507630b2SDave Chinner * Drop the ilock in preparation for starting the block 1297507630b2SDave Chinner * allocation transaction. It will be retaken 1298507630b2SDave Chinner * exclusively inside xfs_iomap_write_direct for the 1299507630b2SDave Chinner * actual allocation. 1300507630b2SDave Chinner */ 1301507630b2SDave Chinner xfs_iunlock(ip, lockmode); 1302c59d87c4SChristoph Hellwig error = xfs_iomap_write_direct(ip, offset, size, 1303c59d87c4SChristoph Hellwig &imap, nimaps); 1304507630b2SDave Chinner if (error) 13052451337dSDave Chinner return error; 1306d3bc815aSDave Chinner new = 1; 1307c59d87c4SChristoph Hellwig } else { 1308507630b2SDave Chinner /* 1309507630b2SDave Chinner * Delalloc reservations do not require a transaction, 1310d3bc815aSDave Chinner * so we can go on without dropping the lock here. If we 1311d3bc815aSDave Chinner * are allocating a new delalloc block, make sure we set 1312d3bc815aSDave Chinner * the new flag so the buffer is marked new; that way we 1313d3bc815aSDave Chinner * know it is newly allocated if the write 1314d3bc815aSDave Chinner * fails.
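 *
 * (Sketch of why the new flag matters, using code from later in this
 * file: on a failed write, xfs_vm_write_failed() keeps old data and
 * only punches buffers that are marked new or sit beyond EOF:
 *
 *	if (!buffer_new(bh) && block_offset < i_size_read(inode))
 *		continue;
 *	xfs_vm_kill_delalloc_range(inode, block_offset,
 *				   block_offset + bh->b_size);
 *
 * so a delalloc block allocated by this write must carry the flag.)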
1315507630b2SDave Chinner */ 1316d3bc815aSDave Chinner if (nimaps && imap.br_startblock == HOLESTARTBLOCK) 1317d3bc815aSDave Chinner new = 1; 1318c59d87c4SChristoph Hellwig error = xfs_iomap_write_delay(ip, offset, size, &imap); 1319c59d87c4SChristoph Hellwig if (error) 1320c59d87c4SChristoph Hellwig goto out_unlock; 1321c59d87c4SChristoph Hellwig 1322507630b2SDave Chinner xfs_iunlock(ip, lockmode); 1323507630b2SDave Chinner } 1324507630b2SDave Chinner 1325c59d87c4SChristoph Hellwig trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap); 1326c59d87c4SChristoph Hellwig } else if (nimaps) { 1327c59d87c4SChristoph Hellwig trace_xfs_get_blocks_found(ip, offset, size, 0, &imap); 1328507630b2SDave Chinner xfs_iunlock(ip, lockmode); 1329c59d87c4SChristoph Hellwig } else { 1330c59d87c4SChristoph Hellwig trace_xfs_get_blocks_notfound(ip, offset, size); 1331c59d87c4SChristoph Hellwig goto out_unlock; 1332c59d87c4SChristoph Hellwig } 1333c59d87c4SChristoph Hellwig 1334c59d87c4SChristoph Hellwig if (imap.br_startblock != HOLESTARTBLOCK && 1335c59d87c4SChristoph Hellwig imap.br_startblock != DELAYSTARTBLOCK) { 1336c59d87c4SChristoph Hellwig /* 1337c59d87c4SChristoph Hellwig * For unwritten extents do not report a disk address on 1338c59d87c4SChristoph Hellwig * the read case (treat as if we're reading into a hole). 1339c59d87c4SChristoph Hellwig */ 1340c59d87c4SChristoph Hellwig if (create || !ISUNWRITTEN(&imap)) 1341c59d87c4SChristoph Hellwig xfs_map_buffer(inode, bh_result, &imap, offset); 1342c59d87c4SChristoph Hellwig if (create && ISUNWRITTEN(&imap)) { 13437b7a8665SChristoph Hellwig if (direct) { 1344c59d87c4SChristoph Hellwig bh_result->b_private = inode; 13457b7a8665SChristoph Hellwig set_buffer_defer_completion(bh_result); 13467b7a8665SChristoph Hellwig } 1347c59d87c4SChristoph Hellwig set_buffer_unwritten(bh_result); 1348c59d87c4SChristoph Hellwig } 1349c59d87c4SChristoph Hellwig } 1350c59d87c4SChristoph Hellwig 1351c59d87c4SChristoph Hellwig /* 1352c59d87c4SChristoph Hellwig * If this is a realtime file, data may be on a different device 1353c59d87c4SChristoph Hellwig * to that pointed to by the buffer_head's b_bdev currently. 1354c59d87c4SChristoph Hellwig */ 1355c59d87c4SChristoph Hellwig bh_result->b_bdev = xfs_find_bdev_for_inode(inode); 1356c59d87c4SChristoph Hellwig 1357c59d87c4SChristoph Hellwig /* 1358c59d87c4SChristoph Hellwig * If we previously allocated a block out beyond EOF and we are now 1359c59d87c4SChristoph Hellwig * coming back to use it then we will need to flag it as new even if it 1360c59d87c4SChristoph Hellwig * has a disk address. 1361c59d87c4SChristoph Hellwig * 1362c59d87c4SChristoph Hellwig * With sub-block writes into unwritten extents we also need to mark 1363c59d87c4SChristoph Hellwig * the buffer as new so that the unwritten parts of the buffer get 1364c59d87c4SChristoph Hellwig * correctly zeroed.
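 *
 * Read as a predicate, the check below is roughly (hypothetical helper
 * name, for illustration only):
 *
 *	static bool xfs_need_new_flag(struct buffer_head *bh_result,
 *				      struct inode *inode,
 *				      xfs_off_t offset, int new,
 *				      int unwritten)
 *	{
 *		return (!buffer_mapped(bh_result) &&
 *			!buffer_uptodate(bh_result)) ||
 *		       offset >= i_size_read(inode) ||
 *		       new || unwritten;
 *	}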
1365c59d87c4SChristoph Hellwig */ 1366c59d87c4SChristoph Hellwig if (create && 1367c59d87c4SChristoph Hellwig ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) || 1368c59d87c4SChristoph Hellwig (offset >= i_size_read(inode)) || 1369c59d87c4SChristoph Hellwig (new || ISUNWRITTEN(&imap)))) 1370c59d87c4SChristoph Hellwig set_buffer_new(bh_result); 1371c59d87c4SChristoph Hellwig 1372c59d87c4SChristoph Hellwig if (imap.br_startblock == DELAYSTARTBLOCK) { 1373c59d87c4SChristoph Hellwig BUG_ON(direct); 1374c59d87c4SChristoph Hellwig if (create) { 1375c59d87c4SChristoph Hellwig set_buffer_uptodate(bh_result); 1376c59d87c4SChristoph Hellwig set_buffer_mapped(bh_result); 1377c59d87c4SChristoph Hellwig set_buffer_delay(bh_result); 1378c59d87c4SChristoph Hellwig } 1379c59d87c4SChristoph Hellwig } 1380c59d87c4SChristoph Hellwig 1381c59d87c4SChristoph Hellwig /* 1382c59d87c4SChristoph Hellwig * If this is O_DIRECT or the mpage code calling, tell them how large 1383c59d87c4SChristoph Hellwig * the mapping is, so that we can avoid repeated get_blocks calls. 13840e1f789dSDave Chinner * 13850e1f789dSDave Chinner * If the mapping spans EOF, then we have to break the mapping up as the 13860e1f789dSDave Chinner * mapping for blocks beyond EOF must be marked new so that sub-block 13870e1f789dSDave Chinner * regions can be correctly zeroed. We can't do this for mappings within 13880e1f789dSDave Chinner * EOF unless the mapping was just allocated or is unwritten, otherwise 13890e1f789dSDave Chinner * the callers would overwrite existing data with zeros. Hence we have 13900e1f789dSDave Chinner * to split the mapping into a range up to and including EOF, and a 13910e1f789dSDave Chinner * second mapping for beyond EOF. 1392c59d87c4SChristoph Hellwig */ 1393c59d87c4SChristoph Hellwig if (direct || size > (1 << inode->i_blkbits)) { 1394c59d87c4SChristoph Hellwig xfs_off_t mapping_size; 1395c59d87c4SChristoph Hellwig 1396c59d87c4SChristoph Hellwig mapping_size = imap.br_startoff + imap.br_blockcount - iblock; 1397c59d87c4SChristoph Hellwig mapping_size <<= inode->i_blkbits; 1398c59d87c4SChristoph Hellwig 1399c59d87c4SChristoph Hellwig ASSERT(mapping_size > 0); 1400c59d87c4SChristoph Hellwig if (mapping_size > size) 1401c59d87c4SChristoph Hellwig mapping_size = size; 14020e1f789dSDave Chinner if (offset < i_size_read(inode) && 14030e1f789dSDave Chinner offset + mapping_size >= i_size_read(inode)) { 14040e1f789dSDave Chinner /* limit mapping to block that spans EOF */ 14050e1f789dSDave Chinner mapping_size = roundup_64(i_size_read(inode) - offset, 14060e1f789dSDave Chinner 1 << inode->i_blkbits); 14070e1f789dSDave Chinner } 1408c59d87c4SChristoph Hellwig if (mapping_size > LONG_MAX) 1409c59d87c4SChristoph Hellwig mapping_size = LONG_MAX; 1410c59d87c4SChristoph Hellwig 1411c59d87c4SChristoph Hellwig bh_result->b_size = mapping_size; 1412c59d87c4SChristoph Hellwig } 1413c59d87c4SChristoph Hellwig 1414c59d87c4SChristoph Hellwig return 0; 1415c59d87c4SChristoph Hellwig 1416c59d87c4SChristoph Hellwig out_unlock: 1417c59d87c4SChristoph Hellwig xfs_iunlock(ip, lockmode); 14182451337dSDave Chinner return error; 1419c59d87c4SChristoph Hellwig } 1420c59d87c4SChristoph Hellwig 1421c59d87c4SChristoph Hellwig int 1422c59d87c4SChristoph Hellwig xfs_get_blocks( 1423c59d87c4SChristoph Hellwig struct inode *inode, 1424c59d87c4SChristoph Hellwig sector_t iblock, 1425c59d87c4SChristoph Hellwig struct buffer_head *bh_result, 1426c59d87c4SChristoph Hellwig int create) 1427c59d87c4SChristoph Hellwig { 1428c59d87c4SChristoph Hellwig
return __xfs_get_blocks(inode, iblock, bh_result, create, 0); 1429c59d87c4SChristoph Hellwig } 1430c59d87c4SChristoph Hellwig 1431c59d87c4SChristoph Hellwig STATIC int 1432c59d87c4SChristoph Hellwig xfs_get_blocks_direct( 1433c59d87c4SChristoph Hellwig struct inode *inode, 1434c59d87c4SChristoph Hellwig sector_t iblock, 1435c59d87c4SChristoph Hellwig struct buffer_head *bh_result, 1436c59d87c4SChristoph Hellwig int create) 1437c59d87c4SChristoph Hellwig { 1438c59d87c4SChristoph Hellwig return __xfs_get_blocks(inode, iblock, bh_result, create, 1); 1439c59d87c4SChristoph Hellwig } 1440c59d87c4SChristoph Hellwig 1441c59d87c4SChristoph Hellwig /* 1442c59d87c4SChristoph Hellwig * Complete a direct I/O write request. 1443c59d87c4SChristoph Hellwig * 1444c59d87c4SChristoph Hellwig * If the private argument is non-NULL, __xfs_get_blocks signals us that we 1445c59d87c4SChristoph Hellwig * need to issue a transaction to convert the range from unwritten to written 1446*2ba66237SChristoph Hellwig * extents. 1447c59d87c4SChristoph Hellwig */ 1448c59d87c4SChristoph Hellwig STATIC void 1449c59d87c4SChristoph Hellwig xfs_end_io_direct_write( 1450c59d87c4SChristoph Hellwig struct kiocb *iocb, 1451c59d87c4SChristoph Hellwig loff_t offset, 1452c59d87c4SChristoph Hellwig ssize_t size, 14537b7a8665SChristoph Hellwig void *private) 1454c59d87c4SChristoph Hellwig { 1455*2ba66237SChristoph Hellwig struct inode *inode = file_inode(iocb->ki_filp); 1456*2ba66237SChristoph Hellwig struct xfs_inode *ip = XFS_I(inode); 1457*2ba66237SChristoph Hellwig struct xfs_mount *mp = ip->i_mount; 1458*2ba66237SChristoph Hellwig 1459*2ba66237SChristoph Hellwig if (XFS_FORCED_SHUTDOWN(mp)) 1460*2ba66237SChristoph Hellwig return; 1461c59d87c4SChristoph Hellwig 1462c59d87c4SChristoph Hellwig /* 14632813d682SChristoph Hellwig * While the generic direct I/O code updates the inode size, it does 14642813d682SChristoph Hellwig * so only after the end_io handler is called, which means our 14652813d682SChristoph Hellwig * end_io handler thinks the on-disk size is outside the in-core 14662813d682SChristoph Hellwig * size. To prevent this, just update it a little earlier here. 14672813d682SChristoph Hellwig */ 1468*2ba66237SChristoph Hellwig if (offset + size > i_size_read(inode)) 1469*2ba66237SChristoph Hellwig i_size_write(inode, offset + size); 14702813d682SChristoph Hellwig 14712813d682SChristoph Hellwig /* 1472*2ba66237SChristoph Hellwig * For direct I/O we do not know if we need to allocate blocks or not, 1473*2ba66237SChristoph Hellwig * so we can't preallocate an append transaction, as that results in 1474*2ba66237SChristoph Hellwig * nested reservations and log space deadlocks. Hence allocate the 1475*2ba66237SChristoph Hellwig * transaction here. While this is sub-optimal and can block IO 1476*2ba66237SChristoph Hellwig * completion for some time, we're stuck with doing it this way until 1477*2ba66237SChristoph Hellwig * we can pass the ioend to the direct IO allocation callbacks and 1478*2ba66237SChristoph Hellwig * avoid nesting that way.
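 *
 * In outline (illustrative), the handler body below reduces to:
 *
 *	if (private && size > 0)
 *		convert unwritten extents (xfs_iomap_write_unwritten)
 *	else if (offset + size > ip->i_d.di_size)
 *		allocate an XFS_TRANS_FSYNC_TS transaction and call
 *		xfs_setfilesize()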
1479c59d87c4SChristoph Hellwig */ 1480*2ba66237SChristoph Hellwig if (private && size > 0) { 1481*2ba66237SChristoph Hellwig xfs_iomap_write_unwritten(ip, offset, size); 1482*2ba66237SChristoph Hellwig } else if (offset + size > ip->i_d.di_size) { 1483*2ba66237SChristoph Hellwig struct xfs_trans *tp; 1484*2ba66237SChristoph Hellwig int error; 1485c59d87c4SChristoph Hellwig 1486*2ba66237SChristoph Hellwig tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); 1487*2ba66237SChristoph Hellwig error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0); 1488*2ba66237SChristoph Hellwig if (error) { 1489*2ba66237SChristoph Hellwig xfs_trans_cancel(tp, 0); 1490*2ba66237SChristoph Hellwig return; 1491*2ba66237SChristoph Hellwig } 1492c59d87c4SChristoph Hellwig 1493*2ba66237SChristoph Hellwig xfs_setfilesize(ip, tp, offset, size); 1494*2ba66237SChristoph Hellwig } 1495c59d87c4SChristoph Hellwig } 1496c59d87c4SChristoph Hellwig 1497c59d87c4SChristoph Hellwig STATIC ssize_t 1498c59d87c4SChristoph Hellwig xfs_vm_direct_IO( 1499c59d87c4SChristoph Hellwig int rw, 1500c59d87c4SChristoph Hellwig struct kiocb *iocb, 1501d8d3d94bSAl Viro struct iov_iter *iter, 1502d8d3d94bSAl Viro loff_t offset) 1503c59d87c4SChristoph Hellwig { 1504c59d87c4SChristoph Hellwig struct inode *inode = iocb->ki_filp->f_mapping->host; 1505c59d87c4SChristoph Hellwig struct block_device *bdev = xfs_find_bdev_for_inode(inode); 1506c59d87c4SChristoph Hellwig 1507c59d87c4SChristoph Hellwig if (rw & WRITE) { 1508*2ba66237SChristoph Hellwig return __blockdev_direct_IO(rw, iocb, inode, bdev, iter, 150931b14039SAl Viro offset, xfs_get_blocks_direct, 15109862f62fSChristoph Hellwig xfs_end_io_direct_write, NULL, 15119862f62fSChristoph Hellwig DIO_ASYNC_EXTEND); 1512*2ba66237SChristoph Hellwig } 1513*2ba66237SChristoph Hellwig return __blockdev_direct_IO(rw, iocb, inode, bdev, iter, 151431b14039SAl Viro offset, xfs_get_blocks_direct, 1515c59d87c4SChristoph Hellwig NULL, NULL, 0); 1516c59d87c4SChristoph Hellwig } 1517c59d87c4SChristoph Hellwig 1518c59d87c4SChristoph Hellwig /* 15192813d682SChristoph Hellwig * Punch out the delalloc blocks we have already allocated. 15202813d682SChristoph Hellwig * 1521d3bc815aSDave Chinner * Don't bother with xfs_setattr given that nothing can have made it to disk yet 1522d3bc815aSDave Chinner * as the page is still locked at this point. 
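 *
 * Worked example (illustrative, 4096-byte blocks): XFS_B_TO_FSB()
 * rounds byte offsets up to whole filesystem blocks, so
 *
 *	start = 5000  ->  start_fsb = 2
 *	end   = 12000 ->  end_fsb   = 3
 *
 * and exactly one delalloc block, [2, 3), is punched out.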
1523c59d87c4SChristoph Hellwig */ 1524d3bc815aSDave Chinner STATIC void 1525d3bc815aSDave Chinner xfs_vm_kill_delalloc_range( 1526d3bc815aSDave Chinner struct inode *inode, 1527d3bc815aSDave Chinner loff_t start, 1528d3bc815aSDave Chinner loff_t end) 1529d3bc815aSDave Chinner { 1530c59d87c4SChristoph Hellwig struct xfs_inode *ip = XFS_I(inode); 1531c59d87c4SChristoph Hellwig xfs_fileoff_t start_fsb; 1532c59d87c4SChristoph Hellwig xfs_fileoff_t end_fsb; 1533c59d87c4SChristoph Hellwig int error; 1534c59d87c4SChristoph Hellwig 1535d3bc815aSDave Chinner start_fsb = XFS_B_TO_FSB(ip->i_mount, start); 1536d3bc815aSDave Chinner end_fsb = XFS_B_TO_FSB(ip->i_mount, end); 1537c59d87c4SChristoph Hellwig if (end_fsb <= start_fsb) 1538c59d87c4SChristoph Hellwig return; 1539c59d87c4SChristoph Hellwig 1540c59d87c4SChristoph Hellwig xfs_ilock(ip, XFS_ILOCK_EXCL); 1541c59d87c4SChristoph Hellwig error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1542c59d87c4SChristoph Hellwig end_fsb - start_fsb); 1543c59d87c4SChristoph Hellwig if (error) { 1544c59d87c4SChristoph Hellwig /* something screwed, just bail */ 1545c59d87c4SChristoph Hellwig if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { 1546c59d87c4SChristoph Hellwig xfs_alert(ip->i_mount, 1547c59d87c4SChristoph Hellwig "xfs_vm_write_failed: unable to clean up ino %lld", 1548c59d87c4SChristoph Hellwig ip->i_ino); 1549c59d87c4SChristoph Hellwig } 1550c59d87c4SChristoph Hellwig } 1551c59d87c4SChristoph Hellwig xfs_iunlock(ip, XFS_ILOCK_EXCL); 1552c59d87c4SChristoph Hellwig } 1553d3bc815aSDave Chinner 1554d3bc815aSDave Chinner STATIC void 1555d3bc815aSDave Chinner xfs_vm_write_failed( 1556d3bc815aSDave Chinner struct inode *inode, 1557d3bc815aSDave Chinner struct page *page, 1558d3bc815aSDave Chinner loff_t pos, 1559d3bc815aSDave Chinner unsigned len) 1560d3bc815aSDave Chinner { 156158e59854SJie Liu loff_t block_offset; 1562d3bc815aSDave Chinner loff_t block_start; 1563d3bc815aSDave Chinner loff_t block_end; 1564d3bc815aSDave Chinner loff_t from = pos & (PAGE_CACHE_SIZE - 1); 1565d3bc815aSDave Chinner loff_t to = from + len; 1566d3bc815aSDave Chinner struct buffer_head *bh, *head; 1567d3bc815aSDave Chinner 156858e59854SJie Liu /* 156958e59854SJie Liu * The request pos offset might be 32 or 64 bit; this is all fine 157058e59854SJie Liu * on a 64-bit platform. However, for a 64-bit pos request on a 157158e59854SJie Liu * 32-bit platform, the high 32 bits will be masked off if we 157258e59854SJie Liu * evaluate the block_offset via (pos & PAGE_MASK), because 157358e59854SJie Liu * PAGE_MASK is 0xfffff000 as an unsigned long. The result would 157458e59854SJie Liu * be incorrect and would cause the following ASSERT to fail in 157558e59854SJie Liu * most cases. To avoid this mismatch problem, we evaluate the 157658e59854SJie Liu * block_offset of the start of the page by using shifts rather 157758e59854SJie Liu * than masks.
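 *
 * Worked example (illustrative): with 4k pages on a 32-bit box,
 * PAGE_MASK is the unsigned long 0xfffff000.  For pos = 0x100001234:
 *
 *	pos & PAGE_MASK  = 0x00001000		(high 32 bits lost)
 *	(pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT
 *			 = 0x100001000		(correct)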
157858e59854SJie Liu */ 157958e59854SJie Liu block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT; 158058e59854SJie Liu 1581d3bc815aSDave Chinner ASSERT(block_offset + from == pos); 1582d3bc815aSDave Chinner 1583d3bc815aSDave Chinner head = page_buffers(page); 1584d3bc815aSDave Chinner block_start = 0; 1585d3bc815aSDave Chinner for (bh = head; bh != head || !block_start; 1586d3bc815aSDave Chinner bh = bh->b_this_page, block_start = block_end, 1587d3bc815aSDave Chinner block_offset += bh->b_size) { 1588d3bc815aSDave Chinner block_end = block_start + bh->b_size; 1589d3bc815aSDave Chinner 1590d3bc815aSDave Chinner /* skip buffers before the write */ 1591d3bc815aSDave Chinner if (block_end <= from) 1592d3bc815aSDave Chinner continue; 1593d3bc815aSDave Chinner 1594d3bc815aSDave Chinner /* if the buffer is after the write, we're done */ 1595d3bc815aSDave Chinner if (block_start >= to) 1596d3bc815aSDave Chinner break; 1597d3bc815aSDave Chinner 1598d3bc815aSDave Chinner if (!buffer_delay(bh)) 1599d3bc815aSDave Chinner continue; 1600d3bc815aSDave Chinner 1601d3bc815aSDave Chinner if (!buffer_new(bh) && block_offset < i_size_read(inode)) 1602d3bc815aSDave Chinner continue; 1603d3bc815aSDave Chinner 1604d3bc815aSDave Chinner xfs_vm_kill_delalloc_range(inode, block_offset, 1605d3bc815aSDave Chinner block_offset + bh->b_size); 16064ab9ed57SDave Chinner 16074ab9ed57SDave Chinner /* 16084ab9ed57SDave Chinner * This buffer does not contain data anymore. Make sure anyone 16094ab9ed57SDave Chinner * who finds it knows that for certain. 16104ab9ed57SDave Chinner */ 16114ab9ed57SDave Chinner clear_buffer_delay(bh); 16124ab9ed57SDave Chinner clear_buffer_uptodate(bh); 16134ab9ed57SDave Chinner clear_buffer_mapped(bh); 16144ab9ed57SDave Chinner clear_buffer_new(bh); 16154ab9ed57SDave Chinner clear_buffer_dirty(bh); 1616c59d87c4SChristoph Hellwig } 1617c59d87c4SChristoph Hellwig 1618d3bc815aSDave Chinner } 1619d3bc815aSDave Chinner 1620d3bc815aSDave Chinner /* 1621d3bc815aSDave Chinner * This used to call block_write_begin(), but it unlocks and releases the page 1622d3bc815aSDave Chinner * on error, and we need that page to be able to punch stale delalloc blocks out 1623d3bc815aSDave Chinner * on failure. Hence we copy-n-waste it here and call xfs_vm_write_failed() at 1624d3bc815aSDave Chinner * the appropriate point.
1625d3bc815aSDave Chinner */ 1626c59d87c4SChristoph Hellwig STATIC int 1627c59d87c4SChristoph Hellwig xfs_vm_write_begin( 1628c59d87c4SChristoph Hellwig struct file *file, 1629c59d87c4SChristoph Hellwig struct address_space *mapping, 1630c59d87c4SChristoph Hellwig loff_t pos, 1631c59d87c4SChristoph Hellwig unsigned len, 1632c59d87c4SChristoph Hellwig unsigned flags, 1633c59d87c4SChristoph Hellwig struct page **pagep, 1634c59d87c4SChristoph Hellwig void **fsdata) 1635c59d87c4SChristoph Hellwig { 1636d3bc815aSDave Chinner pgoff_t index = pos >> PAGE_CACHE_SHIFT; 1637d3bc815aSDave Chinner struct page *page; 1638d3bc815aSDave Chinner int status; 1639c59d87c4SChristoph Hellwig 1640d3bc815aSDave Chinner ASSERT(len <= PAGE_CACHE_SIZE); 1641d3bc815aSDave Chinner 1642ad22c7a0SDave Chinner page = grab_cache_page_write_begin(mapping, index, flags); 1643d3bc815aSDave Chinner if (!page) 1644d3bc815aSDave Chinner return -ENOMEM; 1645d3bc815aSDave Chinner 1646d3bc815aSDave Chinner status = __block_write_begin(page, pos, len, xfs_get_blocks); 1647d3bc815aSDave Chinner if (unlikely(status)) { 1648d3bc815aSDave Chinner struct inode *inode = mapping->host; 164972ab70a1SDave Chinner size_t isize = i_size_read(inode); 1650d3bc815aSDave Chinner 1651d3bc815aSDave Chinner xfs_vm_write_failed(inode, page, pos, len); 1652d3bc815aSDave Chinner unlock_page(page); 1653d3bc815aSDave Chinner 165472ab70a1SDave Chinner /* 165572ab70a1SDave Chinner * If the write is beyond EOF, we only want to kill blocks 165672ab70a1SDave Chinner * allocated in this write, not blocks that were previously 165772ab70a1SDave Chinner * written successfully. 165872ab70a1SDave Chinner */ 165972ab70a1SDave Chinner if (pos + len > isize) { 166072ab70a1SDave Chinner ssize_t start = max_t(ssize_t, pos, isize); 166172ab70a1SDave Chinner 166272ab70a1SDave Chinner truncate_pagecache_range(inode, start, pos + len); 166372ab70a1SDave Chinner } 1664d3bc815aSDave Chinner 1665d3bc815aSDave Chinner page_cache_release(page); 1666d3bc815aSDave Chinner page = NULL; 1667c59d87c4SChristoph Hellwig } 1668c59d87c4SChristoph Hellwig 1669d3bc815aSDave Chinner *pagep = page; 1670d3bc815aSDave Chinner return status; 1671d3bc815aSDave Chinner } 1672d3bc815aSDave Chinner 1673d3bc815aSDave Chinner /* 1674aad3f375SDave Chinner * On failure, we only need to kill delalloc blocks beyond EOF in the range of 1675aad3f375SDave Chinner * this specific write because they will never be written. Previous writes 1676aad3f375SDave Chinner * beyond EOF where block allocation succeeded do not need to be trashed, so 1677aad3f375SDave Chinner * only new blocks from this write should be trashed. For blocks within 1678aad3f375SDave Chinner * EOF, generic_write_end() zeros them so they are safe to leave alone and be 1679aad3f375SDave Chinner * written with all the other valid data. 
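 *
 * Worked example (illustrative numbers): with isize = 8192, pos = 12288,
 * len = 4096 and a short copy, to = 16384 > isize; since pos > isize the
 * kill and truncate range becomes [12288, 16384), leaving blocks
 * allocated by earlier writes between 8192 and 12288 untouched.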
1680d3bc815aSDave Chinner */ 1681c59d87c4SChristoph Hellwig STATIC int 1682c59d87c4SChristoph Hellwig xfs_vm_write_end( 1683c59d87c4SChristoph Hellwig struct file *file, 1684c59d87c4SChristoph Hellwig struct address_space *mapping, 1685c59d87c4SChristoph Hellwig loff_t pos, 1686c59d87c4SChristoph Hellwig unsigned len, 1687c59d87c4SChristoph Hellwig unsigned copied, 1688c59d87c4SChristoph Hellwig struct page *page, 1689c59d87c4SChristoph Hellwig void *fsdata) 1690c59d87c4SChristoph Hellwig { 1691c59d87c4SChristoph Hellwig int ret; 1692c59d87c4SChristoph Hellwig 1693d3bc815aSDave Chinner ASSERT(len <= PAGE_CACHE_SIZE); 1694d3bc815aSDave Chinner 1695c59d87c4SChristoph Hellwig ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); 1696d3bc815aSDave Chinner if (unlikely(ret < len)) { 1697d3bc815aSDave Chinner struct inode *inode = mapping->host; 1698d3bc815aSDave Chinner size_t isize = i_size_read(inode); 1699d3bc815aSDave Chinner loff_t to = pos + len; 1700d3bc815aSDave Chinner 1701d3bc815aSDave Chinner if (to > isize) { 1702aad3f375SDave Chinner /* only kill blocks in this write beyond EOF */ 1703aad3f375SDave Chinner if (pos > isize) 1704aad3f375SDave Chinner isize = pos; 1705d3bc815aSDave Chinner xfs_vm_kill_delalloc_range(inode, isize, to); 1706aad3f375SDave Chinner truncate_pagecache_range(inode, isize, to); 1707d3bc815aSDave Chinner } 1708d3bc815aSDave Chinner } 1709c59d87c4SChristoph Hellwig return ret; 1710c59d87c4SChristoph Hellwig } 1711c59d87c4SChristoph Hellwig 1712c59d87c4SChristoph Hellwig STATIC sector_t 1713c59d87c4SChristoph Hellwig xfs_vm_bmap( 1714c59d87c4SChristoph Hellwig struct address_space *mapping, 1715c59d87c4SChristoph Hellwig sector_t block) 1716c59d87c4SChristoph Hellwig { 1717c59d87c4SChristoph Hellwig struct inode *inode = (struct inode *)mapping->host; 1718c59d87c4SChristoph Hellwig struct xfs_inode *ip = XFS_I(inode); 1719c59d87c4SChristoph Hellwig 1720c59d87c4SChristoph Hellwig trace_xfs_vm_bmap(XFS_I(inode)); 1721c59d87c4SChristoph Hellwig xfs_ilock(ip, XFS_IOLOCK_SHARED); 17224bc1ea6bSDave Chinner filemap_write_and_wait(mapping); 1723c59d87c4SChristoph Hellwig xfs_iunlock(ip, XFS_IOLOCK_SHARED); 1724c59d87c4SChristoph Hellwig return generic_block_bmap(mapping, block, xfs_get_blocks); 1725c59d87c4SChristoph Hellwig } 1726c59d87c4SChristoph Hellwig 1727c59d87c4SChristoph Hellwig STATIC int 1728c59d87c4SChristoph Hellwig xfs_vm_readpage( 1729c59d87c4SChristoph Hellwig struct file *unused, 1730c59d87c4SChristoph Hellwig struct page *page) 1731c59d87c4SChristoph Hellwig { 1732c59d87c4SChristoph Hellwig return mpage_readpage(page, xfs_get_blocks); 1733c59d87c4SChristoph Hellwig } 1734c59d87c4SChristoph Hellwig 1735c59d87c4SChristoph Hellwig STATIC int 1736c59d87c4SChristoph Hellwig xfs_vm_readpages( 1737c59d87c4SChristoph Hellwig struct file *unused, 1738c59d87c4SChristoph Hellwig struct address_space *mapping, 1739c59d87c4SChristoph Hellwig struct list_head *pages, 1740c59d87c4SChristoph Hellwig unsigned nr_pages) 1741c59d87c4SChristoph Hellwig { 1742c59d87c4SChristoph Hellwig return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks); 1743c59d87c4SChristoph Hellwig } 1744c59d87c4SChristoph Hellwig 174522e757a4SDave Chinner /* 174622e757a4SDave Chinner * This is basically a copy of __set_page_dirty_buffers() with one 174722e757a4SDave Chinner * small tweak: buffers beyond EOF do not get marked dirty. 
If we mark them 174822e757a4SDave Chinner * dirty, we'll never be able to clean them because we don't write buffers 174922e757a4SDave Chinner * beyond EOF, and that means we can't invalidate pages that span EOF 175022e757a4SDave Chinner * that have been marked dirty. Further, the dirty state can leak into 175122e757a4SDave Chinner * the file interior if the file is extended, resulting in all sorts of 175222e757a4SDave Chinner * bad things happening as the state does not match the underlying data. 175322e757a4SDave Chinner * 175422e757a4SDave Chinner * XXX: this really indicates that bufferheads in XFS need to die. Warts like 175522e757a4SDave Chinner * this only exist because of bufferheads and how the generic code manages them. 175622e757a4SDave Chinner */ 175722e757a4SDave Chinner STATIC int 175822e757a4SDave Chinner xfs_vm_set_page_dirty( 175922e757a4SDave Chinner struct page *page) 176022e757a4SDave Chinner { 176122e757a4SDave Chinner struct address_space *mapping = page->mapping; 176222e757a4SDave Chinner struct inode *inode = mapping->host; 176322e757a4SDave Chinner loff_t end_offset; 176422e757a4SDave Chinner loff_t offset; 176522e757a4SDave Chinner int newly_dirty; 176622e757a4SDave Chinner 176722e757a4SDave Chinner if (unlikely(!mapping)) 176822e757a4SDave Chinner return !TestSetPageDirty(page); 176922e757a4SDave Chinner 177022e757a4SDave Chinner end_offset = i_size_read(inode); 177122e757a4SDave Chinner offset = page_offset(page); 177222e757a4SDave Chinner 177322e757a4SDave Chinner spin_lock(&mapping->private_lock); 177422e757a4SDave Chinner if (page_has_buffers(page)) { 177522e757a4SDave Chinner struct buffer_head *head = page_buffers(page); 177622e757a4SDave Chinner struct buffer_head *bh = head; 177722e757a4SDave Chinner 177822e757a4SDave Chinner do { 177922e757a4SDave Chinner if (offset < end_offset) 178022e757a4SDave Chinner set_buffer_dirty(bh); 178122e757a4SDave Chinner bh = bh->b_this_page; 178222e757a4SDave Chinner offset += 1 << inode->i_blkbits; 178322e757a4SDave Chinner } while (bh != head); 178422e757a4SDave Chinner } 178522e757a4SDave Chinner newly_dirty = !TestSetPageDirty(page); 178622e757a4SDave Chinner spin_unlock(&mapping->private_lock); 178722e757a4SDave Chinner 178822e757a4SDave Chinner if (newly_dirty) { 178922e757a4SDave Chinner /* sigh - __set_page_dirty() is static, so copy it here, too */ 179022e757a4SDave Chinner unsigned long flags; 179122e757a4SDave Chinner 179222e757a4SDave Chinner spin_lock_irqsave(&mapping->tree_lock, flags); 179322e757a4SDave Chinner if (page->mapping) { /* Race with truncate? 
*/ 179422e757a4SDave Chinner WARN_ON_ONCE(!PageUptodate(page)); 179522e757a4SDave Chinner account_page_dirtied(page, mapping); 179622e757a4SDave Chinner radix_tree_tag_set(&mapping->page_tree, 179722e757a4SDave Chinner page_index(page), PAGECACHE_TAG_DIRTY); 179822e757a4SDave Chinner } 179922e757a4SDave Chinner spin_unlock_irqrestore(&mapping->tree_lock, flags); 180022e757a4SDave Chinner __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 180122e757a4SDave Chinner } 180222e757a4SDave Chinner return newly_dirty; 180322e757a4SDave Chinner } 180422e757a4SDave Chinner 1805c59d87c4SChristoph Hellwig const struct address_space_operations xfs_address_space_operations = { 1806c59d87c4SChristoph Hellwig .readpage = xfs_vm_readpage, 1807c59d87c4SChristoph Hellwig .readpages = xfs_vm_readpages, 1808c59d87c4SChristoph Hellwig .writepage = xfs_vm_writepage, 1809c59d87c4SChristoph Hellwig .writepages = xfs_vm_writepages, 181022e757a4SDave Chinner .set_page_dirty = xfs_vm_set_page_dirty, 1811c59d87c4SChristoph Hellwig .releasepage = xfs_vm_releasepage, 1812c59d87c4SChristoph Hellwig .invalidatepage = xfs_vm_invalidatepage, 1813c59d87c4SChristoph Hellwig .write_begin = xfs_vm_write_begin, 1814c59d87c4SChristoph Hellwig .write_end = xfs_vm_write_end, 1815c59d87c4SChristoph Hellwig .bmap = xfs_vm_bmap, 1816c59d87c4SChristoph Hellwig .direct_IO = xfs_vm_direct_IO, 1817c59d87c4SChristoph Hellwig .migratepage = buffer_migrate_page, 1818c59d87c4SChristoph Hellwig .is_partially_uptodate = block_is_partially_uptodate, 1819c59d87c4SChristoph Hellwig .error_remove_page = generic_error_remove_page, 1820c59d87c4SChristoph Hellwig }; 1821
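
/*
 * Illustrative sketch, not part of the original file: the tweak in
 * xfs_vm_set_page_dirty() above reduces to this per-buffer test - only
 * buffers whose file offset lies inside i_size get dirtied, so clean
 * buffers beyond EOF can still be invalidated later.  The helper name
 * is hypothetical.
 */
static inline bool xfs_offset_within_eof(struct inode *inode, loff_t offset)
{
	return offset < i_size_read(inode);
}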