Lines matching +full:data +full:-mapping in fs/xfs/xfs_aops.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 */
/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
{
        return ioend->io_offset + ioend->io_size >
                        XFS_I(ioend->io_inode)->i_disk_size;
}
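For reference, XFS_I() used throughout these snippets is essentially the standard container_of() conversion from a VFS inode to the XFS inode, defined in fs/xfs/xfs_inode.h along these lines:

static inline struct xfs_inode *XFS_I(struct inode *inode)
{
        return container_of(inode, struct xfs_inode, i_vnode);
}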
/*
 * Update on-disk file size now that data has been written to disk.
 */
int
xfs_setfilesize(struct xfs_inode *ip, xfs_off_t offset, size_t size)
{
        struct xfs_mount        *mp = ip->i_mount;
        ...
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
        ...
        ip->i_disk_size = isize;
        ...
}
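The elided body follows the usual XFS transaction pattern: take the ILOCK, recompute the on-disk EOF, and either cancel the transaction (nothing to do) or log and commit. A condensed sketch from memory of the surrounding file, not a verbatim copy:

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        isize = xfs_new_eof(ip, offset + size); /* 0 if no size update needed */
        if (!isize) {
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                xfs_trans_cancel(tp);
                return 0;
        }
        ip->i_disk_size = isize;
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        return xfs_trans_commit(tp);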
STATIC void
xfs_end_ioend(struct iomap_ioend *ioend)
{
        struct xfs_inode        *ip = XFS_I(ioend->io_inode);
        struct xfs_mount        *mp = ip->i_mount;
        xfs_off_t               offset = ioend->io_offset;
        size_t                  size = ioend->io_size;
        unsigned int            nofs_flag;
        int                     error;

        /*
         * Writeback can allocate memory on behalf of memory reclaim, so set
         * the task-wide nofs context for the following operations.
         */
        nofs_flag = memalloc_nofs_save();

        /* Just clean up the in-memory structures if the fs has been shut down. */
        if (xfs_is_shutdown(mp)) {
                error = -EIO;
                goto done;
        }

        /*
         * Clean up all COW blocks and underlying data fork delalloc blocks on
         * I/O error: the pages backing them are no longer dirty, and stale
         * delalloc blocks would corrupt free space accounting on unmount.
         */
        error = blk_status_to_errno(ioend->io_bio.bi_status);
        if (unlikely(error)) {
                if (ioend->io_flags & IOMAP_F_SHARED) {
                        xfs_reflink_cancel_cow_range(ip, offset, size, true);
                        ...
                }
                goto done;
        }

        /* On success, convert COW or unwritten extents to real ones. */
        if (ioend->io_flags & IOMAP_F_SHARED)
                error = xfs_reflink_end_cow(ip, offset, size);
        else if (ioend->io_type == IOMAP_UNWRITTEN)
                error = xfs_iomap_write_unwritten(ip, offset, size, false);

        if (!error && xfs_ioend_is_append(ioend))
                error = xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
done:
        iomap_finish_ioends(ioend, error);
        memalloc_nofs_restore(nofs_flag);
}
/* Finish all pending io completions (the handler behind i_ioend_work). */
void
xfs_end_io(struct work_struct *work)
{
        struct xfs_inode        *ip =
                container_of(work, struct xfs_inode, i_ioend_work);
        struct iomap_ioend      *ioend;
        struct list_head        tmp;
        unsigned long           flags;

        /* Atomically steal the whole pending list, then work it unlocked. */
        spin_lock_irqsave(&ip->i_ioend_lock, flags);
        list_replace_init(&ip->i_ioend_list, &tmp);
        spin_unlock_irqrestore(&ip->i_ioend_lock, flags);

        iomap_sort_ioends(&tmp);
        while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
                        io_list))) {
                list_del_init(&ioend->io_list);
                iomap_ioend_try_merge(ioend, &tmp);
                xfs_end_ioend(ioend);
                cond_resched();
        }
}
STATIC void
xfs_end_bio(struct bio *bio)
{
        struct iomap_ioend      *ioend = container_of(bio, struct iomap_ioend,
                                                io_bio);
        struct xfs_inode        *ip = XFS_I(ioend->io_inode);
        unsigned long           flags;

        /*
         * Queue the ioend on the inode's completion list; the workqueue only
         * needs a kick when the list transitions from empty to non-empty.
         */
        spin_lock_irqsave(&ip->i_ioend_lock, flags);
        if (list_empty(&ip->i_ioend_list))
                WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
                                &ip->i_ioend_work));
        list_add_tail(&ioend->io_list, &ip->i_ioend_list);
        spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
}
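The i_ioend_work handler is wired up once, at inode allocation time. From memory, xfs_inode_alloc() in fs/xfs/xfs_icache.c does roughly:

        INIT_WORK(&ip->i_ioend_work, xfs_end_io);
        INIT_LIST_HEAD(&ip->i_ioend_list);
        spin_lock_init(&ip->i_ioend_lock);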
/*
 * Fast revalidation of the cached writeback mapping. Return true if the
 * current mapping is valid, false otherwise.
 */
static bool
xfs_imap_valid(struct iomap_writepage_ctx *wpc, struct xfs_inode *ip,
                loff_t offset)
{
        if (offset < wpc->iomap.offset ||
            offset >= wpc->iomap.offset + wpc->iomap.length)
                return false;
        /*
         * If this is a COW mapping, it is sufficient to check that the mapping
         * covers the offset. Be careful to check this first because the caller
         * can revalidate a COW mapping without updating the data seqno.
         */
        if (wpc->iomap.flags & IOMAP_F_SHARED)
                return true;

        /*
         * This is not a COW mapping. Check the sequence number of the data
         * fork because concurrent changes could have invalidated the extent.
         * Also check the COW fork: changes since we last looked (and found
         * nothing at this offset) could have added overlapping blocks.
         */
        if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq)) {
                trace_xfs_wb_data_iomap_invalid(ip, &wpc->iomap,
                                XFS_WPC(wpc)->data_seq, XFS_DATA_FORK);
                return false;
        }
        if (xfs_inode_has_cow_data(ip) &&
            XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq)) {
                trace_xfs_wb_cow_iomap_invalid(ip, &wpc->iomap,
                                XFS_WPC(wpc)->cow_seq, XFS_COW_FORK);
                return false;
        }
        return true;
}
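XFS_WPC() and the data_seq/cow_seq fields come from the writeback context this file wraps around the generic iomap one; in kernels of this vintage the file defines it as, roughly:

struct xfs_writepage_ctx {
        struct iomap_writepage_ctx ctx;
        unsigned int            data_seq;
        unsigned int            cow_seq;
};

static inline struct xfs_writepage_ctx *
XFS_WPC(struct iomap_writepage_ctx *ctx)
{
        return container_of(ctx, struct xfs_writepage_ctx, ctx);
}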
static int
xfs_map_blocks(struct iomap_writepage_ctx *wpc, struct inode *inode,
                loff_t offset, unsigned int len)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        ...

        if (xfs_is_shutdown(mp))
                return -EIO;

        /*
         * COW fork blocks can overlap data fork blocks even if the blocks
         * aren't shared.  COW I/O always takes precedence, so we must always
         * check for overlap on reflink inodes unless the mapping is already a
         * COW one, or the COW fork hasn't changed since we last looked at it.
         */
        if (xfs_imap_valid(wpc, ip, offset))
                return 0;

retry:
        cow_fsb = NULLFILEOFF;
        whichfork = XFS_DATA_FORK;
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        ASSERT(!xfs_need_iread_extents(&ip->i_df));

        /*
         * Check if this offset is covered by a COW extent, and if so use
         * it directly instead of looking up anything in the data fork.
         */
        if (xfs_inode_has_cow_data(ip) &&
            xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
                cow_fsb = imap.br_startoff;
        if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
                XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
                xfs_iunlock(ip, XFS_ILOCK_SHARED);
                whichfork = XFS_COW_FORK;
                goto allocate_blocks;
        }

        /*
         * No COW extent overlap.  Revalidate now that we may have updated
         * ->cow_seq.  If the data mapping is still valid, we're done.
         */
        if (xfs_imap_valid(wpc, ip, offset)) {
                xfs_iunlock(ip, XFS_ILOCK_SHARED);
                return 0;
        }

        if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
                imap.br_startoff = end_fsb;     /* fake a hole past EOF */
        XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        /* Landed in a hole or beyond EOF? */
        if (imap.br_startoff > offset_fsb) {
                imap.br_blockcount = imap.br_startoff - offset_fsb;
                imap.br_startoff = offset_fsb;
                ...
        }

        /*
         * Truncate to the next COW extent if there is one.  This is the only
         * opportunity to do this because we can skip COW fork lookups for the
         * subsequent blocks in the mapping; however, the requirement to treat
         * the COW range separately remains.
         */
        if (cow_fsb != NULLFILEOFF &&
            cow_fsb < imap.br_startoff + imap.br_blockcount)
                imap.br_blockcount = cow_fsb - imap.br_startoff;
        ...
        xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0, 0, XFS_WPC(wpc)->data_seq);
        return 0;

allocate_blocks:
        /*
         * Convert a delalloc extent to a real one.  The current page is held
         * locked so nothing could have removed the block backing offset,
         * although it could have moved from the COW to the data fork by
         * another thread.
         */
        if (whichfork == XFS_COW_FORK)
                seq = &XFS_WPC(wpc)->cow_seq;
        else
                seq = &XFS_WPC(wpc)->data_seq;

        error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
                        &wpc->iomap, seq);
        if (error) {
                /*
                 * If we failed to find the extent in the COW fork we might
                 * have raced with a COW to data fork conversion or truncate.
                 * Restart the lookup to catch the extent in the data fork for
                 * the next pass.
                 */
                if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
                        goto retry;
                ASSERT(error != -EAGAIN);
                return error;
        }

        /*
         * Due to merging the returned extent might be larger than the
         * original delalloc one.  Trim it to the next COW boundary again to
         * force a re-lookup.
         */
        if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
                loff_t  cow_offset = XFS_FSB_TO_B(mp, cow_fsb);

                if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
                        wpc->iomap.length = cow_offset - wpc->iomap.offset;
        }

        ASSERT(wpc->iomap.offset <= offset);
        ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
        return 0;
}
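The COW-boundary trim above is plain interval clipping; a worked example with made-up numbers:

/*
 * Hypothetical numbers: the data fork mapping comes back as [0, 1M) but a
 * COW extent starts at 256k, so cow_offset = 262144 and:
 *
 *      wpc->iomap.length = 262144 - 0 = 262144;
 *
 * Writeback of the folio at offset 256k then fails xfs_imap_valid() and
 * re-enters xfs_map_blocks(), which finds the COW fork extent instead.
 */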
static int
xfs_prepare_ioend(struct iomap_ioend *ioend, int status)
{
        unsigned int            nofs_flag;

        /*
         * We can allocate memory while doing writeback on behalf of memory
         * reclaim, so set the task-wide nofs context for the following
         * operations.
         */
        nofs_flag = memalloc_nofs_save();

        /* Convert CoW extents to regular. */
        if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
                status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
                                ioend->io_offset, ioend->io_size);
        }

        memalloc_nofs_restore(nofs_flag);

        /* Send ioends that might require a transaction to the completion wq. */
        if (xfs_ioend_is_append(ioend) || ioend->io_type == IOMAP_UNWRITTEN ||
            (ioend->io_flags & IOMAP_F_SHARED))
                ioend->io_bio.bi_end_io = xfs_end_bio;
        return status;
}
/*
 * If the folio has delalloc blocks on it, the caller is asking us to punch them
 * out. If we don't, we can leave a stale delalloc mapping covered by a clean
 * page that needs to be dirtied again before the delalloc mapping can be
 * converted. This stale delalloc mapping can trip up a later direct I/O read
 * operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the folio. Because
 * they are delalloc, we can do this without needing a transaction. Indeed, if
 * we get ENOSPC errors, we have to be able to do this truncation without a
 * transaction as there is no space left for block reservation (typically why
 * we see ENOSPC in writeback).
 */
static void
xfs_discard_folio(struct folio *folio, loff_t pos)
{
        struct xfs_inode        *ip = XFS_I(folio->mapping->host);
        struct xfs_mount        *mp = ip->i_mount;

        if (xfs_is_shutdown(mp))
                return;

        xfs_alert_ratelimited(mp,
                "page discard on page "PTR_FMT", inode 0x%llx, pos %llu.",
                        folio, ip->i_ino, pos);
        ...
}
STATIC int
xfs_vm_writepages(
        struct address_space    *mapping,
        struct writeback_control *wbc)
{
        struct xfs_writepage_ctx wpc = { };

        xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
        return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}
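xfs_writeback_ops is the vtable that plugs the functions above into the generic iomap writeback loop; in this era of the file it reads, roughly:

static const struct iomap_writeback_ops xfs_writeback_ops = {
        .map_blocks             = xfs_map_blocks,
        .prepare_ioend          = xfs_prepare_ioend,
        .discard_folio          = xfs_discard_folio,
};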
STATIC int
xfs_dax_writepages(
        struct address_space    *mapping,
        struct writeback_control *wbc)
{
        struct xfs_inode        *ip = XFS_I(mapping->host);

        xfs_iflags_clear(ip, XFS_ITRUNCATED);
        return dax_writeback_mapping_range(mapping,
                        xfs_inode_buftarg(ip)->bt_daxdev, wbc);
}
STATIC sector_t
xfs_vm_bmap(
        struct address_space    *mapping,
        sector_t                block)
{
        struct xfs_inode        *ip = XFS_I(mapping->host);

        /*
         * The swap code (ab-)uses ->bmap to get a block mapping and then
         * bypasses the filesystem for actual I/O.  We really can't allow
         * that on reflink inodes, so we have to skip out here.  And yes,
         * 0 is the magic code for a bmap error.
         *
         * Since we don't pass back blockdev info, we can't return bmap
         * information for rt files either.
         */
        if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
                return 0;
        return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
}
static int
xfs_iomap_swapfile_activate(
        struct swap_info_struct *sis,
        struct file             *swap_file,
        sector_t                *span)
{
        sis->bdev = xfs_inode_buftarg(XFS_I(file_inode(swap_file)))->bt_bdev;
        return iomap_swapfile_activate(sis, swap_file, span,
                        &xfs_read_iomap_ops);
}
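For completeness, these entry points reach the VFS through the file's address_space_operations table; a sketch of that table as it looks in recent kernels (the exact member set varies by kernel version):

const struct address_space_operations xfs_address_space_operations = {
        .read_folio             = xfs_vm_read_folio,
        .readahead              = xfs_vm_readahead,
        .writepages             = xfs_vm_writepages,
        .dirty_folio            = iomap_dirty_folio,
        .release_folio          = iomap_release_folio,
        .invalidate_folio       = iomap_invalidate_folio,
        .bmap                   = xfs_vm_bmap,
        .direct_IO              = noop_direct_IO,
        .migrate_folio          = filemap_migrate_folio,
        .is_partially_uptodate  = iomap_is_partially_uptodate,
        .swap_activate          = xfs_iomap_swapfile_activate,
};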