Lines Matching +full:left +full:- +full:shift
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 * Copyright (c) 2016-2018 Christoph Hellwig.
37 (((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)
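
XFS_ALLOC_ALIGN (line 37 above) rounds a byte offset down to the mount's allocation-unit boundary by shifting right then left, which clears the low m_allocsize_log bits. A minimal standalone sketch of that align-down idiom, assuming a power-of-two granule; alloc_align_down and allocsize_log are illustrative names, not kernel symbols:

#include <stdio.h>
#include <stdint.h>

/* Round off down to a multiple of 1 << allocsize_log (a power of two). */
static uint64_t alloc_align_down(uint64_t off, unsigned int allocsize_log)
{
	/* Discard the low allocsize_log bits, then restore the magnitude. */
	return (off >> allocsize_log) << allocsize_log;
}

int main(void)
{
	/* 64 KiB allocation unit: 70000 rounds down to 65536. */
	printf("%llu\n", (unsigned long long)alloc_align_down(70000, 16));
	return 0;
}
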
44 xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO, in xfs_alert_fsblock_zero()
47 "blkcnt: %llx extent-state: %x", in xfs_alert_fsblock_zero()
48 (unsigned long long)ip->i_ino, in xfs_alert_fsblock_zero()
49 (unsigned long long)imap->br_startblock, in xfs_alert_fsblock_zero()
50 (unsigned long long)imap->br_startoff, in xfs_alert_fsblock_zero()
51 (unsigned long long)imap->br_blockcount, in xfs_alert_fsblock_zero()
52 imap->br_state); in xfs_alert_fsblock_zero()
54 return -EFSCORRUPTED; in xfs_alert_fsblock_zero()
65 return READ_ONCE(ip->i_af.if_seq); in xfs_iomap_inode_sequence()
66 if ((iomap_flags & IOMAP_F_SHARED) && ip->i_cowfp) in xfs_iomap_inode_sequence()
67 cookie = (u64)READ_ONCE(ip->i_cowfp->if_seq) << 32; in xfs_iomap_inode_sequence()
68 return cookie | READ_ONCE(ip->i_df.if_seq); in xfs_iomap_inode_sequence()
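
xfs_iomap_inode_sequence() (lines 65-68) packs two 32-bit fork sequence counters into one 64-bit cookie: the COW fork counter goes into the upper half via a left shift, the data (or attr) fork counter into the lower half, so a single compare in xfs_iomap_valid() catches a change to either fork. A hedged sketch of the packing; pack_cookie is a made-up helper, not a kernel API:

#include <stdint.h>
#include <assert.h>

/* Combine two 32-bit fork sequence counters into one 64-bit cookie. */
static uint64_t pack_cookie(uint32_t cow_seq, uint32_t data_seq)
{
	return ((uint64_t)cow_seq << 32) | data_seq;
}

int main(void)
{
	uint64_t cookie = pack_cookie(7, 42);

	assert(cookie >> 32 == 7);	/* COW fork half */
	assert((uint32_t)cookie == 42);	/* data fork half */
	return 0;
}
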
82 if (iomap->type == IOMAP_HOLE) in xfs_iomap_valid()
85 if (iomap->validity_cookie != in xfs_iomap_valid()
86 xfs_iomap_inode_sequence(ip, iomap->flags)) { in xfs_iomap_valid()
91 XFS_ERRORTAG_DELAY(ip->i_mount, XFS_ERRTAG_WRITE_DELAY_MS); in xfs_iomap_valid()
108 struct xfs_mount *mp = ip->i_mount; in xfs_bmbt_to_iomap()
111 if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) { in xfs_bmbt_to_iomap()
116 if (imap->br_startblock == HOLESTARTBLOCK) { in xfs_bmbt_to_iomap()
117 iomap->addr = IOMAP_NULL_ADDR; in xfs_bmbt_to_iomap()
118 iomap->type = IOMAP_HOLE; in xfs_bmbt_to_iomap()
119 } else if (imap->br_startblock == DELAYSTARTBLOCK || in xfs_bmbt_to_iomap()
120 isnullstartblock(imap->br_startblock)) { in xfs_bmbt_to_iomap()
121 iomap->addr = IOMAP_NULL_ADDR; in xfs_bmbt_to_iomap()
122 iomap->type = IOMAP_DELALLOC; in xfs_bmbt_to_iomap()
124 xfs_daddr_t daddr = xfs_fsb_to_db(ip, imap->br_startblock); in xfs_bmbt_to_iomap()
126 iomap->addr = BBTOB(daddr); in xfs_bmbt_to_iomap()
128 iomap->addr += target->bt_dax_part_off; in xfs_bmbt_to_iomap()
130 if (imap->br_state == XFS_EXT_UNWRITTEN) in xfs_bmbt_to_iomap()
131 iomap->type = IOMAP_UNWRITTEN; in xfs_bmbt_to_iomap()
133 iomap->type = IOMAP_MAPPED; in xfs_bmbt_to_iomap()
141 xfs_rtbno_is_group_start(mp, imap->br_startblock)) in xfs_bmbt_to_iomap()
142 iomap->flags |= IOMAP_F_BOUNDARY; in xfs_bmbt_to_iomap()
144 iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff); in xfs_bmbt_to_iomap()
145 iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount); in xfs_bmbt_to_iomap()
147 iomap->dax_dev = target->bt_daxdev; in xfs_bmbt_to_iomap()
149 iomap->bdev = target->bt_bdev; in xfs_bmbt_to_iomap()
150 iomap->flags = iomap_flags; in xfs_bmbt_to_iomap()
156 if (ip->i_itemp) { in xfs_bmbt_to_iomap()
157 struct xfs_inode_log_item *iip = ip->i_itemp; in xfs_bmbt_to_iomap()
159 spin_lock(&iip->ili_lock); in xfs_bmbt_to_iomap()
160 if (iip->ili_datasync_seq) in xfs_bmbt_to_iomap()
161 iomap->flags |= IOMAP_F_DIRTY; in xfs_bmbt_to_iomap()
162 spin_unlock(&iip->ili_lock); in xfs_bmbt_to_iomap()
165 iomap->validity_cookie = sequence_cookie; in xfs_bmbt_to_iomap()
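
The branches above (lines 116-133) map each bmbt extent state onto an iomap type: holes report IOMAP_NULL_ADDR/IOMAP_HOLE, delalloc reservations IOMAP_DELALLOC, and allocated extents either IOMAP_UNWRITTEN or IOMAP_MAPPED with a byte-granularity disk address. A condensed model of that decision, with local enums standing in for the kernel's IOMAP_* and XFS_EXT_* constants:

#include <stdio.h>

enum ext_state { EXT_HOLE, EXT_DELALLOC, EXT_UNWRITTEN, EXT_WRITTEN };
enum iomap_type { T_HOLE, T_DELALLOC, T_UNWRITTEN, T_MAPPED };

/* Decision order matches xfs_bmbt_to_iomap(): hole, delalloc, then state. */
static enum iomap_type ext_to_iomap(enum ext_state state)
{
	switch (state) {
	case EXT_HOLE:		return T_HOLE;		/* no backing blocks */
	case EXT_DELALLOC:	return T_DELALLOC;	/* reserved, not yet allocated */
	case EXT_UNWRITTEN:	return T_UNWRITTEN;	/* allocated, reads as zeroes */
	default:		return T_MAPPED;	/* written data */
	}
}

int main(void)
{
	printf("%d\n", ext_to_iomap(EXT_UNWRITTEN));	/* prints 2 */
	return 0;
}
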
178 iomap->addr = IOMAP_NULL_ADDR; in xfs_hole_to_iomap()
179 iomap->type = IOMAP_HOLE; in xfs_hole_to_iomap()
180 iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb); in xfs_hole_to_iomap()
181 iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb); in xfs_hole_to_iomap()
182 iomap->bdev = target->bt_bdev; in xfs_hole_to_iomap()
183 iomap->dax_dev = target->bt_daxdev; in xfs_hole_to_iomap()
192 ASSERT(offset <= mp->m_super->s_maxbytes); in xfs_iomap_end_fsb()
194 XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes)); in xfs_iomap_end_fsb()
201 struct xfs_mount *mp = ip->i_mount; in xfs_eof_alignment()
210 * If mounted with the "-o swalloc" option the alignment is in xfs_eof_alignment()
213 if (mp->m_swidth && xfs_has_swalloc(mp)) in xfs_eof_alignment()
214 align = mp->m_swidth; in xfs_eof_alignment()
215 else if (mp->m_dalign) in xfs_eof_alignment()
216 align = mp->m_dalign; in xfs_eof_alignment()
273 struct xfs_mount *mp = ip->i_mount; in xfs_iomap_write_direct()
310 * left but we need to do unwritten extent conversion. in xfs_iomap_write_direct()
314 if (imap->br_state == XFS_EXT_UNWRITTEN) { in xfs_iomap_write_direct()
321 error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks, in xfs_iomap_write_direct()
347 if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) { in xfs_iomap_write_direct()
372 if (!dq || !xfs_this_quota_on(ip->i_mount, type)) in xfs_quota_need_throttle()
376 res = &dq->q_rtb; in xfs_quota_need_throttle()
377 pre = &dq->q_rtb_prealloc; in xfs_quota_need_throttle()
379 res = &dq->q_blk; in xfs_quota_need_throttle()
380 pre = &dq->q_blk_prealloc; in xfs_quota_need_throttle()
384 if (!pre->q_prealloc_hi_wmark) in xfs_quota_need_throttle()
388 if (res->reserved + alloc_blocks < pre->q_prealloc_lo_wmark) in xfs_quota_need_throttle()
406 int shift = 0; in xfs_quota_calc_throttle() local
412 res = &dq->q_rtb; in xfs_quota_calc_throttle()
413 pre = &dq->q_rtb_prealloc; in xfs_quota_calc_throttle()
415 res = &dq->q_blk; in xfs_quota_calc_throttle()
416 pre = &dq->q_blk_prealloc; in xfs_quota_calc_throttle()
420 if (!res || res->reserved >= pre->q_prealloc_hi_wmark) { in xfs_quota_calc_throttle()
426 freesp = pre->q_prealloc_hi_wmark - res->reserved; in xfs_quota_calc_throttle()
427 if (freesp < pre->q_low_space[XFS_QLOWSP_5_PCNT]) { in xfs_quota_calc_throttle()
428 shift = 2; in xfs_quota_calc_throttle()
429 if (freesp < pre->q_low_space[XFS_QLOWSP_3_PCNT]) in xfs_quota_calc_throttle()
430 shift += 2; in xfs_quota_calc_throttle()
431 if (freesp < pre->q_low_space[XFS_QLOWSP_1_PCNT]) in xfs_quota_calc_throttle()
432 shift += 2; in xfs_quota_calc_throttle()
439 if ((freesp >> shift) < (*qblocks >> *qshift)) { in xfs_quota_calc_throttle()
441 *qshift = shift; in xfs_quota_calc_throttle()
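
xfs_quota_calc_throttle() (lines 406-441) converts quota low-space pressure into an exponential backoff: each prealloc watermark crossed (the 5%, 3%, and 1% levels) adds 2 to the shift, and the final (freesp >> shift) < (*qblocks >> *qshift) test keeps whichever quota throttles hardest. A standalone sketch of the threshold-to-shift ramp, with made-up watermark values:

#include <stdio.h>
#include <stdint.h>

/* low_space[] holds hypothetical 5%/3%/1% watermarks, highest first. */
static int calc_throttle_shift(uint64_t freesp, const uint64_t low_space[3])
{
	int shift = 0;

	if (freesp < low_space[0]) {		/* below the 5% watermark */
		shift = 2;
		if (freesp < low_space[1])	/* below 3% */
			shift += 2;
		if (freesp < low_space[2])	/* below 1% */
			shift += 2;
	}
	return shift;
}

int main(void)
{
	const uint64_t low_space[3] = { 5000, 3000, 1000 };

	printf("%d\n", calc_throttle_shift(6000, low_space));	/* 0: no throttle */
	printf("%d\n", calc_throttle_shift(2500, low_space));	/* 4 */
	printf("%d\n", calc_throttle_shift(500, low_space));	/* 6 */
	return 0;
}
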
450 int *shift) in xfs_iomap_freesp() argument
456 *shift = 2; in xfs_iomap_freesp()
458 (*shift)++; in xfs_iomap_freesp()
460 (*shift)++; in xfs_iomap_freesp()
462 (*shift)++; in xfs_iomap_freesp()
464 (*shift)++; in xfs_iomap_freesp()
485 struct xfs_mount *mp = ip->i_mount; in xfs_iomap_prealloc_size()
492 int shift = 0; in xfs_iomap_prealloc_size() local
501 if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks)) in xfs_iomap_prealloc_size()
508 if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) || in xfs_iomap_prealloc_size()
511 return mp->m_allocsize_blocks; in xfs_iomap_prealloc_size()
554 mp->m_low_rtexts, &shift)); in xfs_iomap_prealloc_size()
556 freesp = xfs_iomap_freesp(mp, XC_FREE_BLOCKS, mp->m_low_space, in xfs_iomap_prealloc_size()
557 &shift); in xfs_iomap_prealloc_size()
560 * Check each quota to cap the prealloc size, provide a shift value to in xfs_iomap_prealloc_size()
577 * The shift throttle value is set to the maximum value as determined by in xfs_iomap_prealloc_size()
578 * the global low free space values and per-quota low free space values. in xfs_iomap_prealloc_size()
581 shift = max(shift, qshift); in xfs_iomap_prealloc_size()
583 if (shift) in xfs_iomap_prealloc_size()
584 alloc_blocks >>= shift; in xfs_iomap_prealloc_size()
602 if (alloc_blocks < mp->m_allocsize_blocks) in xfs_iomap_prealloc_size()
603 alloc_blocks = mp->m_allocsize_blocks; in xfs_iomap_prealloc_size()
604 trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift, in xfs_iomap_prealloc_size()
605 mp->m_allocsize_blocks); in xfs_iomap_prealloc_size()
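
xfs_iomap_prealloc_size() (lines 485-605) then takes the maximum of the global and per-quota shifts and applies it as alloc_blocks >>= shift, so the speculative preallocation shrinks by a factor of four per crossed watermark before being floored at m_allocsize_blocks. A worked example with hypothetical numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t alloc_blocks = 1 << 17;	/* 128k-block candidate prealloc */
	int gshift = 2;				/* global pool below its 5% level */
	int qshift = 6;				/* one quota below its 1% level */
	uint64_t allocsize_blocks = 16;		/* mount's minimum (-o allocsize) */

	int shift = gshift > qshift ? gshift : qshift;

	alloc_blocks >>= shift;			/* 131072 >> 6 = 2048 blocks */
	if (alloc_blocks < allocsize_blocks)
		alloc_blocks = allocsize_blocks;

	printf("throttled prealloc: %llu blocks\n",
	       (unsigned long long)alloc_blocks);
	return 0;
}
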
616 xfs_mount_t *mp = ip->i_mount; in xfs_iomap_write_unwritten()
632 count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb); in xfs_iomap_write_unwritten()
661 error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, in xfs_iomap_write_unwritten()
693 ip->i_disk_size = i_size; in xfs_iomap_write_unwritten()
716 count_fsb -= numblks_fsb; in xfs_iomap_write_unwritten()
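
xfs_iomap_write_unwritten() (lines 616-716) converts the requested range one transaction at a time, advancing by however many blocks each pass handled until count_fsb reaches zero. A sketch of that loop shape; convert_one() is a hypothetical stand-in for the per-mapping allocate/convert transaction:

#include <stdio.h>
#include <stdint.h>

/* Pretend each pass converts at most 16 blocks (one mapping's worth). */
static uint64_t convert_one(uint64_t offset_fsb, uint64_t count_fsb)
{
	uint64_t numblks = count_fsb < 16 ? count_fsb : 16;

	printf("converted [%llu, %llu)\n",
	       (unsigned long long)offset_fsb,
	       (unsigned long long)(offset_fsb + numblks));
	return numblks;
}

int main(void)
{
	uint64_t offset_fsb = 100, count_fsb = 40;

	do {
		uint64_t numblks_fsb = convert_one(offset_fsb, count_fsb);

		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;	/* the decrement shown above */
	} while (count_fsb > 0);
	return 0;
}
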
738 imap->br_startblock == HOLESTARTBLOCK || in imap_needs_alloc()
739 imap->br_startblock == DELAYSTARTBLOCK) in imap_needs_alloc()
742 if ((flags & IOMAP_DAX) && imap->br_state == XFS_EXT_UNWRITTEN) in imap_needs_alloc()
760 imap->br_startblock == HOLESTARTBLOCK || in imap_needs_cow()
761 imap->br_state == XFS_EXT_UNWRITTEN) in imap_needs_cow()
782 if (xfs_need_iread_extents(&ip->i_df)) in xfs_ilock_for_iomap()
783 return -EAGAIN; in xfs_ilock_for_iomap()
785 return -EAGAIN; in xfs_ilock_for_iomap()
787 if (xfs_need_iread_extents(&ip->i_df)) in xfs_ilock_for_iomap()
805 if (imap->br_startoff > offset_fsb) in imap_spans_range()
807 if (imap->br_startoff + imap->br_blockcount < end_fsb) in imap_spans_range()
819 struct xfs_mount *mp = ip->i_mount; in xfs_bmap_hw_atomic_write_possible()
820 xfs_fsize_t len = XFS_FSB_TO_B(mp, end_fsb - offset_fsb); in xfs_bmap_hw_atomic_write_possible()
827 if (!IS_ALIGNED(imap->br_startblock, imap->br_blockcount)) in xfs_bmap_hw_atomic_write_possible()
832 * issued, and so would lose atomicity required for REQ_ATOMIC-based in xfs_bmap_hw_atomic_write_possible()
839 * The ->iomap_begin caller should ensure this, but check anyway. in xfs_bmap_hw_atomic_write_possible()
841 return len <= xfs_inode_buftarg(ip)->bt_awu_max; in xfs_bmap_hw_atomic_write_possible()
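
xfs_bmap_hw_atomic_write_possible() (lines 819-841) requires the extent's start block to be aligned to its own length, so a power-of-two-sized atomic write never straddles a device atomic boundary. The IS_ALIGNED test reduces to a mask check, sketched here with made-up values:

#include <stdio.h>
#include <stdint.h>

/* start % len == 0, computed with a mask; len must be a power of two. */
static int is_aligned(uint64_t start, uint64_t len)
{
	return (start & (len - 1)) == 0;
}

int main(void)
{
	printf("%d\n", is_aligned(1024, 8));	/* 1: atomic write possible */
	printf("%d\n", is_aligned(1025, 8));	/* 0: would straddle a boundary */
	return 0;
}
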
854 struct xfs_mount *mp = ip->i_mount; in xfs_direct_write_iomap_begin()
869 return -EIO; in xfs_direct_write_iomap_begin()
879 /* HW-offload atomics are always used in this path */ in xfs_direct_write_iomap_begin()
907 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, in xfs_direct_write_iomap_begin()
913 error = -EAGAIN; in xfs_direct_write_iomap_begin()
917 /* may drop and re-acquire the ilock */ in xfs_direct_write_iomap_begin()
927 error = -ENOPROTOOPT; in xfs_direct_write_iomap_begin()
933 length = XFS_FSB_TO_B(mp, end_fsb) - offset; in xfs_direct_write_iomap_begin()
939 error = -ENOPROTOOPT; in xfs_direct_write_iomap_begin()
943 * REQ_ATOMIC-based cannot be used, so avoid this possibility. in xfs_direct_write_iomap_begin()
945 if (needs_alloc && orig_end_fsb - offset_fsb > 1) in xfs_direct_write_iomap_begin()
963 error = -EAGAIN; in xfs_direct_write_iomap_begin()
970 * requiring sub-block zeroing. This can only be done under an in xfs_direct_write_iomap_begin()
971 * exclusive IOLOCK, hence return -EAGAIN if this is not a written in xfs_direct_write_iomap_begin()
975 error = -EAGAIN; in xfs_direct_write_iomap_begin()
977 ((offset | length) & mp->m_blockmask)) in xfs_direct_write_iomap_begin()
987 error = -EAGAIN; in xfs_direct_write_iomap_begin()
997 * Note that the values need to be less than 32 bits wide until the in xfs_direct_write_iomap_begin()
1009 error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb, in xfs_direct_write_iomap_begin()
1020 trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap); in xfs_direct_write_iomap_begin()
1066 return -EAGAIN; in xfs_zoned_direct_write_iomap_begin()
1072 if (xfs_need_iread_extents(&ip->i_df)) { in xfs_zoned_direct_write_iomap_begin()
1080 iomap->type = IOMAP_MAPPED; in xfs_zoned_direct_write_iomap_begin()
1081 iomap->flags = IOMAP_F_DIRTY; in xfs_zoned_direct_write_iomap_begin()
1082 iomap->bdev = ip->i_mount->m_rtdev_targp->bt_bdev; in xfs_zoned_direct_write_iomap_begin()
1083 iomap->offset = offset; in xfs_zoned_direct_write_iomap_begin()
1084 iomap->length = length; in xfs_zoned_direct_write_iomap_begin()
1085 iomap->flags = IOMAP_F_ANON_WRITE; in xfs_zoned_direct_write_iomap_begin()
1105 if (xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap2)) in xfs_check_atomic_cow_conversion()
1108 ASSERT(cmap2.br_startoff == cmap->br_startoff); in xfs_check_atomic_cow_conversion()
1109 ASSERT(cmap2.br_blockcount == cmap->br_blockcount); in xfs_check_atomic_cow_conversion()
1110 ASSERT(cmap2.br_startblock == cmap->br_startblock); in xfs_check_atomic_cow_conversion()
1111 ASSERT(cmap2.br_state == cmap->br_state); in xfs_check_atomic_cow_conversion()
1127 struct xfs_mount *mp = ip->i_mount; in xfs_atomic_write_cow_iomap_begin()
1130 const xfs_filblks_t count_fsb = end_fsb - offset_fsb; in xfs_atomic_write_cow_iomap_begin()
1145 return -EIO; in xfs_atomic_write_cow_iomap_begin()
1149 return -EINVAL; in xfs_atomic_write_cow_iomap_begin()
1154 return -EAGAIN; in xfs_atomic_write_cow_iomap_begin()
1160 if (!ip->i_cowfp) { in xfs_atomic_write_cow_iomap_begin()
1165 if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap)) in xfs_atomic_write_cow_iomap_begin()
1181 hole_count_fsb = cmap.br_startoff - offset_fsb; in xfs_atomic_write_cow_iomap_begin()
1195 error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks, in xfs_atomic_write_cow_iomap_begin()
1201 if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap)) in xfs_atomic_write_cow_iomap_begin()
1217 * this COW-based method). in xfs_atomic_write_cow_iomap_begin()
1234 * because of EXTSZALIGN or adjacent pre-existing unwritten mappings in xfs_atomic_write_cow_iomap_begin()
1314 xfs_bmbt_irec_t left; /* left neighbor extent entry */ in xfs_bmap_add_extent_hole_delay() local
1322 ASSERT(isnullstartblock(new->br_startblock)); in xfs_bmap_add_extent_hole_delay()
1325 * Check and set flags if this segment has a left neighbor in xfs_bmap_add_extent_hole_delay()
1327 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { in xfs_bmap_add_extent_hole_delay()
1329 if (isnullstartblock(left.br_startblock)) in xfs_bmap_add_extent_hole_delay()
1335 * If it doesn't exist, we're converting the hole at end-of-file. in xfs_bmap_add_extent_hole_delay()
1344 * Set contiguity flags on the left and right neighbors. in xfs_bmap_add_extent_hole_delay()
1348 left.br_startoff + left.br_blockcount == new->br_startoff && in xfs_bmap_add_extent_hole_delay()
1349 left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN) in xfs_bmap_add_extent_hole_delay()
1353 new->br_startoff + new->br_blockcount == right.br_startoff && in xfs_bmap_add_extent_hole_delay()
1354 new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN && in xfs_bmap_add_extent_hole_delay()
1356 (left.br_blockcount + new->br_blockcount + in xfs_bmap_add_extent_hole_delay()
1367 * on the left and on the right. in xfs_bmap_add_extent_hole_delay()
1370 temp = left.br_blockcount + new->br_blockcount + in xfs_bmap_add_extent_hole_delay()
1373 oldlen = startblockval(left.br_startblock) + in xfs_bmap_add_extent_hole_delay()
1374 startblockval(new->br_startblock) + in xfs_bmap_add_extent_hole_delay()
1378 left.br_startblock = nullstartblock(newlen); in xfs_bmap_add_extent_hole_delay()
1379 left.br_blockcount = temp; in xfs_bmap_add_extent_hole_delay()
1383 xfs_iext_update_extent(ip, state, icur, &left); in xfs_bmap_add_extent_hole_delay()
1389 * on the left. in xfs_bmap_add_extent_hole_delay()
1390 * Merge the new allocation with the left neighbor. in xfs_bmap_add_extent_hole_delay()
1392 temp = left.br_blockcount + new->br_blockcount; in xfs_bmap_add_extent_hole_delay()
1394 oldlen = startblockval(left.br_startblock) + in xfs_bmap_add_extent_hole_delay()
1395 startblockval(new->br_startblock); in xfs_bmap_add_extent_hole_delay()
1398 left.br_blockcount = temp; in xfs_bmap_add_extent_hole_delay()
1399 left.br_startblock = nullstartblock(newlen); in xfs_bmap_add_extent_hole_delay()
1402 xfs_iext_update_extent(ip, state, icur, &left); in xfs_bmap_add_extent_hole_delay()
1411 temp = new->br_blockcount + right.br_blockcount; in xfs_bmap_add_extent_hole_delay()
1412 oldlen = startblockval(new->br_startblock) + in xfs_bmap_add_extent_hole_delay()
1416 right.br_startoff = new->br_startoff; in xfs_bmap_add_extent_hole_delay()
1434 xfs_add_fdblocks(ip->i_mount, oldlen - newlen); in xfs_bmap_add_extent_hole_delay()
1439 xfs_mod_delalloc(ip, 0, (int64_t)newlen - oldlen); in xfs_bmap_add_extent_hole_delay()
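
When xfs_bmap_add_extent_hole_delay() (lines 1314-1439) merges delalloc extents, it recomputes the worst-case indirect-block reservation for the combined extent and returns the surplus (oldlen - newlen) to the free-block pool. A toy model of that rebalancing; worst_indlen() only stands in for xfs_bmap_worst_indlen(), whose real formula depends on bmap btree geometry:

#include <stdio.h>
#include <stdint.h>

/* Toy model: fixed 4-block btree overhead plus one block per 256 data blocks. */
static uint64_t worst_indlen(uint64_t len)
{
	return len / 256 + 4;
}

int main(void)
{
	uint64_t left_len = 100, new_len = 60;	/* two merging delalloc extents */
	uint64_t oldlen = worst_indlen(left_len) + worst_indlen(new_len);
	uint64_t temp = left_len + new_len;	/* merged extent length */
	uint64_t newlen = worst_indlen(temp);

	/* The merged extent never needs more reservation than its parts did. */
	printf("returned to fdblocks: %llu\n",
	       (unsigned long long)(oldlen - newlen));	/* prints 4 */
	return 0;
}
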
1445 * global pool and the extent inserted into the inode in-core extent tree.
1467 struct xfs_mount *mp = ip->i_mount; in xfs_bmapi_reserve_delalloc()
1485 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff); in xfs_bmapi_reserve_delalloc()
1487 prealloc = alen - len; in xfs_bmapi_reserve_delalloc()
1511 * Make a transaction-less quota reservation for delayed allocation in xfs_bmapi_reserve_delalloc()
1540 ip->i_delayed_blks += alen; in xfs_bmapi_reserve_delalloc()
1543 got->br_startoff = aoff; in xfs_bmapi_reserve_delalloc()
1544 got->br_startblock = nullstartblock(indlen); in xfs_bmapi_reserve_delalloc()
1545 got->br_blockcount = alen; in xfs_bmapi_reserve_delalloc()
1546 got->br_state = XFS_EXT_NORM; in xfs_bmapi_reserve_delalloc()
1569 if (error == -ENOSPC || error == -EDQUOT) { in xfs_bmapi_reserve_delalloc()
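
The -ENOSPC/-EDQUOT check at line 1569 reflects xfs_bmapi_reserve_delalloc()'s fallback: if reserving the write plus its speculative preallocation fails for lack of space or quota, retry asking only for the blocks the write actually needs. A hedged sketch of that retry shape; reserve() and its arguments are invented for illustration:

#include <errno.h>
#include <stdio.h>
#include <stdint.h>

/* Toy reservation: succeed only if the request fits in the free pool. */
static int reserve(uint64_t blocks, uint64_t available)
{
	return blocks <= available ? 0 : -ENOSPC;
}

int main(void)
{
	uint64_t len = 8, prealloc = 120, available = 64;

	int error = reserve(len + prealloc, available);
	if ((error == -ENOSPC || error == -EDQUOT) && prealloc) {
		prealloc = 0;			/* drop the speculation */
		error = reserve(len, available);
	}
	printf("error=%d prealloc=%llu\n",
	       error, (unsigned long long)prealloc);
	return 0;
}
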
1593 struct xfs_zone_alloc_ctx *ac = iter->private; in xfs_zoned_buffered_write_iomap_begin()
1595 struct xfs_mount *mp = ip->i_mount; in xfs_zoned_buffered_write_iomap_begin()
1611 return -EIO; in xfs_zoned_buffered_write_iomap_begin()
1621 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) || in xfs_zoned_buffered_write_iomap_begin()
1624 error = -EFSCORRUPTED; in xfs_zoned_buffered_write_iomap_begin()
1639 * read-modify-write of the whole block in the page cache. in xfs_zoned_buffered_write_iomap_begin()
1644 if (!IS_ALIGNED(offset, mp->m_sb.sb_blocksize) || in xfs_zoned_buffered_write_iomap_begin()
1645 !IS_ALIGNED(offset + count, mp->m_sb.sb_blocksize) || in xfs_zoned_buffered_write_iomap_begin()
1650 if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &scur, in xfs_zoned_buffered_write_iomap_begin()
1667 end_fsb - offset_fsb); in xfs_zoned_buffered_write_iomap_begin()
1675 if (!ip->i_cowfp) in xfs_zoned_buffered_write_iomap_begin()
1678 if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got)) in xfs_zoned_buffered_write_iomap_begin()
1690 count_fsb = min3(end_fsb - offset_fsb, XFS_MAX_BMBT_EXTLEN, in xfs_zoned_buffered_write_iomap_begin()
1700 * ->page_mkwrite in range this thread writes to, using up the in xfs_zoned_buffered_write_iomap_begin()
1705 * iteration short, causing the new call to ->iomap_begin that gets in xfs_zoned_buffered_write_iomap_begin()
1712 if (count_fsb > ac->reserved_blocks) { in xfs_zoned_buffered_write_iomap_begin()
1714 "Short write on ino 0x%llx comm %.20s due to three-way race with write fault and direct I/O", in xfs_zoned_buffered_write_iomap_begin()
1715 ip->i_ino, current->comm); in xfs_zoned_buffered_write_iomap_begin()
1716 count_fsb = ac->reserved_blocks; in xfs_zoned_buffered_write_iomap_begin()
1718 error = -EIO; in xfs_zoned_buffered_write_iomap_begin()
1731 ip->i_delayed_blks += count_fsb; in xfs_zoned_buffered_write_iomap_begin()
1739 ac->reserved_blocks -= count_fsb; in xfs_zoned_buffered_write_iomap_begin()
1762 struct xfs_mount *mp = ip->i_mount; in xfs_buffered_write_iomap_begin()
1776 return -EIO; in xfs_buffered_write_iomap_begin()
1795 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) || in xfs_buffered_write_iomap_begin()
1798 error = -EFSCORRUPTED; in xfs_buffered_write_iomap_begin()
1812 * perform read-modify-write cycles for unaligned writes. in xfs_buffered_write_iomap_begin()
1814 eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap); in xfs_buffered_write_iomap_begin()
1839 end_fsb - offset_fsb); in xfs_buffered_write_iomap_begin()
1852 if (!ip->i_cowfp) { in xfs_buffered_write_iomap_begin()
1856 cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, in xfs_buffered_write_iomap_begin()
1877 xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb); in xfs_buffered_write_iomap_begin()
1904 * Note that the values need to be less than 32 bits wide until in xfs_buffered_write_iomap_begin()
1920 prealloc_blocks = mp->m_allocsize_blocks; in xfs_buffered_write_iomap_begin()
1932 end_offset = XFS_ALLOC_ALIGN(mp, offset + count - 1); in xfs_buffered_write_iomap_begin()
1941 XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes)); in xfs_buffered_write_iomap_begin()
1943 prealloc_blocks = p_end_fsb - end_fsb; in xfs_buffered_write_iomap_begin()
1954 end_fsb - offset_fsb, prealloc_blocks, &cmap, in xfs_buffered_write_iomap_begin()
1964 end_fsb - offset_fsb, prealloc_blocks, &imap, &icur, in xfs_buffered_write_iomap_begin()
1994 imap.br_startoff - offset_fsb); in xfs_buffered_write_iomap_begin()
2018 (iomap->flags & IOMAP_F_SHARED) ? in xfs_buffered_write_delalloc_punch()
2020 offset, offset + length, iter->private); in xfs_buffered_write_delalloc_punch()
2035 if (iomap->type != IOMAP_DELALLOC || !(iomap->flags & IOMAP_F_NEW)) in xfs_buffered_write_iomap_end()
2054 rwsem_assert_held_write(&inode->i_mapping->invalidate_lock); in xfs_buffered_write_iomap_end()
2058 filemap_invalidate_lock(inode->i_mapping); in xfs_buffered_write_iomap_end()
2061 filemap_invalidate_unlock(inode->i_mapping); in xfs_buffered_write_iomap_end()
2082 struct xfs_mount *mp = ip->i_mount; in xfs_read_iomap_begin()
2094 return -EIO; in xfs_read_iomap_begin()
2099 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, in xfs_read_iomap_begin()
2127 struct xfs_mount *mp = ip->i_mount; in xfs_seek_iomap_begin()
2138 return -EIO; in xfs_seek_iomap_begin()
2145 if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) { in xfs_seek_iomap_begin()
2160 * If a COW fork extent covers the hole, report it - capped to the next in xfs_seek_iomap_begin()
2164 xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap)) in xfs_seek_iomap_begin()
2169 xfs_trim_extent(&cmap, offset_fsb, end_fsb - offset_fsb); in xfs_seek_iomap_begin()
2178 iomap->type = IOMAP_UNWRITTEN; in xfs_seek_iomap_begin()
2186 imap.br_blockcount = cow_fsb - offset_fsb; in xfs_seek_iomap_begin()
2188 imap.br_blockcount = data_fsb - offset_fsb; in xfs_seek_iomap_begin()
2194 xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb); in xfs_seek_iomap_begin()
2215 struct xfs_mount *mp = ip->i_mount; in xfs_xattr_iomap_begin()
2224 return -EIO; in xfs_xattr_iomap_begin()
2229 if (!xfs_inode_has_attr_fork(ip) || !ip->i_af.if_nextents) { in xfs_xattr_iomap_begin()
2230 error = -ENOENT; in xfs_xattr_iomap_begin()
2234 ASSERT(ip->i_af.if_format != XFS_DINODE_FMT_LOCAL); in xfs_xattr_iomap_begin()
2235 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, in xfs_xattr_iomap_begin()