1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2006 Silicon Graphics, Inc. 4 * Copyright (c) 2016 Christoph Hellwig. 5 * All Rights Reserved. 6 */ 7 #include <linux/iomap.h> 8 #include "xfs.h" 9 #include "xfs_fs.h" 10 #include "xfs_shared.h" 11 #include "xfs_format.h" 12 #include "xfs_log_format.h" 13 #include "xfs_trans_resv.h" 14 #include "xfs_mount.h" 15 #include "xfs_defer.h" 16 #include "xfs_inode.h" 17 #include "xfs_btree.h" 18 #include "xfs_bmap_btree.h" 19 #include "xfs_bmap.h" 20 #include "xfs_bmap_util.h" 21 #include "xfs_errortag.h" 22 #include "xfs_error.h" 23 #include "xfs_trans.h" 24 #include "xfs_trans_space.h" 25 #include "xfs_inode_item.h" 26 #include "xfs_iomap.h" 27 #include "xfs_trace.h" 28 #include "xfs_icache.h" 29 #include "xfs_quota.h" 30 #include "xfs_dquot_item.h" 31 #include "xfs_dquot.h" 32 #include "xfs_reflink.h" 33 34 35 #define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \ 36 << mp->m_writeio_log) 37 38 void 39 xfs_bmbt_to_iomap( 40 struct xfs_inode *ip, 41 struct iomap *iomap, 42 struct xfs_bmbt_irec *imap) 43 { 44 struct xfs_mount *mp = ip->i_mount; 45 46 if (imap->br_startblock == HOLESTARTBLOCK) { 47 iomap->addr = IOMAP_NULL_ADDR; 48 iomap->type = IOMAP_HOLE; 49 } else if (imap->br_startblock == DELAYSTARTBLOCK) { 50 iomap->addr = IOMAP_NULL_ADDR; 51 iomap->type = IOMAP_DELALLOC; 52 } else { 53 iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock)); 54 if (imap->br_state == XFS_EXT_UNWRITTEN) 55 iomap->type = IOMAP_UNWRITTEN; 56 else 57 iomap->type = IOMAP_MAPPED; 58 } 59 iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff); 60 iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount); 61 iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip)); 62 iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip)); 63 } 64 65 xfs_extlen_t 66 xfs_eof_alignment( 67 struct xfs_inode *ip, 68 xfs_extlen_t extsize) 69 { 70 struct xfs_mount *mp = ip->i_mount; 71 xfs_extlen_t align = 0; 72 73 if (!XFS_IS_REALTIME_INODE(ip)) { 74 /* 75 * Round up the allocation request to a stripe unit 76 * (m_dalign) boundary if the file size is >= stripe unit 77 * size, and we are allocating past the allocation eof. 78 * 79 * If mounted with the "-o swalloc" option the alignment is 80 * increased from the strip unit size to the stripe width. 81 */ 82 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC)) 83 align = mp->m_swidth; 84 else if (mp->m_dalign) 85 align = mp->m_dalign; 86 87 if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align)) 88 align = 0; 89 } 90 91 /* 92 * Always round up the allocation request to an extent boundary 93 * (when file on a real-time subvolume or has di_extsize hint). 94 */ 95 if (extsize) { 96 if (align) 97 align = roundup_64(align, extsize); 98 else 99 align = extsize; 100 } 101 102 return align; 103 } 104 105 STATIC int 106 xfs_iomap_eof_align_last_fsb( 107 struct xfs_inode *ip, 108 xfs_extlen_t extsize, 109 xfs_fileoff_t *last_fsb) 110 { 111 xfs_extlen_t align = xfs_eof_alignment(ip, extsize); 112 113 if (align) { 114 xfs_fileoff_t new_last_fsb = roundup_64(*last_fsb, align); 115 int eof, error; 116 117 error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof); 118 if (error) 119 return error; 120 if (eof) 121 *last_fsb = new_last_fsb; 122 } 123 return 0; 124 } 125 126 STATIC int 127 xfs_alert_fsblock_zero( 128 xfs_inode_t *ip, 129 xfs_bmbt_irec_t *imap) 130 { 131 xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO, 132 "Access to block zero in inode %llu " 133 "start_block: %llx start_off: %llx " 134 "blkcnt: %llx extent-state: %x", 135 (unsigned long long)ip->i_ino, 136 (unsigned long long)imap->br_startblock, 137 (unsigned long long)imap->br_startoff, 138 (unsigned long long)imap->br_blockcount, 139 imap->br_state); 140 return -EFSCORRUPTED; 141 } 142 143 int 144 xfs_iomap_write_direct( 145 xfs_inode_t *ip, 146 xfs_off_t offset, 147 size_t count, 148 xfs_bmbt_irec_t *imap, 149 int nmaps) 150 { 151 xfs_mount_t *mp = ip->i_mount; 152 xfs_fileoff_t offset_fsb; 153 xfs_fileoff_t last_fsb; 154 xfs_filblks_t count_fsb, resaligned; 155 xfs_fsblock_t firstfsb; 156 xfs_extlen_t extsz; 157 int nimaps; 158 int quota_flag; 159 int rt; 160 xfs_trans_t *tp; 161 struct xfs_defer_ops dfops; 162 uint qblocks, resblks, resrtextents; 163 int error; 164 int lockmode; 165 int bmapi_flags = XFS_BMAPI_PREALLOC; 166 uint tflags = 0; 167 168 rt = XFS_IS_REALTIME_INODE(ip); 169 extsz = xfs_get_extsz_hint(ip); 170 lockmode = XFS_ILOCK_SHARED; /* locked by caller */ 171 172 ASSERT(xfs_isilocked(ip, lockmode)); 173 174 offset_fsb = XFS_B_TO_FSBT(mp, offset); 175 last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); 176 if ((offset + count) > XFS_ISIZE(ip)) { 177 /* 178 * Assert that the in-core extent list is present since this can 179 * call xfs_iread_extents() and we only have the ilock shared. 180 * This should be safe because the lock was held around a bmapi 181 * call in the caller and we only need it to access the in-core 182 * list. 183 */ 184 ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags & 185 XFS_IFEXTENTS); 186 error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb); 187 if (error) 188 goto out_unlock; 189 } else { 190 if (nmaps && (imap->br_startblock == HOLESTARTBLOCK)) 191 last_fsb = min(last_fsb, (xfs_fileoff_t) 192 imap->br_blockcount + 193 imap->br_startoff); 194 } 195 count_fsb = last_fsb - offset_fsb; 196 ASSERT(count_fsb > 0); 197 resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, extsz); 198 199 if (unlikely(rt)) { 200 resrtextents = qblocks = resaligned; 201 resrtextents /= mp->m_sb.sb_rextsize; 202 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); 203 quota_flag = XFS_QMOPT_RES_RTBLKS; 204 } else { 205 resrtextents = 0; 206 resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned); 207 quota_flag = XFS_QMOPT_RES_REGBLKS; 208 } 209 210 /* 211 * Drop the shared lock acquired by the caller, attach the dquot if 212 * necessary and move on to transaction setup. 213 */ 214 xfs_iunlock(ip, lockmode); 215 error = xfs_qm_dqattach(ip); 216 if (error) 217 return error; 218 219 /* 220 * For DAX, we do not allocate unwritten extents, but instead we zero 221 * the block before we commit the transaction. Ideally we'd like to do 222 * this outside the transaction context, but if we commit and then crash 223 * we may not have zeroed the blocks and this will be exposed on 224 * recovery of the allocation. Hence we must zero before commit. 225 * 226 * Further, if we are mapping unwritten extents here, we need to zero 227 * and convert them to written so that we don't need an unwritten extent 228 * callback for DAX. This also means that we need to be able to dip into 229 * the reserve block pool for bmbt block allocation if there is no space 230 * left but we need to do unwritten extent conversion. 231 */ 232 if (IS_DAX(VFS_I(ip))) { 233 bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO; 234 if (imap->br_state == XFS_EXT_UNWRITTEN) { 235 tflags |= XFS_TRANS_RESERVE; 236 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1; 237 } 238 } 239 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents, 240 tflags, &tp); 241 if (error) 242 return error; 243 244 lockmode = XFS_ILOCK_EXCL; 245 xfs_ilock(ip, lockmode); 246 247 error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag); 248 if (error) 249 goto out_trans_cancel; 250 251 xfs_trans_ijoin(tp, ip, 0); 252 253 /* 254 * From this point onwards we overwrite the imap pointer that the 255 * caller gave to us. 256 */ 257 xfs_defer_init(&dfops, &firstfsb); 258 nimaps = 1; 259 error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, 260 bmapi_flags, &firstfsb, resblks, imap, 261 &nimaps, &dfops); 262 if (error) 263 goto out_bmap_cancel; 264 265 /* 266 * Complete the transaction 267 */ 268 error = xfs_defer_finish(&tp, &dfops); 269 if (error) 270 goto out_bmap_cancel; 271 272 error = xfs_trans_commit(tp); 273 if (error) 274 goto out_unlock; 275 276 /* 277 * Copy any maps to caller's array and return any error. 278 */ 279 if (nimaps == 0) { 280 error = -ENOSPC; 281 goto out_unlock; 282 } 283 284 if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) 285 error = xfs_alert_fsblock_zero(ip, imap); 286 287 out_unlock: 288 xfs_iunlock(ip, lockmode); 289 return error; 290 291 out_bmap_cancel: 292 xfs_defer_cancel(&dfops); 293 xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag); 294 out_trans_cancel: 295 xfs_trans_cancel(tp); 296 goto out_unlock; 297 } 298 299 STATIC bool 300 xfs_quota_need_throttle( 301 struct xfs_inode *ip, 302 int type, 303 xfs_fsblock_t alloc_blocks) 304 { 305 struct xfs_dquot *dq = xfs_inode_dquot(ip, type); 306 307 if (!dq || !xfs_this_quota_on(ip->i_mount, type)) 308 return false; 309 310 /* no hi watermark, no throttle */ 311 if (!dq->q_prealloc_hi_wmark) 312 return false; 313 314 /* under the lo watermark, no throttle */ 315 if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark) 316 return false; 317 318 return true; 319 } 320 321 STATIC void 322 xfs_quota_calc_throttle( 323 struct xfs_inode *ip, 324 int type, 325 xfs_fsblock_t *qblocks, 326 int *qshift, 327 int64_t *qfreesp) 328 { 329 int64_t freesp; 330 int shift = 0; 331 struct xfs_dquot *dq = xfs_inode_dquot(ip, type); 332 333 /* no dq, or over hi wmark, squash the prealloc completely */ 334 if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) { 335 *qblocks = 0; 336 *qfreesp = 0; 337 return; 338 } 339 340 freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount; 341 if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) { 342 shift = 2; 343 if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT]) 344 shift += 2; 345 if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT]) 346 shift += 2; 347 } 348 349 if (freesp < *qfreesp) 350 *qfreesp = freesp; 351 352 /* only overwrite the throttle values if we are more aggressive */ 353 if ((freesp >> shift) < (*qblocks >> *qshift)) { 354 *qblocks = freesp; 355 *qshift = shift; 356 } 357 } 358 359 /* 360 * If we are doing a write at the end of the file and there are no allocations 361 * past this one, then extend the allocation out to the file system's write 362 * iosize. 363 * 364 * If we don't have a user specified preallocation size, dynamically increase 365 * the preallocation size as the size of the file grows. Cap the maximum size 366 * at a single extent or less if the filesystem is near full. The closer the 367 * filesystem is to full, the smaller the maximum prealocation. 368 * 369 * As an exception we don't do any preallocation at all if the file is smaller 370 * than the minimum preallocation and we are using the default dynamic 371 * preallocation scheme, as it is likely this is the only write to the file that 372 * is going to be done. 373 * 374 * We clean up any extra space left over when the file is closed in 375 * xfs_inactive(). 376 */ 377 STATIC xfs_fsblock_t 378 xfs_iomap_prealloc_size( 379 struct xfs_inode *ip, 380 loff_t offset, 381 loff_t count, 382 struct xfs_iext_cursor *icur) 383 { 384 struct xfs_mount *mp = ip->i_mount; 385 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); 386 xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); 387 struct xfs_bmbt_irec prev; 388 int shift = 0; 389 int64_t freesp; 390 xfs_fsblock_t qblocks; 391 int qshift = 0; 392 xfs_fsblock_t alloc_blocks = 0; 393 394 if (offset + count <= XFS_ISIZE(ip)) 395 return 0; 396 397 if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) && 398 (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks))) 399 return 0; 400 401 /* 402 * If an explicit allocsize is set, the file is small, or we 403 * are writing behind a hole, then use the minimum prealloc: 404 */ 405 if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) || 406 XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) || 407 !xfs_iext_peek_prev_extent(ifp, icur, &prev) || 408 prev.br_startoff + prev.br_blockcount < offset_fsb) 409 return mp->m_writeio_blocks; 410 411 /* 412 * Determine the initial size of the preallocation. We are beyond the 413 * current EOF here, but we need to take into account whether this is 414 * a sparse write or an extending write when determining the 415 * preallocation size. Hence we need to look up the extent that ends 416 * at the current write offset and use the result to determine the 417 * preallocation size. 418 * 419 * If the extent is a hole, then preallocation is essentially disabled. 420 * Otherwise we take the size of the preceding data extent as the basis 421 * for the preallocation size. If the size of the extent is greater than 422 * half the maximum extent length, then use the current offset as the 423 * basis. This ensures that for large files the preallocation size 424 * always extends to MAXEXTLEN rather than falling short due to things 425 * like stripe unit/width alignment of real extents. 426 */ 427 if (prev.br_blockcount <= (MAXEXTLEN >> 1)) 428 alloc_blocks = prev.br_blockcount << 1; 429 else 430 alloc_blocks = XFS_B_TO_FSB(mp, offset); 431 if (!alloc_blocks) 432 goto check_writeio; 433 qblocks = alloc_blocks; 434 435 /* 436 * MAXEXTLEN is not a power of two value but we round the prealloc down 437 * to the nearest power of two value after throttling. To prevent the 438 * round down from unconditionally reducing the maximum supported prealloc 439 * size, we round up first, apply appropriate throttling, round down and 440 * cap the value to MAXEXTLEN. 441 */ 442 alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN), 443 alloc_blocks); 444 445 freesp = percpu_counter_read_positive(&mp->m_fdblocks); 446 if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) { 447 shift = 2; 448 if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT]) 449 shift++; 450 if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT]) 451 shift++; 452 if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT]) 453 shift++; 454 if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT]) 455 shift++; 456 } 457 458 /* 459 * Check each quota to cap the prealloc size, provide a shift value to 460 * throttle with and adjust amount of available space. 461 */ 462 if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks)) 463 xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift, 464 &freesp); 465 if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks)) 466 xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift, 467 &freesp); 468 if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks)) 469 xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift, 470 &freesp); 471 472 /* 473 * The final prealloc size is set to the minimum of free space available 474 * in each of the quotas and the overall filesystem. 475 * 476 * The shift throttle value is set to the maximum value as determined by 477 * the global low free space values and per-quota low free space values. 478 */ 479 alloc_blocks = min(alloc_blocks, qblocks); 480 shift = max(shift, qshift); 481 482 if (shift) 483 alloc_blocks >>= shift; 484 /* 485 * rounddown_pow_of_two() returns an undefined result if we pass in 486 * alloc_blocks = 0. 487 */ 488 if (alloc_blocks) 489 alloc_blocks = rounddown_pow_of_two(alloc_blocks); 490 if (alloc_blocks > MAXEXTLEN) 491 alloc_blocks = MAXEXTLEN; 492 493 /* 494 * If we are still trying to allocate more space than is 495 * available, squash the prealloc hard. This can happen if we 496 * have a large file on a small filesystem and the above 497 * lowspace thresholds are smaller than MAXEXTLEN. 498 */ 499 while (alloc_blocks && alloc_blocks >= freesp) 500 alloc_blocks >>= 4; 501 check_writeio: 502 if (alloc_blocks < mp->m_writeio_blocks) 503 alloc_blocks = mp->m_writeio_blocks; 504 trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift, 505 mp->m_writeio_blocks); 506 return alloc_blocks; 507 } 508 509 static int 510 xfs_file_iomap_begin_delay( 511 struct inode *inode, 512 loff_t offset, 513 loff_t count, 514 struct iomap *iomap) 515 { 516 struct xfs_inode *ip = XFS_I(inode); 517 struct xfs_mount *mp = ip->i_mount; 518 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); 519 xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); 520 xfs_fileoff_t maxbytes_fsb = 521 XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes); 522 xfs_fileoff_t end_fsb; 523 int error = 0, eof = 0; 524 struct xfs_bmbt_irec got; 525 struct xfs_iext_cursor icur; 526 xfs_fsblock_t prealloc_blocks = 0; 527 528 ASSERT(!XFS_IS_REALTIME_INODE(ip)); 529 ASSERT(!xfs_get_extsz_hint(ip)); 530 531 xfs_ilock(ip, XFS_ILOCK_EXCL); 532 533 if (unlikely(XFS_TEST_ERROR( 534 (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS && 535 XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE), 536 mp, XFS_ERRTAG_BMAPIFORMAT))) { 537 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); 538 error = -EFSCORRUPTED; 539 goto out_unlock; 540 } 541 542 XFS_STATS_INC(mp, xs_blk_mapw); 543 544 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 545 error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK); 546 if (error) 547 goto out_unlock; 548 } 549 550 eof = !xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got); 551 if (!eof && got.br_startoff <= offset_fsb) { 552 if (xfs_is_reflink_inode(ip)) { 553 bool shared; 554 555 end_fsb = min(XFS_B_TO_FSB(mp, offset + count), 556 maxbytes_fsb); 557 xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb); 558 error = xfs_reflink_reserve_cow(ip, &got, &shared); 559 if (error) 560 goto out_unlock; 561 } 562 563 trace_xfs_iomap_found(ip, offset, count, 0, &got); 564 goto done; 565 } 566 567 error = xfs_qm_dqattach_locked(ip, false); 568 if (error) 569 goto out_unlock; 570 571 /* 572 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES pages 573 * to keep the chunks of work done where somewhat symmetric with the 574 * work writeback does. This is a completely arbitrary number pulled 575 * out of thin air as a best guess for initial testing. 576 * 577 * Note that the values needs to be less than 32-bits wide until 578 * the lower level functions are updated. 579 */ 580 count = min_t(loff_t, count, 1024 * PAGE_SIZE); 581 end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb); 582 583 if (eof) { 584 prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count, 585 &icur); 586 if (prealloc_blocks) { 587 xfs_extlen_t align; 588 xfs_off_t end_offset; 589 xfs_fileoff_t p_end_fsb; 590 591 end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1); 592 p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) + 593 prealloc_blocks; 594 595 align = xfs_eof_alignment(ip, 0); 596 if (align) 597 p_end_fsb = roundup_64(p_end_fsb, align); 598 599 p_end_fsb = min(p_end_fsb, maxbytes_fsb); 600 ASSERT(p_end_fsb > offset_fsb); 601 prealloc_blocks = p_end_fsb - end_fsb; 602 } 603 } 604 605 retry: 606 error = xfs_bmapi_reserve_delalloc(ip, XFS_DATA_FORK, offset_fsb, 607 end_fsb - offset_fsb, prealloc_blocks, &got, &icur, 608 eof); 609 switch (error) { 610 case 0: 611 break; 612 case -ENOSPC: 613 case -EDQUOT: 614 /* retry without any preallocation */ 615 trace_xfs_delalloc_enospc(ip, offset, count); 616 if (prealloc_blocks) { 617 prealloc_blocks = 0; 618 goto retry; 619 } 620 /*FALLTHRU*/ 621 default: 622 goto out_unlock; 623 } 624 625 /* 626 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch 627 * them out if the write happens to fail. 628 */ 629 iomap->flags = IOMAP_F_NEW; 630 trace_xfs_iomap_alloc(ip, offset, count, 0, &got); 631 done: 632 if (isnullstartblock(got.br_startblock)) 633 got.br_startblock = DELAYSTARTBLOCK; 634 635 if (!got.br_startblock) { 636 error = xfs_alert_fsblock_zero(ip, &got); 637 if (error) 638 goto out_unlock; 639 } 640 641 xfs_bmbt_to_iomap(ip, iomap, &got); 642 643 out_unlock: 644 xfs_iunlock(ip, XFS_ILOCK_EXCL); 645 return error; 646 } 647 648 /* 649 * Pass in a delayed allocate extent, convert it to real extents; 650 * return to the caller the extent we create which maps on top of 651 * the originating callers request. 652 * 653 * Called without a lock on the inode. 654 * 655 * We no longer bother to look at the incoming map - all we have to 656 * guarantee is that whatever we allocate fills the required range. 657 */ 658 int 659 xfs_iomap_write_allocate( 660 xfs_inode_t *ip, 661 int whichfork, 662 xfs_off_t offset, 663 xfs_bmbt_irec_t *imap) 664 { 665 xfs_mount_t *mp = ip->i_mount; 666 xfs_fileoff_t offset_fsb, last_block; 667 xfs_fileoff_t end_fsb, map_start_fsb; 668 xfs_fsblock_t first_block; 669 struct xfs_defer_ops dfops; 670 xfs_filblks_t count_fsb; 671 xfs_trans_t *tp; 672 int nimaps; 673 int error = 0; 674 int flags = XFS_BMAPI_DELALLOC; 675 int nres; 676 677 if (whichfork == XFS_COW_FORK) 678 flags |= XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC; 679 680 /* 681 * Make sure that the dquots are there. 682 */ 683 error = xfs_qm_dqattach(ip); 684 if (error) 685 return error; 686 687 offset_fsb = XFS_B_TO_FSBT(mp, offset); 688 count_fsb = imap->br_blockcount; 689 map_start_fsb = imap->br_startoff; 690 691 XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb)); 692 693 while (count_fsb != 0) { 694 /* 695 * Set up a transaction with which to allocate the 696 * backing store for the file. Do allocations in a 697 * loop until we get some space in the range we are 698 * interested in. The other space that might be allocated 699 * is in the delayed allocation extent on which we sit 700 * but before our buffer starts. 701 */ 702 nimaps = 0; 703 while (nimaps == 0) { 704 nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); 705 /* 706 * We have already reserved space for the extent and any 707 * indirect blocks when creating the delalloc extent, 708 * there is no need to reserve space in this transaction 709 * again. 710 */ 711 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 712 0, XFS_TRANS_RESERVE, &tp); 713 if (error) 714 return error; 715 716 xfs_ilock(ip, XFS_ILOCK_EXCL); 717 xfs_trans_ijoin(tp, ip, 0); 718 719 xfs_defer_init(&dfops, &first_block); 720 721 /* 722 * it is possible that the extents have changed since 723 * we did the read call as we dropped the ilock for a 724 * while. We have to be careful about truncates or hole 725 * punchs here - we are not allowed to allocate 726 * non-delalloc blocks here. 727 * 728 * The only protection against truncation is the pages 729 * for the range we are being asked to convert are 730 * locked and hence a truncate will block on them 731 * first. 732 * 733 * As a result, if we go beyond the range we really 734 * need and hit an delalloc extent boundary followed by 735 * a hole while we have excess blocks in the map, we 736 * will fill the hole incorrectly and overrun the 737 * transaction reservation. 738 * 739 * Using a single map prevents this as we are forced to 740 * check each map we look for overlap with the desired 741 * range and abort as soon as we find it. Also, given 742 * that we only return a single map, having one beyond 743 * what we can return is probably a bit silly. 744 * 745 * We also need to check that we don't go beyond EOF; 746 * this is a truncate optimisation as a truncate sets 747 * the new file size before block on the pages we 748 * currently have locked under writeback. Because they 749 * are about to be tossed, we don't need to write them 750 * back.... 751 */ 752 nimaps = 1; 753 end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip)); 754 error = xfs_bmap_last_offset(ip, &last_block, 755 XFS_DATA_FORK); 756 if (error) 757 goto trans_cancel; 758 759 last_block = XFS_FILEOFF_MAX(last_block, end_fsb); 760 if ((map_start_fsb + count_fsb) > last_block) { 761 count_fsb = last_block - map_start_fsb; 762 if (count_fsb == 0) { 763 error = -EAGAIN; 764 goto trans_cancel; 765 } 766 } 767 768 /* 769 * From this point onwards we overwrite the imap 770 * pointer that the caller gave to us. 771 */ 772 error = xfs_bmapi_write(tp, ip, map_start_fsb, 773 count_fsb, flags, &first_block, 774 nres, imap, &nimaps, 775 &dfops); 776 if (error) 777 goto trans_cancel; 778 779 error = xfs_defer_finish(&tp, &dfops); 780 if (error) 781 goto trans_cancel; 782 783 error = xfs_trans_commit(tp); 784 if (error) 785 goto error0; 786 787 xfs_iunlock(ip, XFS_ILOCK_EXCL); 788 } 789 790 /* 791 * See if we were able to allocate an extent that 792 * covers at least part of the callers request 793 */ 794 if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) 795 return xfs_alert_fsblock_zero(ip, imap); 796 797 if ((offset_fsb >= imap->br_startoff) && 798 (offset_fsb < (imap->br_startoff + 799 imap->br_blockcount))) { 800 XFS_STATS_INC(mp, xs_xstrat_quick); 801 return 0; 802 } 803 804 /* 805 * So far we have not mapped the requested part of the 806 * file, just surrounding data, try again. 807 */ 808 count_fsb -= imap->br_blockcount; 809 map_start_fsb = imap->br_startoff + imap->br_blockcount; 810 } 811 812 trans_cancel: 813 xfs_defer_cancel(&dfops); 814 xfs_trans_cancel(tp); 815 error0: 816 xfs_iunlock(ip, XFS_ILOCK_EXCL); 817 return error; 818 } 819 820 int 821 xfs_iomap_write_unwritten( 822 xfs_inode_t *ip, 823 xfs_off_t offset, 824 xfs_off_t count, 825 bool update_isize) 826 { 827 xfs_mount_t *mp = ip->i_mount; 828 xfs_fileoff_t offset_fsb; 829 xfs_filblks_t count_fsb; 830 xfs_filblks_t numblks_fsb; 831 xfs_fsblock_t firstfsb; 832 int nimaps; 833 xfs_trans_t *tp; 834 xfs_bmbt_irec_t imap; 835 struct xfs_defer_ops dfops; 836 struct inode *inode = VFS_I(ip); 837 xfs_fsize_t i_size; 838 uint resblks; 839 int error; 840 841 trace_xfs_unwritten_convert(ip, offset, count); 842 843 offset_fsb = XFS_B_TO_FSBT(mp, offset); 844 count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); 845 count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb); 846 847 /* 848 * Reserve enough blocks in this transaction for two complete extent 849 * btree splits. We may be converting the middle part of an unwritten 850 * extent and in this case we will insert two new extents in the btree 851 * each of which could cause a full split. 852 * 853 * This reservation amount will be used in the first call to 854 * xfs_bmbt_split() to select an AG with enough space to satisfy the 855 * rest of the operation. 856 */ 857 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1; 858 859 do { 860 /* 861 * Set up a transaction to convert the range of extents 862 * from unwritten to real. Do allocations in a loop until 863 * we have covered the range passed in. 864 * 865 * Note that we can't risk to recursing back into the filesystem 866 * here as we might be asked to write out the same inode that we 867 * complete here and might deadlock on the iolock. 868 */ 869 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 870 XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp); 871 if (error) 872 return error; 873 874 xfs_ilock(ip, XFS_ILOCK_EXCL); 875 xfs_trans_ijoin(tp, ip, 0); 876 877 /* 878 * Modify the unwritten extent state of the buffer. 879 */ 880 xfs_defer_init(&dfops, &firstfsb); 881 nimaps = 1; 882 error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, 883 XFS_BMAPI_CONVERT, &firstfsb, resblks, 884 &imap, &nimaps, &dfops); 885 if (error) 886 goto error_on_bmapi_transaction; 887 888 /* 889 * Log the updated inode size as we go. We have to be careful 890 * to only log it up to the actual write offset if it is 891 * halfway into a block. 892 */ 893 i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb); 894 if (i_size > offset + count) 895 i_size = offset + count; 896 if (update_isize && i_size > i_size_read(inode)) 897 i_size_write(inode, i_size); 898 i_size = xfs_new_eof(ip, i_size); 899 if (i_size) { 900 ip->i_d.di_size = i_size; 901 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 902 } 903 904 error = xfs_defer_finish(&tp, &dfops); 905 if (error) 906 goto error_on_bmapi_transaction; 907 908 error = xfs_trans_commit(tp); 909 xfs_iunlock(ip, XFS_ILOCK_EXCL); 910 if (error) 911 return error; 912 913 if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) 914 return xfs_alert_fsblock_zero(ip, &imap); 915 916 if ((numblks_fsb = imap.br_blockcount) == 0) { 917 /* 918 * The numblks_fsb value should always get 919 * smaller, otherwise the loop is stuck. 920 */ 921 ASSERT(imap.br_blockcount); 922 break; 923 } 924 offset_fsb += numblks_fsb; 925 count_fsb -= numblks_fsb; 926 } while (count_fsb > 0); 927 928 return 0; 929 930 error_on_bmapi_transaction: 931 xfs_defer_cancel(&dfops); 932 xfs_trans_cancel(tp); 933 xfs_iunlock(ip, XFS_ILOCK_EXCL); 934 return error; 935 } 936 937 static inline bool 938 imap_needs_alloc( 939 struct inode *inode, 940 struct xfs_bmbt_irec *imap, 941 int nimaps) 942 { 943 return !nimaps || 944 imap->br_startblock == HOLESTARTBLOCK || 945 imap->br_startblock == DELAYSTARTBLOCK || 946 (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN); 947 } 948 949 static inline bool 950 needs_cow_for_zeroing( 951 struct xfs_bmbt_irec *imap, 952 int nimaps) 953 { 954 return nimaps && 955 imap->br_startblock != HOLESTARTBLOCK && 956 imap->br_state != XFS_EXT_UNWRITTEN; 957 } 958 959 static int 960 xfs_ilock_for_iomap( 961 struct xfs_inode *ip, 962 unsigned flags, 963 unsigned *lockmode) 964 { 965 unsigned mode = XFS_ILOCK_SHARED; 966 967 /* 968 * COW writes may allocate delalloc space or convert unwritten COW 969 * extents, so we need to make sure to take the lock exclusively here. 970 */ 971 if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO))) { 972 /* 973 * FIXME: It could still overwrite on unshared extents and not 974 * need allocation. 975 */ 976 if (flags & IOMAP_NOWAIT) 977 return -EAGAIN; 978 mode = XFS_ILOCK_EXCL; 979 } 980 981 /* 982 * Extents not yet cached requires exclusive access, don't block. This 983 * is an opencoded xfs_ilock_data_map_shared() call but with 984 * non-blocking behaviour. 985 */ 986 if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) { 987 if (flags & IOMAP_NOWAIT) 988 return -EAGAIN; 989 mode = XFS_ILOCK_EXCL; 990 } 991 992 if (flags & IOMAP_NOWAIT) { 993 if (!xfs_ilock_nowait(ip, mode)) 994 return -EAGAIN; 995 } else { 996 xfs_ilock(ip, mode); 997 } 998 999 *lockmode = mode; 1000 return 0; 1001 } 1002 1003 static int 1004 xfs_file_iomap_begin( 1005 struct inode *inode, 1006 loff_t offset, 1007 loff_t length, 1008 unsigned flags, 1009 struct iomap *iomap) 1010 { 1011 struct xfs_inode *ip = XFS_I(inode); 1012 struct xfs_mount *mp = ip->i_mount; 1013 struct xfs_bmbt_irec imap; 1014 xfs_fileoff_t offset_fsb, end_fsb; 1015 int nimaps = 1, error = 0; 1016 bool shared = false, trimmed = false; 1017 unsigned lockmode; 1018 1019 if (XFS_FORCED_SHUTDOWN(mp)) 1020 return -EIO; 1021 1022 if (((flags & (IOMAP_WRITE | IOMAP_DIRECT)) == IOMAP_WRITE) && 1023 !IS_DAX(inode) && !xfs_get_extsz_hint(ip)) { 1024 /* Reserve delalloc blocks for regular writeback. */ 1025 return xfs_file_iomap_begin_delay(inode, offset, length, iomap); 1026 } 1027 1028 /* 1029 * Lock the inode in the manner required for the specified operation and 1030 * check for as many conditions that would result in blocking as 1031 * possible. This removes most of the non-blocking checks from the 1032 * mapping code below. 1033 */ 1034 error = xfs_ilock_for_iomap(ip, flags, &lockmode); 1035 if (error) 1036 return error; 1037 1038 ASSERT(offset <= mp->m_super->s_maxbytes); 1039 if (offset > mp->m_super->s_maxbytes - length) 1040 length = mp->m_super->s_maxbytes - offset; 1041 offset_fsb = XFS_B_TO_FSBT(mp, offset); 1042 end_fsb = XFS_B_TO_FSB(mp, offset + length); 1043 1044 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, 1045 &nimaps, 0); 1046 if (error) 1047 goto out_unlock; 1048 1049 if (flags & IOMAP_REPORT) { 1050 /* Trim the mapping to the nearest shared extent boundary. */ 1051 error = xfs_reflink_trim_around_shared(ip, &imap, &shared, 1052 &trimmed); 1053 if (error) 1054 goto out_unlock; 1055 } 1056 1057 /* Non-modifying mapping requested, so we are done */ 1058 if (!(flags & (IOMAP_WRITE | IOMAP_ZERO))) 1059 goto out_found; 1060 1061 /* 1062 * Break shared extents if necessary. Checks for non-blocking IO have 1063 * been done up front, so we don't need to do them here. 1064 */ 1065 if (xfs_is_reflink_inode(ip)) { 1066 /* if zeroing doesn't need COW allocation, then we are done. */ 1067 if ((flags & IOMAP_ZERO) && 1068 !needs_cow_for_zeroing(&imap, nimaps)) 1069 goto out_found; 1070 1071 if (flags & IOMAP_DIRECT) { 1072 /* may drop and re-acquire the ilock */ 1073 error = xfs_reflink_allocate_cow(ip, &imap, &shared, 1074 &lockmode); 1075 if (error) 1076 goto out_unlock; 1077 } else { 1078 error = xfs_reflink_reserve_cow(ip, &imap, &shared); 1079 if (error) 1080 goto out_unlock; 1081 } 1082 1083 end_fsb = imap.br_startoff + imap.br_blockcount; 1084 length = XFS_FSB_TO_B(mp, end_fsb) - offset; 1085 } 1086 1087 /* Don't need to allocate over holes when doing zeroing operations. */ 1088 if (flags & IOMAP_ZERO) 1089 goto out_found; 1090 1091 if (!imap_needs_alloc(inode, &imap, nimaps)) 1092 goto out_found; 1093 1094 /* If nowait is set bail since we are going to make allocations. */ 1095 if (flags & IOMAP_NOWAIT) { 1096 error = -EAGAIN; 1097 goto out_unlock; 1098 } 1099 1100 /* 1101 * We cap the maximum length we map to a sane size to keep the chunks 1102 * of work done where somewhat symmetric with the work writeback does. 1103 * This is a completely arbitrary number pulled out of thin air as a 1104 * best guess for initial testing. 1105 * 1106 * Note that the values needs to be less than 32-bits wide until the 1107 * lower level functions are updated. 1108 */ 1109 length = min_t(loff_t, length, 1024 * PAGE_SIZE); 1110 1111 /* 1112 * xfs_iomap_write_direct() expects the shared lock. It is unlocked on 1113 * return. 1114 */ 1115 if (lockmode == XFS_ILOCK_EXCL) 1116 xfs_ilock_demote(ip, lockmode); 1117 error = xfs_iomap_write_direct(ip, offset, length, &imap, 1118 nimaps); 1119 if (error) 1120 return error; 1121 1122 iomap->flags = IOMAP_F_NEW; 1123 trace_xfs_iomap_alloc(ip, offset, length, 0, &imap); 1124 1125 out_finish: 1126 if (xfs_ipincount(ip) && (ip->i_itemp->ili_fsync_fields 1127 & ~XFS_ILOG_TIMESTAMP)) 1128 iomap->flags |= IOMAP_F_DIRTY; 1129 1130 xfs_bmbt_to_iomap(ip, iomap, &imap); 1131 1132 if (shared) 1133 iomap->flags |= IOMAP_F_SHARED; 1134 return 0; 1135 1136 out_found: 1137 ASSERT(nimaps); 1138 xfs_iunlock(ip, lockmode); 1139 trace_xfs_iomap_found(ip, offset, length, 0, &imap); 1140 goto out_finish; 1141 1142 out_unlock: 1143 xfs_iunlock(ip, lockmode); 1144 return error; 1145 } 1146 1147 static int 1148 xfs_file_iomap_end_delalloc( 1149 struct xfs_inode *ip, 1150 loff_t offset, 1151 loff_t length, 1152 ssize_t written, 1153 struct iomap *iomap) 1154 { 1155 struct xfs_mount *mp = ip->i_mount; 1156 xfs_fileoff_t start_fsb; 1157 xfs_fileoff_t end_fsb; 1158 int error = 0; 1159 1160 /* 1161 * Behave as if the write failed if drop writes is enabled. Set the NEW 1162 * flag to force delalloc cleanup. 1163 */ 1164 if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DROP_WRITES)) { 1165 iomap->flags |= IOMAP_F_NEW; 1166 written = 0; 1167 } 1168 1169 /* 1170 * start_fsb refers to the first unused block after a short write. If 1171 * nothing was written, round offset down to point at the first block in 1172 * the range. 1173 */ 1174 if (unlikely(!written)) 1175 start_fsb = XFS_B_TO_FSBT(mp, offset); 1176 else 1177 start_fsb = XFS_B_TO_FSB(mp, offset + written); 1178 end_fsb = XFS_B_TO_FSB(mp, offset + length); 1179 1180 /* 1181 * Trim delalloc blocks if they were allocated by this write and we 1182 * didn't manage to write the whole range. 1183 * 1184 * We don't need to care about racing delalloc as we hold i_mutex 1185 * across the reserve/allocate/unreserve calls. If there are delalloc 1186 * blocks in the range, they are ours. 1187 */ 1188 if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) { 1189 truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb), 1190 XFS_FSB_TO_B(mp, end_fsb) - 1); 1191 1192 xfs_ilock(ip, XFS_ILOCK_EXCL); 1193 error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1194 end_fsb - start_fsb); 1195 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1196 1197 if (error && !XFS_FORCED_SHUTDOWN(mp)) { 1198 xfs_alert(mp, "%s: unable to clean up ino %lld", 1199 __func__, ip->i_ino); 1200 return error; 1201 } 1202 } 1203 1204 return 0; 1205 } 1206 1207 static int 1208 xfs_file_iomap_end( 1209 struct inode *inode, 1210 loff_t offset, 1211 loff_t length, 1212 ssize_t written, 1213 unsigned flags, 1214 struct iomap *iomap) 1215 { 1216 if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC) 1217 return xfs_file_iomap_end_delalloc(XFS_I(inode), offset, 1218 length, written, iomap); 1219 return 0; 1220 } 1221 1222 const struct iomap_ops xfs_iomap_ops = { 1223 .iomap_begin = xfs_file_iomap_begin, 1224 .iomap_end = xfs_file_iomap_end, 1225 }; 1226 1227 static int 1228 xfs_xattr_iomap_begin( 1229 struct inode *inode, 1230 loff_t offset, 1231 loff_t length, 1232 unsigned flags, 1233 struct iomap *iomap) 1234 { 1235 struct xfs_inode *ip = XFS_I(inode); 1236 struct xfs_mount *mp = ip->i_mount; 1237 xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); 1238 xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + length); 1239 struct xfs_bmbt_irec imap; 1240 int nimaps = 1, error = 0; 1241 unsigned lockmode; 1242 1243 if (XFS_FORCED_SHUTDOWN(mp)) 1244 return -EIO; 1245 1246 lockmode = xfs_ilock_attr_map_shared(ip); 1247 1248 /* if there are no attribute fork or extents, return ENOENT */ 1249 if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) { 1250 error = -ENOENT; 1251 goto out_unlock; 1252 } 1253 1254 ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL); 1255 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, 1256 &nimaps, XFS_BMAPI_ATTRFORK); 1257 out_unlock: 1258 xfs_iunlock(ip, lockmode); 1259 1260 if (!error) { 1261 ASSERT(nimaps); 1262 xfs_bmbt_to_iomap(ip, iomap, &imap); 1263 } 1264 1265 return error; 1266 } 1267 1268 const struct iomap_ops xfs_xattr_iomap_ops = { 1269 .iomap_begin = xfs_xattr_iomap_begin, 1270 }; 1271