/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/iomap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"


#define XFS_WRITEIO_ALIGN(mp,off)       (((off) >> mp->m_writeio_log) \
                                                << mp->m_writeio_log)

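/*
 * Translate an extent mapping (struct xfs_bmbt_irec) into the generic
 * struct iomap consumed by the iomap infrastructure: holes and delalloc
 * reservations carry no block address, everything else maps to a disk
 * address and is flagged as either written or unwritten.
 */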
void
xfs_bmbt_to_iomap(
        struct xfs_inode        *ip,
        struct iomap            *iomap,
        struct xfs_bmbt_irec    *imap)
{
        struct xfs_mount        *mp = ip->i_mount;

        if (imap->br_startblock == HOLESTARTBLOCK) {
                iomap->addr = IOMAP_NULL_ADDR;
                iomap->type = IOMAP_HOLE;
        } else if (imap->br_startblock == DELAYSTARTBLOCK) {
                iomap->addr = IOMAP_NULL_ADDR;
                iomap->type = IOMAP_DELALLOC;
        } else {
                iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
                if (imap->br_state == XFS_EXT_UNWRITTEN)
                        iomap->type = IOMAP_UNWRITTEN;
                else
                        iomap->type = IOMAP_MAPPED;
        }
        iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
        iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
        iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
        iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
}

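/*
 * Work out the extent alignment to use for an allocation that extends the
 * file, based on the stripe geometry and/or the passed-in extent size hint.
 * Returns 0 if no alignment is required.
 */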
xfs_extlen_t
xfs_eof_alignment(
        struct xfs_inode        *ip,
        xfs_extlen_t            extsize)
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_extlen_t            align = 0;

        if (!XFS_IS_REALTIME_INODE(ip)) {
                /*
                 * Round up the allocation request to a stripe unit
                 * (m_dalign) boundary if the file size is >= stripe unit
                 * size, and we are allocating past the allocation eof.
                 *
                 * If mounted with the "-o swalloc" option the alignment is
                 * increased from the stripe unit size to the stripe width.
                 */
                if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
                        align = mp->m_swidth;
                else if (mp->m_dalign)
                        align = mp->m_dalign;

                if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
                        align = 0;
        }

        /*
         * Always round up the allocation request to an extent boundary
         * (when the file is on a real-time subvolume or has a di_extsize
         * hint).
         */
        if (extsize) {
                if (align)
                        align = roundup_64(align, extsize);
                else
                        align = extsize;
        }

        return align;
}

STATIC int
xfs_iomap_eof_align_last_fsb(
        struct xfs_inode        *ip,
        xfs_extlen_t            extsize,
        xfs_fileoff_t           *last_fsb)
{
        xfs_extlen_t            align = xfs_eof_alignment(ip, extsize);

        if (align) {
                xfs_fileoff_t   new_last_fsb = roundup_64(*last_fsb, align);
                int             eof, error;

                error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
                if (error)
                        return error;
                if (eof)
                        *last_fsb = new_last_fsb;
        }
        return 0;
}

STATIC int
xfs_alert_fsblock_zero(
        xfs_inode_t     *ip,
        xfs_bmbt_irec_t *imap)
{
        xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
                        "Access to block zero in inode %llu "
                        "start_block: %llx start_off: %llx "
                        "blkcnt: %llx extent-state: %x",
                (unsigned long long)ip->i_ino,
                (unsigned long long)imap->br_startblock,
                (unsigned long long)imap->br_startoff,
                (unsigned long long)imap->br_blockcount,
                imap->br_state);
        return -EFSCORRUPTED;
}

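/*
 * Allocate real blocks for a direct I/O (or DAX) write beyond what is
 * currently mapped.  Called with the ilock held shared; the lock is dropped
 * and re-taken exclusively around transaction setup, and is released on
 * return.  The resulting mapping is returned in *imap.
 */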
int
xfs_iomap_write_direct(
        xfs_inode_t     *ip,
        xfs_off_t       offset,
        size_t          count,
        xfs_bmbt_irec_t *imap,
        int             nmaps)
{
        xfs_mount_t     *mp = ip->i_mount;
        xfs_fileoff_t   offset_fsb;
        xfs_fileoff_t   last_fsb;
        xfs_filblks_t   count_fsb, resaligned;
        xfs_fsblock_t   firstfsb;
        xfs_extlen_t    extsz;
        int             nimaps;
        int             quota_flag;
        int             rt;
        xfs_trans_t     *tp;
        struct xfs_defer_ops dfops;
        uint            qblocks, resblks, resrtextents;
        int             error;
        int             lockmode;
        int             bmapi_flags = XFS_BMAPI_PREALLOC;
        uint            tflags = 0;

        rt = XFS_IS_REALTIME_INODE(ip);
        extsz = xfs_get_extsz_hint(ip);
        lockmode = XFS_ILOCK_SHARED;    /* locked by caller */

        ASSERT(xfs_isilocked(ip, lockmode));

        offset_fsb = XFS_B_TO_FSBT(mp, offset);
        last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
        if ((offset + count) > XFS_ISIZE(ip)) {
                /*
                 * Assert that the in-core extent list is present since this
                 * can call xfs_iread_extents() and we only have the ilock
                 * shared.  This should be safe because the lock was held
                 * around a bmapi call in the caller and we only need it to
                 * access the in-core list.
                 */
                ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
                                XFS_IFEXTENTS);
                error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
                if (error)
                        goto out_unlock;
        } else {
                if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
                        last_fsb = MIN(last_fsb, (xfs_fileoff_t)
                                        imap->br_blockcount +
                                        imap->br_startoff);
        }
        count_fsb = last_fsb - offset_fsb;
        ASSERT(count_fsb > 0);
        resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, extsz);

        if (unlikely(rt)) {
                resrtextents = qblocks = resaligned;
                resrtextents /= mp->m_sb.sb_rextsize;
                resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
                quota_flag = XFS_QMOPT_RES_RTBLKS;
        } else {
                resrtextents = 0;
                resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
                quota_flag = XFS_QMOPT_RES_REGBLKS;
        }

        /*
         * Drop the shared lock acquired by the caller, attach the dquot if
         * necessary and move on to transaction setup.
         */
        xfs_iunlock(ip, lockmode);
        error = xfs_qm_dqattach(ip, 0);
        if (error)
                return error;

        /*
         * For DAX, we do not allocate unwritten extents, but instead we zero
         * the block before we commit the transaction.  Ideally we'd like to
         * do this outside the transaction context, but if we commit and then
         * crash we may not have zeroed the blocks and this will be exposed
         * on recovery of the allocation.  Hence we must zero before commit.
         *
         * Further, if we are mapping unwritten extents here, we need to zero
         * and convert them to written so that we don't need an unwritten
         * extent callback for DAX.  This also means that we need to be able
         * to dip into the reserve block pool for bmbt block allocation if
         * there is no space left but we need to do unwritten extent
         * conversion.
         */
        if (IS_DAX(VFS_I(ip))) {
                bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
                if (imap->br_state == XFS_EXT_UNWRITTEN) {
                        tflags |= XFS_TRANS_RESERVE;
                        resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
                }
        }
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
                        tflags, &tp);
        if (error)
                return error;

        lockmode = XFS_ILOCK_EXCL;
        xfs_ilock(ip, lockmode);

        error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
        if (error)
                goto out_trans_cancel;

        xfs_trans_ijoin(tp, ip, 0);

        /*
         * From this point onwards we overwrite the imap pointer that the
         * caller gave to us.
         */
        xfs_defer_init(&dfops, &firstfsb);
        nimaps = 1;
        error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
                                bmapi_flags, &firstfsb, resblks, imap,
                                &nimaps, &dfops);
        if (error)
                goto out_bmap_cancel;

        /*
         * Complete the transaction.
         */
        error = xfs_defer_finish(&tp, &dfops);
        if (error)
                goto out_bmap_cancel;

        error = xfs_trans_commit(tp);
        if (error)
                goto out_unlock;

        /*
         * Copy any maps to caller's array and return any error.
         */
        if (nimaps == 0) {
                error = -ENOSPC;
                goto out_unlock;
        }

        if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
                error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
        xfs_iunlock(ip, lockmode);
        return error;

out_bmap_cancel:
        xfs_defer_cancel(&dfops);
        xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
        xfs_trans_cancel(tp);
        goto out_unlock;
}

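/*
 * Decide whether a quota of the given type is close enough to its
 * preallocation watermarks that speculative preallocation should be
 * throttled for this inode.
 */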
STATIC bool
xfs_quota_need_throttle(
        struct xfs_inode *ip,
        int type,
        xfs_fsblock_t alloc_blocks)
{
        struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

        if (!dq || !xfs_this_quota_on(ip->i_mount, type))
                return false;

        /* no hi watermark, no throttle */
        if (!dq->q_prealloc_hi_wmark)
                return false;

        /* under the lo watermark, no throttle */
        if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
                return false;

        return true;
}

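/*
 * Compute the throttling parameters for a single quota: squash the
 * preallocation entirely once the hi watermark is reached, otherwise derive
 * a shift from how far the reservation sits below it, and only overwrite
 * *qblocks/*qshift when that makes the caller's values more aggressive.
 */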
STATIC void
xfs_quota_calc_throttle(
        struct xfs_inode *ip,
        int type,
        xfs_fsblock_t *qblocks,
        int *qshift,
        int64_t *qfreesp)
{
        int64_t freesp;
        int shift = 0;
        struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

        /* no dq, or over hi wmark, squash the prealloc completely */
        if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
                *qblocks = 0;
                *qfreesp = 0;
                return;
        }

        freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
        if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
                shift = 2;
                if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
                        shift += 2;
                if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
                        shift += 2;
        }

        if (freesp < *qfreesp)
                *qfreesp = freesp;

        /* only overwrite the throttle values if we are more aggressive */
        if ((freesp >> shift) < (*qblocks >> *qshift)) {
                *qblocks = freesp;
                *qshift = shift;
        }
}

/*
 * If we are doing a write at the end of the file and there are no allocations
 * past this one, then extend the allocation out to the file system's write
 * iosize.
 *
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full.  The closer the
 * filesystem is to being full, the smaller the maximum preallocation.
 *
 * As an exception we don't do any preallocation at all if the file is smaller
 * than the minimum preallocation and we are using the default dynamic
 * preallocation scheme, as it is likely this is the only write to the file
 * that is going to be done.
 *
 * We clean up any extra space left over when the file is closed in
 * xfs_inactive().
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
        struct xfs_inode        *ip,
        loff_t                  offset,
        loff_t                  count,
        struct xfs_iext_cursor  *icur)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        struct xfs_bmbt_irec    prev;
        int                     shift = 0;
        int64_t                 freesp;
        xfs_fsblock_t           qblocks;
        int                     qshift = 0;
        xfs_fsblock_t           alloc_blocks = 0;

        if (offset + count <= XFS_ISIZE(ip))
                return 0;

        if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
            (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)))
                return 0;

        /*
         * If an explicit allocsize is set, the file is small, or we
         * are writing behind a hole, then use the minimum prealloc:
         */
        if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
            XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
            !xfs_iext_peek_prev_extent(ifp, icur, &prev) ||
            prev.br_startoff + prev.br_blockcount < offset_fsb)
                return mp->m_writeio_blocks;

        /*
         * Determine the initial size of the preallocation.  We are beyond the
         * current EOF here, but we need to take into account whether this is
         * a sparse write or an extending write when determining the
         * preallocation size.  Hence we need to look up the extent that ends
         * at the current write offset and use the result to determine the
         * preallocation size.
         *
         * If the extent is a hole, then preallocation is essentially
         * disabled.  Otherwise we take the size of the preceding data extent
         * as the basis for the preallocation size.  If the size of the extent
         * is greater than half the maximum extent length, then use the
         * current offset as the basis.  This ensures that for large files
         * the preallocation size always extends to MAXEXTLEN rather than
         * falling short due to things like stripe unit/width alignment of
         * real extents.
         */
        if (prev.br_blockcount <= (MAXEXTLEN >> 1))
                alloc_blocks = prev.br_blockcount << 1;
        else
                alloc_blocks = XFS_B_TO_FSB(mp, offset);
        if (!alloc_blocks)
                goto check_writeio;
        qblocks = alloc_blocks;

        /*
         * MAXEXTLEN is not a power of two value but we round the prealloc
         * down to the nearest power of two value after throttling.  To
         * prevent the round down from unconditionally reducing the maximum
         * supported prealloc size, we round up first, apply appropriate
         * throttling, round down and cap the value to MAXEXTLEN.
         */
        alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
                                       alloc_blocks);

        freesp = percpu_counter_read_positive(&mp->m_fdblocks);
        if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
                shift = 2;
                if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
                        shift++;
                if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
                        shift++;
                if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
                        shift++;
                if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
                        shift++;
        }

        /*
         * Check each quota to cap the prealloc size, provide a shift value
         * to throttle with and adjust the amount of available space.
         */
        if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
                xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
                                        &freesp);
        if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
                xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
                                        &freesp);
        if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
                xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
                                        &freesp);

        /*
         * The final prealloc size is set to the minimum of free space
         * available in each of the quotas and the overall filesystem.
         *
         * The shift throttle value is set to the maximum value as determined
         * by the global low free space values and per-quota low free space
         * values.
         */
        alloc_blocks = MIN(alloc_blocks, qblocks);
        shift = MAX(shift, qshift);

        if (shift)
                alloc_blocks >>= shift;
        /*
         * rounddown_pow_of_two() returns an undefined result if we pass in
         * alloc_blocks = 0.
         */
        if (alloc_blocks)
                alloc_blocks = rounddown_pow_of_two(alloc_blocks);
        if (alloc_blocks > MAXEXTLEN)
                alloc_blocks = MAXEXTLEN;

        /*
         * If we are still trying to allocate more space than is
         * available, squash the prealloc hard.  This can happen if we
         * have a large file on a small filesystem and the above
         * lowspace thresholds are smaller than MAXEXTLEN.
         */
        while (alloc_blocks && alloc_blocks >= freesp)
                alloc_blocks >>= 4;
check_writeio:
        if (alloc_blocks < mp->m_writeio_blocks)
                alloc_blocks = mp->m_writeio_blocks;
        trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
                                      mp->m_writeio_blocks);
        return alloc_blocks;
}

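/*
 * Reserve delalloc blocks (and, when extending the file, speculative
 * preallocation) for a buffered write.  Only called for regular files that
 * are neither realtime nor using an extent size hint, as asserted below.
 */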
static int
xfs_file_iomap_begin_delay(
        struct inode            *inode,
        loff_t                  offset,
        loff_t                  count,
        struct iomap            *iomap)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        xfs_fileoff_t           maxbytes_fsb =
                XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
        xfs_fileoff_t           end_fsb;
        int                     error = 0, eof = 0;
        struct xfs_bmbt_irec    got;
        struct xfs_iext_cursor  icur;
        xfs_fsblock_t           prealloc_blocks = 0;

        ASSERT(!XFS_IS_REALTIME_INODE(ip));
        ASSERT(!xfs_get_extsz_hint(ip));

        xfs_ilock(ip, XFS_ILOCK_EXCL);

        if (unlikely(XFS_TEST_ERROR(
            (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
             XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
             mp, XFS_ERRTAG_BMAPIFORMAT))) {
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
                error = -EFSCORRUPTED;
                goto out_unlock;
        }

        XFS_STATS_INC(mp, xs_blk_mapw);

        if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
                if (error)
                        goto out_unlock;
        }

        eof = !xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got);
        if (!eof && got.br_startoff <= offset_fsb) {
                if (xfs_is_reflink_inode(ip)) {
                        bool            shared;

                        end_fsb = min(XFS_B_TO_FSB(mp, offset + count),
                                        maxbytes_fsb);
                        xfs_trim_extent(&got, offset_fsb,
                                        end_fsb - offset_fsb);
                        error = xfs_reflink_reserve_cow(ip, &got, &shared);
                        if (error)
                                goto out_unlock;
                }

                trace_xfs_iomap_found(ip, offset, count, 0, &got);
                goto done;
        }

        error = xfs_qm_dqattach_locked(ip, 0);
        if (error)
                goto out_unlock;

        /*
         * We cap the maximum length we map here to MAX_WRITEBACK_PAGES pages
         * to keep the chunks of work done here somewhat symmetric with the
         * work writeback does.  This is a completely arbitrary number pulled
         * out of thin air as a best guess for initial testing.
         *
         * Note that the values need to be less than 32 bits wide until the
         * lower level functions are updated.
         */
        count = min_t(loff_t, count, 1024 * PAGE_SIZE);
        end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);

        if (eof) {
                prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count,
                                &icur);
                if (prealloc_blocks) {
                        xfs_extlen_t    align;
                        xfs_off_t       end_offset;
                        xfs_fileoff_t   p_end_fsb;

                        end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
                        p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
                                        prealloc_blocks;

                        align = xfs_eof_alignment(ip, 0);
                        if (align)
                                p_end_fsb = roundup_64(p_end_fsb, align);

                        p_end_fsb = min(p_end_fsb, maxbytes_fsb);
                        ASSERT(p_end_fsb > offset_fsb);
                        prealloc_blocks = p_end_fsb - end_fsb;
                }
        }

retry:
        error = xfs_bmapi_reserve_delalloc(ip, XFS_DATA_FORK, offset_fsb,
                        end_fsb - offset_fsb, prealloc_blocks, &got, &icur,
                        eof);
        switch (error) {
        case 0:
                break;
        case -ENOSPC:
        case -EDQUOT:
                /* retry without any preallocation */
                trace_xfs_delalloc_enospc(ip, offset, count);
                if (prealloc_blocks) {
                        prealloc_blocks = 0;
                        goto retry;
                }
                /*FALLTHRU*/
        default:
                goto out_unlock;
        }

        /*
         * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
         * them out if the write happens to fail.
         */
        iomap->flags = IOMAP_F_NEW;
        trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
done:
        if (isnullstartblock(got.br_startblock))
                got.br_startblock = DELAYSTARTBLOCK;

        if (!got.br_startblock) {
                error = xfs_alert_fsblock_zero(ip, &got);
                if (error)
                        goto out_unlock;
        }

        xfs_bmbt_to_iomap(ip, iomap, &got);

out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating caller's request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
        xfs_inode_t     *ip,
        int             whichfork,
        xfs_off_t       offset,
        xfs_bmbt_irec_t *imap)
{
        xfs_mount_t     *mp = ip->i_mount;
        xfs_fileoff_t   offset_fsb, last_block;
        xfs_fileoff_t   end_fsb, map_start_fsb;
        xfs_fsblock_t   first_block;
        struct xfs_defer_ops    dfops;
        xfs_filblks_t   count_fsb;
        xfs_trans_t     *tp;
        int             nimaps;
        int             error = 0;
        int             flags = XFS_BMAPI_DELALLOC;
        int             nres;

        if (whichfork == XFS_COW_FORK)
                flags |= XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;

        /*
         * Make sure that the dquots are there.
         */
        error = xfs_qm_dqattach(ip, 0);
        if (error)
                return error;

        offset_fsb = XFS_B_TO_FSBT(mp, offset);
        count_fsb = imap->br_blockcount;
        map_start_fsb = imap->br_startoff;

        XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

        while (count_fsb != 0) {
                /*
                 * Set up a transaction with which to allocate the
                 * backing store for the file.  Do allocations in a
                 * loop until we get some space in the range we are
                 * interested in.  The other space that might be allocated
                 * is in the delayed allocation extent on which we sit
                 * but before our buffer starts.
                 */
                nimaps = 0;
                while (nimaps == 0) {
                        nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
                        /*
                         * We have already reserved space for the extent and
                         * any indirect blocks when creating the delalloc
                         * extent, there is no need to reserve space in this
                         * transaction again.
                         */
                        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0,
                                        0, XFS_TRANS_RESERVE, &tp);
                        if (error)
                                return error;

                        xfs_ilock(ip, XFS_ILOCK_EXCL);
                        xfs_trans_ijoin(tp, ip, 0);

                        xfs_defer_init(&dfops, &first_block);

                        /*
                         * It is possible that the extents have changed since
                         * we did the read call as we dropped the ilock for a
                         * while.  We have to be careful about truncates or
                         * hole punches here - we are not allowed to allocate
                         * non-delalloc blocks here.
                         *
                         * The only protection against truncation is the pages
                         * for the range we are being asked to convert are
                         * locked and hence a truncate will block on them
                         * first.
                         *
                         * As a result, if we go beyond the range we really
                         * need and hit a delalloc extent boundary followed by
                         * a hole while we have excess blocks in the map, we
                         * will fill the hole incorrectly and overrun the
                         * transaction reservation.
                         *
                         * Using a single map prevents this as we are forced
                         * to check each map we look for overlap with the
                         * desired range and abort as soon as we find it.
                         * Also, given that we only return a single map,
                         * having one beyond what we can return is probably
                         * a bit silly.
                         *
                         * We also need to check that we don't go beyond EOF;
                         * this is a truncate optimisation as a truncate sets
                         * the new file size before blocking on the pages we
                         * currently have locked under writeback.  Because
                         * they are about to be tossed, we don't need to
                         * write them back....
                         */
                        nimaps = 1;
                        end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
                        error = xfs_bmap_last_offset(ip, &last_block,
                                                XFS_DATA_FORK);
                        if (error)
                                goto trans_cancel;

                        last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
                        if ((map_start_fsb + count_fsb) > last_block) {
                                count_fsb = last_block - map_start_fsb;
                                if (count_fsb == 0) {
                                        error = -EAGAIN;
                                        goto trans_cancel;
                                }
                        }

                        /*
                         * From this point onwards we overwrite the imap
                         * pointer that the caller gave to us.
                         */
                        error = xfs_bmapi_write(tp, ip, map_start_fsb,
                                                count_fsb, flags, &first_block,
                                                nres, imap, &nimaps,
                                                &dfops);
                        if (error)
                                goto trans_cancel;

                        error = xfs_defer_finish(&tp, &dfops);
                        if (error)
                                goto trans_cancel;

                        error = xfs_trans_commit(tp);
                        if (error)
                                goto error0;

                        xfs_iunlock(ip, XFS_ILOCK_EXCL);
                }

                /*
                 * See if we were able to allocate an extent that
                 * covers at least part of the caller's request.
                 */
                if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
                        return xfs_alert_fsblock_zero(ip, imap);

                if ((offset_fsb >= imap->br_startoff) &&
                    (offset_fsb < (imap->br_startoff +
                                   imap->br_blockcount))) {
                        XFS_STATS_INC(mp, xs_xstrat_quick);
                        return 0;
                }

                /*
                 * So far we have not mapped the requested part of the
                 * file, just surrounding data, try again.
                 */
                count_fsb -= imap->br_blockcount;
                map_start_fsb = imap->br_startoff + imap->br_blockcount;
        }

trans_cancel:
        xfs_defer_cancel(&dfops);
        xfs_trans_cancel(tp);
error0:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

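/*
 * Convert the unwritten extents backing [offset, offset + count) to written,
 * looping until the whole range has been converted.  Optionally updates the
 * in-core and on-disk inode size for writes that extend the file.
 */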
int
xfs_iomap_write_unwritten(
        xfs_inode_t     *ip,
        xfs_off_t       offset,
        xfs_off_t       count,
        bool            update_isize)
{
        xfs_mount_t     *mp = ip->i_mount;
        xfs_fileoff_t   offset_fsb;
        xfs_filblks_t   count_fsb;
        xfs_filblks_t   numblks_fsb;
        xfs_fsblock_t   firstfsb;
        int             nimaps;
        xfs_trans_t     *tp;
        xfs_bmbt_irec_t imap;
        struct xfs_defer_ops dfops;
        struct inode    *inode = VFS_I(ip);
        xfs_fsize_t     i_size;
        uint            resblks;
        int             error;

        trace_xfs_unwritten_convert(ip, offset, count);

        offset_fsb = XFS_B_TO_FSBT(mp, offset);
        count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
        count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

        /*
         * Reserve enough blocks in this transaction for two complete extent
         * btree splits.  We may be converting the middle part of an unwritten
         * extent and in this case we will insert two new extents in the btree
         * each of which could cause a full split.
         *
         * This reservation amount will be used in the first call to
         * xfs_bmbt_split() to select an AG with enough space to satisfy the
         * rest of the operation.
         */
        resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

        do {
                /*
                 * Set up a transaction to convert the range of extents
                 * from unwritten to real.  Do allocations in a loop until
                 * we have covered the range passed in.
                 *
                 * Note that we can't risk recursing back into the filesystem
                 * here as we might be asked to write out the same inode that
                 * we complete here and might deadlock on the iolock.
                 */
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
                                XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
                if (error)
                        return error;

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                xfs_trans_ijoin(tp, ip, 0);

                /*
                 * Modify the unwritten extent state of the buffer.
                 */
                xfs_defer_init(&dfops, &firstfsb);
                nimaps = 1;
                error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
                                        XFS_BMAPI_CONVERT, &firstfsb, resblks,
                                        &imap, &nimaps, &dfops);
                if (error)
                        goto error_on_bmapi_transaction;

                /*
                 * Log the updated inode size as we go.  We have to be careful
                 * to only log it up to the actual write offset if it is
                 * halfway into a block.
                 */
                i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
                if (i_size > offset + count)
                        i_size = offset + count;
                if (update_isize && i_size > i_size_read(inode))
                        i_size_write(inode, i_size);
                i_size = xfs_new_eof(ip, i_size);
                if (i_size) {
                        ip->i_d.di_size = i_size;
                        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
                }

                error = xfs_defer_finish(&tp, &dfops);
                if (error)
                        goto error_on_bmapi_transaction;

                error = xfs_trans_commit(tp);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                if (error)
                        return error;

                if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
                        return xfs_alert_fsblock_zero(ip, &imap);

                if ((numblks_fsb = imap.br_blockcount) == 0) {
                        /*
                         * The numblks_fsb value should always get
                         * smaller, otherwise the loop is stuck.
                         */
                        ASSERT(imap.br_blockcount);
                        break;
                }
                offset_fsb += numblks_fsb;
                count_fsb -= numblks_fsb;
        } while (count_fsb > 0);

        return 0;

error_on_bmapi_transaction:
        xfs_defer_cancel(&dfops);
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

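/*
 * Check whether a write requires real block allocation before I/O can
 * proceed: nothing mapped at all, a hole, a delalloc reservation, or (for
 * DAX, which cannot use unwritten extents) an unwritten extent.
 */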
static inline bool imap_needs_alloc(struct inode *inode,
                struct xfs_bmbt_irec *imap, int nimaps)
{
        return !nimaps ||
                imap->br_startblock == HOLESTARTBLOCK ||
                imap->br_startblock == DELAYSTARTBLOCK ||
                (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN);
}

static inline bool need_excl_ilock(struct xfs_inode *ip, unsigned flags)
{
        /*
         * COW writes will allocate delalloc space, so we need to make sure
         * to take the lock exclusively here.
         */
        if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO)))
                return true;
        if ((flags & IOMAP_DIRECT) && (flags & IOMAP_WRITE))
                return true;
        return false;
}

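/*
 * iomap_begin method for the data fork: map the requested range, reserving
 * or allocating blocks as required by the IOMAP_* flags, and fill out the
 * iomap for the caller.
 */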
static int
xfs_file_iomap_begin(
        struct inode            *inode,
        loff_t                  offset,
        loff_t                  length,
        unsigned                flags,
        struct iomap            *iomap)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_bmbt_irec    imap;
        xfs_fileoff_t           offset_fsb, end_fsb;
        int                     nimaps = 1, error = 0;
        bool                    shared = false, trimmed = false;
        unsigned                lockmode;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        if (((flags & (IOMAP_WRITE | IOMAP_DIRECT)) == IOMAP_WRITE) &&
                        !IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
                /* Reserve delalloc blocks for regular writeback. */
                return xfs_file_iomap_begin_delay(inode, offset, length,
                                iomap);
        }

        if (need_excl_ilock(ip, flags)) {
                lockmode = XFS_ILOCK_EXCL;
                xfs_ilock(ip, XFS_ILOCK_EXCL);
        } else {
                lockmode = xfs_ilock_data_map_shared(ip);
        }

        if ((flags & IOMAP_NOWAIT) && !(ip->i_df.if_flags & XFS_IFEXTENTS)) {
                error = -EAGAIN;
                goto out_unlock;
        }

        ASSERT(offset <= mp->m_super->s_maxbytes);
        if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
                length = mp->m_super->s_maxbytes - offset;
        offset_fsb = XFS_B_TO_FSBT(mp, offset);
        end_fsb = XFS_B_TO_FSB(mp, offset + length);

        error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
                               &nimaps, 0);
        if (error)
                goto out_unlock;

        if (flags & IOMAP_REPORT) {
                /* Trim the mapping to the nearest shared extent boundary. */
                error = xfs_reflink_trim_around_shared(ip, &imap, &shared,
                                &trimmed);
                if (error)
                        goto out_unlock;
        }

        if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
                if (flags & IOMAP_DIRECT) {
                        /*
                         * A reflinked inode will result in CoW alloc.
                         * FIXME: It could still overwrite on unshared extents
                         * and not need allocation.
                         */
                        if (flags & IOMAP_NOWAIT) {
                                error = -EAGAIN;
                                goto out_unlock;
                        }
                        /* may drop and re-acquire the ilock */
                        error = xfs_reflink_allocate_cow(ip, &imap, &shared,
                                        &lockmode);
                        if (error)
                                goto out_unlock;
                } else {
                        error = xfs_reflink_reserve_cow(ip, &imap, &shared);
                        if (error)
                                goto out_unlock;
                }

                end_fsb = imap.br_startoff + imap.br_blockcount;
                length = XFS_FSB_TO_B(mp, end_fsb) - offset;
        }

        if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
                /*
                 * If nowait is set, bail since we are going to make
                 * allocations.
                 */
                if (flags & IOMAP_NOWAIT) {
                        error = -EAGAIN;
                        goto out_unlock;
                }
                /*
                 * We cap the maximum length we map here to
                 * MAX_WRITEBACK_PAGES pages to keep the chunks of work done
                 * here somewhat symmetric with the work writeback does.
                 * This is a completely arbitrary number pulled out of thin
                 * air as a best guess for initial testing.
                 *
                 * Note that the values need to be less than 32 bits wide
                 * until the lower level functions are updated.
                 */
                length = min_t(loff_t, length, 1024 * PAGE_SIZE);
                /*
                 * xfs_iomap_write_direct() expects the shared lock.  It is
                 * unlocked on return.
                 */
                if (lockmode == XFS_ILOCK_EXCL)
                        xfs_ilock_demote(ip, lockmode);
                error = xfs_iomap_write_direct(ip, offset, length, &imap,
                                nimaps);
                if (error)
                        return error;

                iomap->flags = IOMAP_F_NEW;
                trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
        } else {
                ASSERT(nimaps);

                xfs_iunlock(ip, lockmode);
                trace_xfs_iomap_found(ip, offset, length, 0, &imap);
        }

        xfs_bmbt_to_iomap(ip, iomap, &imap);

        if (shared)
                iomap->flags |= IOMAP_F_SHARED;
        return 0;
out_unlock:
        xfs_iunlock(ip, lockmode);
        return error;
}

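/*
 * Punch out any delalloc blocks that this write reserved but then failed
 * to consume, so that a short or failed write does not leave stale delalloc
 * extents behind.
 */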
static int
xfs_file_iomap_end_delalloc(
        struct xfs_inode        *ip,
        loff_t                  offset,
        loff_t                  length,
        ssize_t                 written,
        struct iomap            *iomap)
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           start_fsb;
        xfs_fileoff_t           end_fsb;
        int                     error = 0;

        /*
         * Behave as if the write failed if drop writes is enabled.  Set the
         * NEW flag to force delalloc cleanup.
         */
        if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DROP_WRITES)) {
                iomap->flags |= IOMAP_F_NEW;
                written = 0;
        }

        /*
         * start_fsb refers to the first unused block after a short write.
         * If nothing was written, round offset down to point at the first
         * block in the range.
         */
        if (unlikely(!written))
                start_fsb = XFS_B_TO_FSBT(mp, offset);
        else
                start_fsb = XFS_B_TO_FSB(mp, offset + written);
        end_fsb = XFS_B_TO_FSB(mp, offset + length);

        /*
         * Trim delalloc blocks if they were allocated by this write and we
         * didn't manage to write the whole range.
         *
         * We don't need to care about racing delalloc as we hold i_mutex
         * across the reserve/allocate/unreserve calls.  If there are delalloc
         * blocks in the range, they are ours.
         */
        if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
                truncate_pagecache_range(VFS_I(ip),
                                         XFS_FSB_TO_B(mp, start_fsb),
                                         XFS_FSB_TO_B(mp, end_fsb) - 1);

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
                                               end_fsb - start_fsb);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);

                if (error && !XFS_FORCED_SHUTDOWN(mp)) {
                        xfs_alert(mp, "%s: unable to clean up ino %lld",
                                __func__, ip->i_ino);
                        return error;
                }
        }

        return 0;
}

static int
xfs_file_iomap_end(
        struct inode            *inode,
        loff_t                  offset,
        loff_t                  length,
        ssize_t                 written,
        unsigned                flags,
        struct iomap            *iomap)
{
        if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
                return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
                                length, written, iomap);
        return 0;
}

const struct iomap_ops xfs_iomap_ops = {
        .iomap_begin            = xfs_file_iomap_begin,
        .iomap_end              = xfs_file_iomap_end,
};

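/*
 * iomap_begin method for the attribute fork: map existing attribute extents
 * for read-only reporting (e.g. FIEMAP); no allocation is performed here.
 */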
static int
xfs_xattr_iomap_begin(
        struct inode            *inode,
        loff_t                  offset,
        loff_t                  length,
        unsigned                flags,
        struct iomap            *iomap)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        xfs_fileoff_t           end_fsb = XFS_B_TO_FSB(mp, offset + length);
        struct xfs_bmbt_irec    imap;
        int                     nimaps = 1, error = 0;
        unsigned                lockmode;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        lockmode = xfs_ilock_attr_map_shared(ip);

        /* if there is no attribute fork or no extents, return ENOENT */
        if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
                error = -ENOENT;
                goto out_unlock;
        }

        ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
        error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
                               &nimaps, XFS_BMAPI_ENTIRE | XFS_BMAPI_ATTRFORK);
out_unlock:
        xfs_iunlock(ip, lockmode);

        if (!error) {
                ASSERT(nimaps);
                xfs_bmbt_to_iomap(ip, iomap, &imap);
        }

        return error;
}

const struct iomap_ops xfs_xattr_iomap_ops = {
        .iomap_begin            = xfs_xattr_iomap_begin,
};