/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/iomap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"


#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
					 << mp->m_writeio_log)

void
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->blkno = IOMAP_NULL_BLOCK;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK) {
		iomap->blkno = IOMAP_NULL_BLOCK;
		iomap->type = IOMAP_DELALLOC;
	} else {
		iomap->blkno = xfs_fsb_to_db(ip, imap->br_startblock);
		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
}

static xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when the file is on a real-time subvolume or has a di_extsize
	 * hint).
	 */
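	/*
	 * Note that roundup_64() rounds the stripe alignment up to the
	 * next multiple of extsize rather than to their least common
	 * multiple: e.g. align = 16 and extsize = 10 yields 20.
	 */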
	if (extsize) {
		if (align)
			align = roundup_64(align, extsize);
		else
			align = extsize;
	}

	return align;
}

STATIC int
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize,
	xfs_fileoff_t		*last_fsb)
{
	xfs_extlen_t		align = xfs_eof_alignment(ip, extsize);

	if (align) {
		xfs_fileoff_t	new_last_fsb = roundup_64(*last_fsb, align);
		int		eof, error;

		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}

STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}

int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz, temp;
	int		nimaps;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	struct xfs_defer_ops dfops;
	uint		qblocks, resblks, resrtextents;
	int		error;
	int		lockmode;
	int		bmapi_flags = XFS_BMAPI_PREALLOC;
	uint		tflags = 0;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);
	lockmode = XFS_ILOCK_SHARED;	/* locked by caller */

	ASSERT(xfs_isilocked(ip, lockmode));

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		/*
		 * Assert that the in-core extent list is present since this
		 * can call xfs_iread_extents() and we only have the ilock
		 * shared. This should be safe because the lock was held
		 * around a bmapi call in the caller and we only need it to
		 * access the in-core list.
		 */
		ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
								XFS_IFEXTENTS);
		error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
		if (error)
			goto out_unlock;
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);

	resaligned = count_fsb;
	if (unlikely(extsz)) {
		if ((temp = do_mod(offset_fsb, extsz)))
			resaligned += temp;
		if ((temp = do_mod(resaligned, extsz)))
			resaligned += extsz - temp;
	}

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Drop the shared lock acquired by the caller, attach the dquot if
	 * necessary and move on to transaction setup.
	 */
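	/*
	 * The ilock cannot be held across what follows: xfs_qm_dqattach()
	 * takes the ilock itself, and transaction allocation may block
	 * waiting for log space, which must not happen with the inode
	 * locked. The lock is retaken exclusively once the transaction is
	 * set up.
	 */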
	xfs_iunlock(ip, lockmode);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to
	 * do this outside the transaction context, but if we commit and then
	 * crash we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation. Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten
	 * extent callback for DAX. This also means that we need to be able to
	 * dip into the reserve block pool for bmbt block allocation if there
	 * is no space left but we need to do unwritten extent conversion.
	 */
	if (IS_DAX(VFS_I(ip))) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (ISUNWRITTEN(imap)) {
			tflags |= XFS_TRANS_RESERVE;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
			tflags, &tp);
	if (error)
		return error;

	lockmode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockmode);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	xfs_defer_init(&dfops, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				bmapi_flags, &firstfsb, resblks, imap,
				&nimaps, &dfops);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction.
	 */
	error = xfs_defer_finish(&tp, &dfops, NULL);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
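	/*
	 * xfs_bmapi_write() can succeed without returning a mapping if the
	 * allocator found no space at all; turn that silent failure into
	 * ENOSPC rather than handing an empty map back to the caller.
	 */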
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t *qblocks,
	int *qshift,
	int64_t	*qfreesp)
{
	int64_t freesp;
	int shift = 0;
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

/*
 * If we are doing a write at the end of the file and there are no allocations
 * past this one, then extend the allocation out to the file system's write
 * iosize.
 *
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full.  The closer the
 * filesystem is to being full, the smaller the maximum preallocation.
 *
 * As an exception we don't do any preallocation at all if the file is smaller
 * than the minimum preallocation and we are using the default dynamic
 * preallocation scheme, as it is likely this is the only write to the file
 * that is going to be done.
 *
 * We clean up any extra space left over when the file is closed in
 * xfs_inactive().
 */
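/*
 * The dynamic scheme below doubles the preceding extent: a file grown by
 * steady sequential writes sees a preallocation of twice the last delalloc
 * extent each time (e.g. 64 blocks, then 128, then 256, ...), up to the
 * MAXEXTLEN cap and subject to the low space throttling that follows.
 */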
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			count,
	xfs_extnum_t		idx,
	struct xfs_bmbt_irec	*prev)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;
	xfs_fsblock_t		alloc_blocks = 0;

	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)))
		return 0;

	/*
	 * If an explicit allocsize is set, the file is small, or we
	 * are writing behind a hole, then use the minimum prealloc.
	 */
	if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    idx == 0 ||
	    prev->br_startoff + prev->br_blockcount < offset_fsb)
		return mp->m_writeio_blocks;

	/*
	 * Determine the initial size of the preallocation. We are beyond the
	 * current EOF here, but we need to take into account whether this is
	 * a sparse write or an extending write when determining the
	 * preallocation size.  Hence we need to look up the extent that ends
	 * at the current write offset and use the result to determine the
	 * preallocation size.
	 *
	 * If the extent is a hole, then preallocation is essentially disabled.
	 * Otherwise we take the size of the preceding data extent as the basis
	 * for the preallocation size. If the size of the extent is greater
	 * than half the maximum extent length, then use the current offset as
	 * the basis. This ensures that for large files the preallocation size
	 * always extends to MAXEXTLEN rather than falling short due to things
	 * like stripe unit/width alignment of real extents.
	 */
	if (prev->br_blockcount <= (MAXEXTLEN >> 1))
		alloc_blocks = prev->br_blockcount << 1;
	else
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported
	 * prealloc size, we round up first, apply appropriate throttling,
	 * round down and cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
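	/*
	 * The shifts accumulate: below 5% free space the prealloc is cut to
	 * 1/4 (shift 2), and each further threshold crossed adds another
	 * halving, down to 1/64 (shift 6) below 1% free. The quota checks
	 * below apply a similar scheme against the per-dquot watermarks.
	 */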
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space
	 * available in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined
	 * by the global low free space values and per-quota low free space
	 * values.
	 */
	alloc_blocks = MIN(alloc_blocks, qblocks);
	shift = MAX(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;

check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);
	return alloc_blocks;
}

static int
xfs_file_iomap_begin_delay(
	struct inode		*inode,
	loff_t			offset,
	loff_t			count,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		maxbytes_fsb =
		XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	xfs_fileoff_t		end_fsb, orig_end_fsb;
	int			error = 0, eof = 0;
	struct xfs_bmbt_irec	got;
	struct xfs_bmbt_irec	prev;
	xfs_extnum_t		idx;

	ASSERT(!XFS_IS_REALTIME_INODE(ip));
	ASSERT(!xfs_get_extsz_hint(ip));

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	xfs_bmap_search_extents(ip, offset_fsb, XFS_DATA_FORK, &eof, &idx,
			&got, &prev);
	if (!eof && got.br_startoff <= offset_fsb) {
		trace_xfs_iomap_found(ip, offset, count, 0, &got);
		goto done;
	}

	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		goto out_unlock;

	/*
	 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES pages
	 * to keep the chunks of work done here somewhat symmetric with the
	 * work writeback does.  This is a completely arbitrary number pulled
	 * out of thin air as a best guess for initial testing.
	 *
	 * Note that the values need to be less than 32-bits wide until
	 * the lower level functions are updated.
	 */
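	/*
	 * With the common 4k page size this caps each delalloc reservation
	 * at 4MB; larger page sizes raise the cap proportionally.
	 */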
	count = min_t(loff_t, count, 1024 * PAGE_SIZE);
	end_fsb = orig_end_fsb =
		min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);

	if (eof) {
		xfs_fsblock_t	prealloc_blocks;

		prealloc_blocks =
			xfs_iomap_prealloc_size(ip, offset, count, idx, &prev);
		if (prealloc_blocks) {
			xfs_extlen_t	align;
			xfs_off_t	end_offset;

			end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
			end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
				prealloc_blocks;

			align = xfs_eof_alignment(ip, 0);
			if (align)
				end_fsb = roundup_64(end_fsb, align);

			end_fsb = min(end_fsb, maxbytes_fsb);
			ASSERT(end_fsb > offset_fsb);
		}
	}

retry:
	error = xfs_bmapi_reserve_delalloc(ip, offset_fsb,
			end_fsb - offset_fsb, &got,
			&prev, &idx, eof);
	switch (error) {
	case 0:
		break;
	case -ENOSPC:
	case -EDQUOT:
		/* retry without any preallocation */
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (end_fsb != orig_end_fsb) {
			end_fsb = orig_end_fsb;
			goto retry;
		}
		/*FALLTHRU*/
	default:
		goto out_unlock;
	}

	/*
	 * Tag the inode as speculatively preallocated so we can reclaim this
	 * space on demand, if necessary.
	 */
	if (end_fsb != orig_end_fsb)
		xfs_inode_set_eofblocks_tag(ip);

	trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
done:
	if (isnullstartblock(got.br_startblock))
		got.br_startblock = DELAYSTARTBLOCK;

	if (!got.br_startblock) {
		error = xfs_alert_fsblock_zero(ip, &got);
		if (error)
			goto out_unlock;
	}

	xfs_bmbt_to_iomap(ip, iomap, &got);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating caller's request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_bmbt_irec_t	*imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	struct xfs_defer_ops	dfops;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps;
	int		error = 0;
	int		nres;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */
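		/*
		 * nimaps is both input (capacity of imap) and output
		 * (mappings returned) to xfs_bmapi_write(); the inner loop
		 * repeats until at least one real extent comes back.
		 */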
		nimaps = 0;
		while (nimaps == 0) {
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			/*
			 * We have already reserved space for the extent and
			 * any indirect blocks when creating the delalloc
			 * extent, there is no need to reserve space in this
			 * transaction again.
			 */
			error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0,
					0, XFS_TRANS_RESERVE, &tp);
			if (error)
				return error;

			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_defer_init(&dfops, &first_block);

			/*
			 * it is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while. We have to be careful about truncates or
			 * hole punches here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced
			 * to check each map we get back for overlap with the
			 * desired range and abort as soon as we find it.
			 * Also, given that we only return a single map,
			 * having one beyond what we can return is probably a
			 * bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before blocking on the pages we
			 * currently have locked under writeback. Because they
			 * are about to be tossed, we don't need to write them
			 * back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(ip, &last_block,
						XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = -EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb, 0, &first_block,
						nres, imap, &nimaps,
						&dfops);
			if (error)
				goto trans_cancel;

			error = xfs_defer_finish(&tp, &dfops, NULL);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the callers request
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(mp, xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
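		/*
		 * Advance past the extent we just converted and loop; the
		 * remaining delalloc range shrinks by br_blockcount each
		 * pass, so the loop terminates.
		 */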
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	struct xfs_defer_ops dfops;
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that
		 * we complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
		if (error)
			return error;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_defer_init(&dfops, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, &firstfsb, resblks,
					&imap, &nimaps, &dfops);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;

		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_defer_finish(&tp, &dfops, NULL);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

static inline bool imap_needs_alloc(struct inode *inode,
		struct xfs_bmbt_irec *imap, int nimaps)
{
	return !nimaps ||
		imap->br_startblock == HOLESTARTBLOCK ||
		imap->br_startblock == DELAYSTARTBLOCK ||
		(IS_DAX(inode) && ISUNWRITTEN(imap));
}

static int
xfs_file_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			nimaps = 1, error = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if ((flags & IOMAP_WRITE) &&
	    !IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
		return xfs_file_iomap_begin_delay(inode, offset, length, flags,
				iomap);
	}

	lockmode = xfs_ilock_data_map_shared(ip);

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
		length = mp->m_super->s_maxbytes - offset;
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, XFS_BMAPI_ENTIRE);
	if (error) {
		xfs_iunlock(ip, lockmode);
		return error;
	}

	if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
		/*
		 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
		 * pages to keep the chunks of work done here somewhat
		 * symmetric with the work writeback does.  This is a
		 * completely arbitrary number pulled out of thin air as a
		 * best guess for initial testing.
		 *
		 * Note that the values need to be less than 32-bits wide
		 * until the lower level functions are updated.
		 */
		length = min_t(loff_t, length, 1024 * PAGE_SIZE);
		/*
		 * xfs_iomap_write_direct() expects the shared lock. It
		 * is unlocked on return.
		 */
		if (lockmode == XFS_ILOCK_EXCL)
			xfs_ilock_demote(ip, lockmode);
		error = xfs_iomap_write_direct(ip, offset, length, &imap,
				nimaps);
		if (error)
			return error;

		iomap->flags = IOMAP_F_NEW;
		trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
	} else {
		ASSERT(nimaps);

		xfs_iunlock(ip, lockmode);
		trace_xfs_iomap_found(ip, offset, length, 0, &imap);
	}

	xfs_bmbt_to_iomap(ip, iomap, &imap);
	return 0;
}

static int
xfs_file_iomap_end_delalloc(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			length,
	ssize_t			written)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error = 0;

	start_fsb = XFS_B_TO_FSB(mp, offset + written);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	/*
	 * Trim back delalloc blocks if we didn't manage to write the whole
	 * range reserved.
	 *
	 * We don't need to care about racing delalloc as we hold i_mutex
	 * across the reserve/allocate/unreserve calls. If there are delalloc
	 * blocks in the range, they are ours.
	 */
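	/*
	 * start_fsb is rounded up (XFS_B_TO_FSB), so a block that was only
	 * partially written is kept rather than punched out; only whole
	 * blocks past the written range are returned.
	 */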
	if (start_fsb < end_fsb) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
					       end_fsb - start_fsb);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		if (error && !XFS_FORCED_SHUTDOWN(mp)) {
			xfs_alert(mp, "%s: unable to clean up ino %lld",
				__func__, ip->i_ino);
			return error;
		}
	}

	return 0;
}

static int
xfs_file_iomap_end(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
		return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
				length, written);
	return 0;
}

struct iomap_ops xfs_iomap_ops = {
	.iomap_begin		= xfs_file_iomap_begin,
	.iomap_end		= xfs_file_iomap_end,
};

static int
xfs_xattr_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1, error = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lockmode = xfs_ilock_data_map_shared(ip);

	/* if there are no attribute fork or extents, return ENOENT */
	if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
		error = -ENOENT;
		goto out_unlock;
	}

	ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, XFS_BMAPI_ENTIRE | XFS_BMAPI_ATTRFORK);
out_unlock:
	xfs_iunlock(ip, lockmode);

	if (!error) {
		ASSERT(nimaps);
		xfs_bmbt_to_iomap(ip, iomap, &imap);
	}

	return error;
}

struct iomap_ops xfs_xattr_iomap_ops = {
	.iomap_begin		= xfs_xattr_iomap_begin,
};