/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"


#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)
#define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP

STATIC int
xfs_iomap_eof_align_last_fsb(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_extlen_t	extsize,
	xfs_fileoff_t	*last_fsb)
{
	xfs_extlen_t	align = 0;
	int		eof, error;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when the file is on a real-time subvolume or has a di_extsize
	 * hint).
	 */
	if (extsize) {
		if (align)
			align = roundup_64(align, extsize);
		else
			align = extsize;
	}

	if (align) {
		xfs_fileoff_t	new_last_fsb = roundup_64(*last_fsb, align);
		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}
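/*
 * A worked example of the alignment above (illustrative numbers, not taken
 * from the code): with a stripe unit of m_dalign = 16 blocks, no extent
 * size hint, and a requested *last_fsb of 100, roundup_64(100, 16) yields
 * 112.  The request is only widened to 112 if xfs_bmap_eof() confirms that
 * block 112 still lies beyond the last allocated block; otherwise *last_fsb
 * is left alone.  With an extent size hint of, say, 10 blocks as well, the
 * alignment first becomes roundup_64(16, 10) = 20 before the same check.
 */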
STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}

int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz, temp;
	int		nimaps;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	xfs_bmap_free_t	free_list;
	uint		qblocks, resblks, resrtextents;
	int		committed;
	int		error;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return error;
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);

	resaligned = count_fsb;
	if (unlikely(extsz)) {
		if ((temp = do_mod(offset_fsb, extsz)))
			resaligned += temp;
		if ((temp = do_mod(resaligned, extsz)))
			resaligned += extsz - temp;
	}
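	/*
	 * Worked example of the rounding above (illustrative numbers): with
	 * extsz = 16, offset_fsb = 10 and count_fsb = 20, the write covers
	 * blocks [10, 30).  The first do_mod() adds the 10 blocks back to
	 * the previous extent size boundary (resaligned = 30), and the
	 * second adds 2 to reach the next one, so we reserve 32 blocks,
	 * i.e. the two full extent-size chunks [0, 16) and [16, 32) that
	 * the write touches.
	 */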
	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Allocate and set up the transaction.
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
				  resblks, resrtextents);
	/*
	 * Check for running out of space; nothing is locked yet if this
	 * fails, so we can simply cancel and return.
	 */
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	xfs_bmap_init(&free_list, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				XFS_BMAPI_PREALLOC, &firstfsb, 0,
				imap, &nimaps, &free_list);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction.
	 */
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto out_bmap_cancel;
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	goto out_unlock;
}

/*
 * If the caller is doing a write at the end of the file, then extend the
 * allocation out to the file system's write iosize.  We clean up any extra
 * space left over when the file is closed in xfs_inactive().
 *
 * If we find we already have delalloc preallocation beyond EOF, don't do more
 * preallocation as it is not needed.
 */
STATIC int
xfs_iomap_eof_want_preallocate(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap,
	int		nimaps,
	int		*prealloc)
{
	xfs_fileoff_t	start_fsb;
	xfs_filblks_t	count_fsb;
	int		n, error, imaps;
	int		found_delalloc = 0;

	*prealloc = 0;
	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	/*
	 * If the file is smaller than the minimum prealloc and we are using
	 * dynamic preallocation, don't do any preallocation at all as it is
	 * likely this is the only write to the file that is going to be done.
	 */
	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks))
		return 0;

	/*
	 * If there are any real blocks past eof, then don't
	 * do any speculative allocation.
	 */
	start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
	count_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	while (count_fsb > 0) {
		imaps = nimaps;
		error = xfs_bmapi_read(ip, start_fsb, count_fsb, imap, &imaps,
				       0);
		if (error)
			return error;
		for (n = 0; n < imaps; n++) {
			if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
			    (imap[n].br_startblock != DELAYSTARTBLOCK))
				return 0;
			start_fsb += imap[n].br_blockcount;
			count_fsb -= imap[n].br_blockcount;

			if (imap[n].br_startblock == DELAYSTARTBLOCK)
				found_delalloc = 1;
		}
	}
	if (!found_delalloc)
		*prealloc = 1;
	return 0;
}
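/*
 * To summarise the scan above with an example (illustrative, not from the
 * code): for a write ending beyond EOF we walk the extent map from the last
 * written block out towards s_maxbytes.  If we ever hit a real (allocated)
 * extent we return with *prealloc still 0, and likewise if we find delalloc
 * blocks, since an earlier write already did the speculative preallocation
 * for us.  Only when everything past the write is a hole do we set
 * *prealloc = 1 and size a new preallocation below.
 */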
/*
 * Determine the initial size of the preallocation. We are beyond the current
 * EOF here, but we need to take into account whether this is a sparse write or
 * an extending write when determining the preallocation size.  Hence we need
 * to look up the extent that ends at the current write offset and use the
 * result to determine the preallocation size.
 *
 * If the extent is a hole, then preallocation is essentially disabled.
 * Otherwise we take the size of the preceding data extent as the basis for the
 * preallocation size. If the size of the extent is greater than half the
 * maximum extent length, then use the current offset as the basis. This
 * ensures that for large files the preallocation size always extends to
 * MAXEXTLEN rather than falling short due to things like stripe unit/width
 * alignment of real extents.
 */
STATIC xfs_fsblock_t
xfs_iomap_eof_prealloc_initial_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_bmbt_irec_t		*imap,
	int			nimaps)
{
	xfs_fileoff_t	start_fsb;
	int		imaps = 1;
	int		error;

	ASSERT(nimaps >= imaps);

	/* if we are using a specific prealloc size, return now */
	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
		return 0;

	/* If the file is small, then use the minimum prealloc */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign))
		return 0;

	/*
	 * As we write multiple pages, the offset will always align to the
	 * start of a page and hence point to a hole at EOF. i.e. if the size
	 * is 4096 bytes, we only have one block at FSB 0, but XFS_B_TO_FSB(4096)
	 * will return FSB 1. Hence if there are blocks in the file, we want to
	 * point to the block prior to the EOF block and not the hole that maps
	 * directly at @offset.
	 */
	start_fsb = XFS_B_TO_FSB(mp, offset);
	if (start_fsb)
		start_fsb--;
	error = xfs_bmapi_read(ip, start_fsb, 1, imap, &imaps, XFS_BMAPI_ENTIRE);
	if (error)
		return 0;

	ASSERT(imaps == 1);
	if (imap[0].br_startblock == HOLESTARTBLOCK)
		return 0;
	if (imap[0].br_blockcount <= (MAXEXTLEN >> 1))
		return imap[0].br_blockcount << 1;
	return XFS_B_TO_FSB(mp, offset);
}
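/*
 * For example (illustrative numbers): if the extent ending at the write
 * offset is 1000 blocks long, the initial preallocation is doubled to 2000
 * blocks.  Once the preceding extent grows beyond MAXEXTLEN / 2 (MAXEXTLEN
 * is 2^21 - 1 blocks), doubling would overshoot what a single extent record
 * can hold, so we switch to using the write offset itself as the basis.
 */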
STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t *qblocks,
	int *qshift,
	int64_t	*qfreesp)
{
	int64_t freesp;
	int shift = 0;
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}
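/*
 * The shift computed above is applied as a divisor of 2^shift in
 * xfs_iomap_prealloc_size().  As an illustration: a dquot that has dropped
 * below its 5% low-space threshold throttles the preallocation by
 * 2^2 = 4, below 3% by 2^4 = 16, and below 1% by 2^6 = 64, until the hard
 * watermark squashes it entirely.
 */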
/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows. Cap the maximum size
 * at a single extent or less if the filesystem is near full. The closer the
 * filesystem is to full, the smaller the maximum preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	xfs_fsblock_t		alloc_blocks = 0;
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;

	alloc_blocks = xfs_iomap_eof_prealloc_initial_size(mp, ip, offset,
							   imap, nimaps);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported
	 * prealloc size, we round up first, apply appropriate throttling,
	 * round down and cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
	freesp = mp->m_sb.sb_fdblocks;
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space
	 * available in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined
	 * by the global low free space values and per-quota low free space
	 * values.
	 */
	alloc_blocks = MIN(alloc_blocks, qblocks);
	shift = MAX(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;

check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;

	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);

	return alloc_blocks;
}
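/*
 * Putting the throttling together with an example (illustrative numbers,
 * assuming 4k blocks): an initial prealloc of 2^21 blocks on a filesystem
 * that has dropped below the 5% and 4% free space thresholds picks up
 * shift = 3, giving 2^21 >> 3 = 2^18 blocks (1 GiB).  rounddown_pow_of_two()
 * then leaves that value alone, and the MAXEXTLEN cap only bites in the
 * unthrottled case, where 2^21 is trimmed back to the 2^21 - 1 block
 * extent size limit.
 */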
int
xfs_iomap_write_delay(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*ret_imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_off_t	aligned_offset;
	xfs_fileoff_t	ioalign;
	xfs_extlen_t	extsz;
	int		nimaps;
	xfs_bmbt_irec_t	imap[XFS_WRITE_IMAPS];
	int		prealloc;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/*
	 * Make sure that the dquots are there. This doesn't hold
	 * the ilock across a disk read.
	 */
	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		return error;

	extsz = xfs_get_extsz_hint(ip);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
				imap, XFS_WRITE_IMAPS, &prealloc);
	if (error)
		return error;

retry:
	if (prealloc) {
		xfs_fsblock_t	alloc_blocks;

		alloc_blocks = xfs_iomap_prealloc_size(mp, ip, offset, imap,
						       XFS_WRITE_IMAPS);

		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
		last_fsb = ioalign + alloc_blocks;
	} else {
		last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	}

	if (prealloc || extsz) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return error;
	}

	/*
	 * Make sure preallocation does not create extents beyond the range we
	 * actually support in this filesystem.
	 */
	if (last_fsb > XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes))
		last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);

	ASSERT(last_fsb > offset_fsb);

	nimaps = XFS_WRITE_IMAPS;
	error = xfs_bmapi_delay(ip, offset_fsb, last_fsb - offset_fsb,
				imap, &nimaps, XFS_BMAPI_ENTIRE);
	switch (error) {
	case 0:
	case -ENOSPC:
	case -EDQUOT:
		break;
	default:
		return error;
	}

	/*
	 * If bmapi returned us nothing, we got either ENOSPC or EDQUOT. Retry
	 * without EOF preallocation.
	 */
	if (nimaps == 0) {
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc) {
			prealloc = 0;
			error = 0;
			goto retry;
		}
		return error ? error : -ENOSPC;
	}

	if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip)))
		return xfs_alert_fsblock_zero(ip, &imap[0]);

	/*
	 * Tag the inode as speculatively preallocated so we can reclaim this
	 * space on demand, if necessary.
	 */
	if (prealloc)
		xfs_inode_set_eofblocks_tag(ip);

	*ret_imap = imap[0];
	return 0;
}
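/*
 * A note on the alignment arithmetic in xfs_iomap_write_delay() above, with
 * illustrative numbers: assuming 4k blocks and a 64k write iosize
 * (m_writeio_log = 16), a buffered write ending at byte offset 1000000 is
 * rounded down by XFS_WRITEIO_ALIGN() to 983040, i.e. file system block
 * 240, and the delalloc reservation then runs from the write offset out to
 * FSB 240 + alloc_blocks.
 */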
/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating callers request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_bmbt_irec_t	*imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps, committed;
	int		error = 0;
	int		nres;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */

		nimaps = 0;
		while (nimaps == 0) {
			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
			tp->t_flags |= XFS_TRANS_RESERVE;
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
						  nres, 0);
			if (error) {
				xfs_trans_cancel(tp, 0);
				return error;
			}
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_bmap_init(&free_list, &first_block);

			/*
			 * it is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while. We have to be careful about truncates or hole
			 * punches here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced to
			 * check each map we look at for overlap with the
			 * desired range and abort as soon as we find it. Also,
			 * given that we only return a single map, having one
			 * beyond what we can return is probably a bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before blocking on the pages we
			 * currently have locked under writeback. Because they
			 * are about to be tossed, we don't need to write them
			 * back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(ip, &last_block,
						     XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = -EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb, 0,
						&first_block, 1,
						imap, &nimaps, &free_list);
			if (error)
				goto trans_cancel;

			error = xfs_bmap_finish(&tp, &free_list, &committed);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the callers request
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
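/*
 * A sketch of the conversion handled below (illustrative ranges): with an
 * unwritten extent covering blocks [0, 100) and a write into [40, 60), the
 * conversion replaces one extent record with three - unwritten [0, 40),
 * written [40, 60) and unwritten [60, 100).  The two record insertions are
 * why the transaction below reserves space for two full btree splits.
 */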
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	xfs_bmap_free_t	free_list;
	xfs_fsize_t	i_size;
	uint		resblks;
	int		committed;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we open code the transaction allocation here
		 * to pass KM_NOFS; we can't risk recursing back into
		 * the filesystem here as we might be asked to write out
		 * the same inode that we complete here and might deadlock
		 * on the iolock.
		 */
		sb_start_intwrite(mp->m_super);
		tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
		tp->t_flags |= XFS_TRANS_RESERVE | XFS_TRANS_FREEZE_PROT;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
					  resblks, 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, &firstfsb,
					1, &imap, &nimaps, &free_list);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go. We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;

		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}