/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"


#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)
#define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP

STATIC int
xfs_iomap_eof_align_last_fsb(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_extlen_t	extsize,
	xfs_fileoff_t	*last_fsb)
{
	xfs_fileoff_t	new_last_fsb = 0;
	xfs_extlen_t	align = 0;
	int		eof, error;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) >= XFS_FSB_TO_B(mp, align))
			new_last_fsb = roundup_64(*last_fsb, align);
	}

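	/*
	 * For example, with a stripe unit of 16 filesystem blocks and a
	 * requested *last_fsb of 100, roundup_64(100, 16) extends the
	 * request to block 112, the next stripe unit boundary.
	 */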
	/*
	 * Always round up the allocation request to an extent boundary
	 * (when the file is on a real-time subvolume or has a di_extsize
	 * hint).
	 */
	if (extsize) {
		if (new_last_fsb)
			align = roundup_64(new_last_fsb, extsize);
		else
			align = extsize;
		new_last_fsb = roundup_64(*last_fsb, align);
	}

	if (new_last_fsb) {
		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}

STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x\n",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return EFSCORRUPTED;
}

int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz, temp;
	int		nimaps;
	int		bmapi_flag;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	xfs_bmap_free_t	free_list;
	uint		qblocks, resblks, resrtextents;
	int		committed;
	int		error;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return XFS_ERROR(error);

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return XFS_ERROR(error);
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);

	resaligned = count_fsb;
	if (unlikely(extsz)) {
		if ((temp = do_mod(offset_fsb, extsz)))
			resaligned += temp;
		if ((temp = do_mod(resaligned, extsz)))
			resaligned += extsz - temp;
	}

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Allocate and set up the transaction.
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
	error = xfs_trans_reserve(tp, resblks,
			XFS_WRITE_LOG_RES(mp), resrtextents,
			XFS_TRANS_PERM_LOG_RES,
			XFS_WRITE_LOG_COUNT);
	/*
	 * Check for running out of space; note that we need the lock to
	 * return.
	 */
	if (error) {
		xfs_trans_cancel(tp, 0);
		return XFS_ERROR(error);
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	bmapi_flag = 0;
	if (offset < XFS_ISIZE(ip) || extsz)
		bmapi_flag |= XFS_BMAPI_PREALLOC;

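	/*
	 * For example, a direct write entirely beyond EOF on an inode with
	 * no extent size hint leaves bmapi_flag clear, so the blocks come
	 * back as normal written extents; a write within EOF sets
	 * XFS_BMAPI_PREALLOC so the new allocation stays unwritten until
	 * the data I/O completes.
	 */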
	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	xfs_bmap_init(&free_list, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flag,
				&firstfsb, 0, imap, &nimaps, &free_list);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction.
	 */
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto out_bmap_cancel;
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to the caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = XFS_ERROR(ENOSPC);
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	goto out_unlock;
}

/*
 * If the caller is doing a write at the end of the file, then extend the
 * allocation out to the file system's write iosize.  We clean up any extra
 * space left over when the file is closed in xfs_inactive().
 *
 * If we find we already have delalloc preallocation beyond EOF, don't do more
 * preallocation as it is not needed.
 */
STATIC int
xfs_iomap_eof_want_preallocate(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap,
	int		nimaps,
	int		*prealloc)
{
	xfs_fileoff_t	start_fsb;
	xfs_filblks_t	count_fsb;
	xfs_fsblock_t	firstblock;
	int		n, error, imaps;
	int		found_delalloc = 0;

	*prealloc = 0;
	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	/*
	 * If the file is smaller than the minimum prealloc and we are using
	 * dynamic preallocation, don't do any preallocation at all as it is
	 * likely this is the only write to the file that is going to be done.
	 */
	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks))
		return 0;

	/*
	 * If there are any real blocks past eof, then don't
	 * do any speculative allocation.
	 */
	start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
	count_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	while (count_fsb > 0) {
		imaps = nimaps;
		firstblock = NULLFSBLOCK;
		error = xfs_bmapi_read(ip, start_fsb, count_fsb, imap, &imaps,
				       0);
		if (error)
			return error;
		for (n = 0; n < imaps; n++) {
			if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
			    (imap[n].br_startblock != DELAYSTARTBLOCK))
				return 0;
			start_fsb += imap[n].br_blockcount;
			count_fsb -= imap[n].br_blockcount;

			if (imap[n].br_startblock == DELAYSTARTBLOCK)
				found_delalloc = 1;
		}
	}
	if (!found_delalloc)
		*prealloc = 1;
	return 0;
}

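/*
 * For example, an extending write that lands in a region already covered by
 * delalloc blocks from an earlier write leaves *prealloc at zero above,
 * while an extending write into a hole past EOF sets *prealloc and triggers
 * the speculative preallocation sizing below.
 */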
/*
 * Determine the initial size of the preallocation.  We are beyond the current
 * EOF here, but we need to take into account whether this is a sparse write
 * or an extending write when determining the preallocation size.  Hence we
 * need to look up the extent that ends at the current write offset and use
 * the result to determine the preallocation size.
 *
 * If the extent is a hole, then preallocation is essentially disabled.
 * Otherwise we take the size of the preceding data extent as the basis for
 * the preallocation size.  If the size of the extent is greater than half the
 * maximum extent length, then use the current offset as the basis.  This
 * ensures that for large files the preallocation size always extends to
 * MAXEXTLEN rather than falling short due to things like stripe unit/width
 * alignment of real extents.
 */
STATIC xfs_fsblock_t
xfs_iomap_eof_prealloc_initial_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_bmbt_irec_t		*imap,
	int			nimaps)
{
	xfs_fileoff_t	start_fsb;
	int		imaps = 1;
	int		error;

	ASSERT(nimaps >= imaps);

	/* if we are using a specific prealloc size, return now */
	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
		return 0;

	/* If the file is small, then use the minimum prealloc */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign))
		return 0;

	/*
	 * As we write multiple pages, the offset will always align to the
	 * start of a page and hence point to a hole at EOF, i.e. if the size
	 * is 4096 bytes, we only have one block at FSB 0, but XFS_B_TO_FSB(4096)
	 * will return FSB 1.  Hence if there are blocks in the file, we want
	 * to point to the block prior to the EOF block and not the hole that
	 * maps directly at @offset.
	 */
	start_fsb = XFS_B_TO_FSB(mp, offset);
	if (start_fsb)
		start_fsb--;
	error = xfs_bmapi_read(ip, start_fsb, 1, imap, &imaps, XFS_BMAPI_ENTIRE);
	if (error)
		return 0;

	ASSERT(imaps == 1);
	if (imap[0].br_startblock == HOLESTARTBLOCK)
		return 0;
	if (imap[0].br_blockcount <= (MAXEXTLEN >> 1))
		return imap[0].br_blockcount << 1;
	return XFS_B_TO_FSB(mp, offset);
}

STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t *qblocks,
	int *qshift)
{
	int64_t freesp;
	int shift = 0;
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	/* over hi wmark, squash the prealloc completely */
	if (dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

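/*
 * For example, a quota between its 5% and 3% low-space thresholds yields a
 * shift of 2 above, throttling the preallocation to a quarter of its size;
 * below 3% it is cut to a sixteenth, and below 1% to a sixty-fourth.
 */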
/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full.  The closer the
 * filesystem is to being full, the smaller the maximum preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	xfs_fsblock_t		alloc_blocks = 0;
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;

	alloc_blocks = xfs_iomap_eof_prealloc_initial_size(mp, ip, offset,
							   imap, nimaps);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc
	 * down to the nearest power of two value after throttling.  To
	 * prevent the round down from unconditionally reducing the maximum
	 * supported prealloc size, we round up first, apply appropriate
	 * throttling, round down and then cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
	freesp = mp->m_sb.sb_fdblocks;
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size and provide a shift
	 * value to throttle with.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift);

	/*
	 * The final prealloc size is set to the minimum of free space
	 * available in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined
	 * by the global low free space values and per-quota low free space
	 * values.
	 */
	alloc_blocks = MIN(alloc_blocks, qblocks);
	shift = MAX(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

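	/*
	 * For example, an initial prealloc of 262144 blocks throttled with a
	 * shift of 4 comes out at 16384 blocks; that is already a power of
	 * two, so the rounddown_pow_of_two() call above leaves it unchanged.
	 */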
	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard.  This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;

check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;

	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);

	return alloc_blocks;
}

int
xfs_iomap_write_delay(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*ret_imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_off_t	aligned_offset;
	xfs_fileoff_t	ioalign;
	xfs_extlen_t	extsz;
	int		nimaps;
	xfs_bmbt_irec_t	imap[XFS_WRITE_IMAPS];
	int		prealloc;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/*
	 * Make sure that the dquots are there.  This doesn't hold
	 * the ilock across a disk read.
	 */
	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		return XFS_ERROR(error);

	extsz = xfs_get_extsz_hint(ip);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
				imap, XFS_WRITE_IMAPS, &prealloc);
	if (error)
		return error;

retry:
	if (prealloc) {
		xfs_fsblock_t	alloc_blocks;

		alloc_blocks = xfs_iomap_prealloc_size(mp, ip, offset, imap,
						       XFS_WRITE_IMAPS);

		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
		last_fsb = ioalign + alloc_blocks;
	} else {
		last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	}

	if (prealloc || extsz) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return error;
	}

	/*
	 * Make sure preallocation does not create extents beyond the range we
	 * actually support in this filesystem.
	 */
	if (last_fsb > XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes))
		last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);

	ASSERT(last_fsb > offset_fsb);

	nimaps = XFS_WRITE_IMAPS;
	error = xfs_bmapi_delay(ip, offset_fsb, last_fsb - offset_fsb,
				imap, &nimaps, XFS_BMAPI_ENTIRE);
	switch (error) {
	case 0:
	case ENOSPC:
	case EDQUOT:
		break;
	default:
		return XFS_ERROR(error);
	}

	/*
	 * If bmapi returned us nothing, we got either ENOSPC or EDQUOT.  Retry
	 * without EOF preallocation.
	 */
	if (nimaps == 0) {
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc) {
			prealloc = 0;
			error = 0;
			goto retry;
		}
		return XFS_ERROR(error ? error : ENOSPC);
	}

	if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip)))
		return xfs_alert_fsblock_zero(ip, &imap[0]);

	/*
	 * Tag the inode as speculatively preallocated so we can reclaim this
	 * space on demand, if necessary.
	 */
	if (prealloc)
		xfs_inode_set_eofblocks_tag(ip);

	*ret_imap = imap[0];
	return 0;
}

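/*
 * For example, in xfs_iomap_write_delay() above with m_writeio_log == 16,
 * a write ending at byte 70000 aligns back to byte 65536, so the delalloc
 * reservation runs for alloc_blocks from that 64k boundary rather than from
 * the raw write offset.
 */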
/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating caller's request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps, committed;
	int		error = 0;
	int		nres;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return XFS_ERROR(error);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */

		nimaps = 0;
		while (nimaps == 0) {
			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
			tp->t_flags |= XFS_TRANS_RESERVE;
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			error = xfs_trans_reserve(tp, nres,
					XFS_WRITE_LOG_RES(mp),
					0, XFS_TRANS_PERM_LOG_RES,
					XFS_WRITE_LOG_COUNT);
			if (error) {
				xfs_trans_cancel(tp, 0);
				return XFS_ERROR(error);
			}
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_bmap_init(&free_list, &first_block);

			/*
			 * It is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while.  We have to be careful about truncates or
			 * hole punches here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced to
			 * check each map we get for overlap with the desired
			 * range and abort as soon as we find it.  Also, given
			 * that we only return a single map, having one beyond
			 * what we can return is probably a bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before blocking on the pages we
			 * currently have locked under writeback.  Because they
			 * are about to be tossed, we don't need to write them
			 * back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(NULL, ip, &last_block,
						     XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = EAGAIN;
					goto trans_cancel;
				}
			}

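			/*
			 * For example, if a racing truncate pulled the file
			 * size back so that last_block now equals
			 * map_start_fsb, the trimmed count_fsb of zero above
			 * returns EAGAIN instead of allocating blocks beyond
			 * the new EOF.
			 */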
			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb,
						XFS_BMAPI_STACK_SWITCH,
						&first_block, 1,
						imap, &nimaps, &free_list);
			if (error)
				goto trans_cancel;

			error = xfs_bmap_finish(&tp, &free_list, &committed);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the caller's request.
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, so try again.
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}

int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	xfs_bmap_free_t	free_list;
	xfs_fsize_t	i_size;
	uint		resblks;
	int		committed;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real.  Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we open code the transaction allocation here
		 * to pass KM_NOFS - we can't risk recursing back into
		 * the filesystem here as we might be asked to write out
		 * the same inode that we complete here and might deadlock
		 * on the iolock.
		 */
		sb_start_intwrite(mp->m_super);
		tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
		tp->t_flags |= XFS_TRANS_RESERVE | XFS_TRANS_FREEZE_PROT;
		error = xfs_trans_reserve(tp, resblks,
					  XFS_WRITE_LOG_RES(mp), 0,
					  XFS_TRANS_PERM_LOG_RES,
					  XFS_WRITE_LOG_COUNT);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return XFS_ERROR(error);
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

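		/*
		 * Each pass through this loop converts at most one extent;
		 * for example, a range spanning two unwritten extents takes
		 * two transactions, with offset_fsb and count_fsb advanced
		 * by the converted extent's length at the bottom of the loop.
		 */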
		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, &firstfsb,
					1, &imap, &nimaps, &free_list);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;

		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return XFS_ERROR(error);

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}