// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2023 Christoph Hellwig.
 */
#include <linux/iomap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include "internal.h"
#include "trace.h"

#include "../internal.h"

/*
 * Structure allocated for each folio to track per-block uptodate, dirty state
 * and I/O completions.
 */
struct iomap_folio_state {
	spinlock_t		state_lock;
	unsigned int		read_bytes_pending;
	atomic_t		write_bytes_pending;

	/*
	 * Each block has two bits in this bitmap:
	 * Bits [0..blocks_per_folio) have the uptodate status.
	 * Bits [b_p_f...(2*b_p_f)) have the dirty status.
	 */
	unsigned long		state[];
};

static inline bool ifs_is_fully_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs)
{
	struct inode *inode = folio->mapping->host;

	return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
}

/*
 * Find the next uptodate block in the folio. end_blk is inclusive.
 * If no uptodate block is found, this will return end_blk + 1.
 */
static unsigned ifs_next_uptodate_block(struct folio *folio,
		unsigned start_blk, unsigned end_blk)
{
	struct iomap_folio_state *ifs = folio->private;

	return find_next_bit(ifs->state, end_blk + 1, start_blk);
}

/*
 * Find the next non-uptodate block in the folio. end_blk is inclusive.
 * If no non-uptodate block is found, this will return end_blk + 1.
 */
static unsigned ifs_next_nonuptodate_block(struct folio *folio,
		unsigned start_blk, unsigned end_blk)
{
	struct iomap_folio_state *ifs = folio->private;

	return find_next_zero_bit(ifs->state, end_blk + 1, start_blk);
}

static bool ifs_set_range_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int first_blk = off >> inode->i_blkbits;
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;

	bitmap_set(ifs->state, first_blk, nr_blks);
	return ifs_is_fully_uptodate(folio, ifs);
}

static void iomap_set_range_uptodate(struct folio *folio, size_t off,
		size_t len)
{
	struct iomap_folio_state *ifs = folio->private;
	unsigned long flags;
	bool uptodate = true;

	if (folio_test_uptodate(folio))
		return;

	if (ifs) {
		spin_lock_irqsave(&ifs->state_lock, flags);
		uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
		spin_unlock_irqrestore(&ifs->state_lock, flags);
	}

	if (uptodate)
		folio_mark_uptodate(folio);
}

/*
 * Find the next dirty block in the folio. end_blk is inclusive.
 * If no dirty block is found, this will return end_blk + 1.
 */
static unsigned ifs_next_dirty_block(struct folio *folio,
		unsigned start_blk, unsigned end_blk)
{
	struct iomap_folio_state *ifs = folio->private;
	struct inode *inode = folio->mapping->host;
	unsigned int blks = i_blocks_per_folio(inode, folio);

	return find_next_bit(ifs->state, blks + end_blk + 1,
			blks + start_blk) - blks;
}
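
/*
 * Example (illustrative only): with a 16K folio and 4K filesystem blocks,
 * i_blocks_per_folio() is 4, so ifs->state holds eight significant bits:
 * bits 0-3 track per-block uptodate state and bits 4-7 track per-block
 * dirty state.  A call like ifs_next_dirty_block(folio, 1, 3) therefore
 * scans bits 5-7 of the bitmap and maps the result back to a block index
 * by subtracting blks (4).
 */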

/*
 * Find the next clean block in the folio. end_blk is inclusive.
 * If no clean block is found, this will return end_blk + 1.
 */
static unsigned ifs_next_clean_block(struct folio *folio,
		unsigned start_blk, unsigned end_blk)
{
	struct iomap_folio_state *ifs = folio->private;
	struct inode *inode = folio->mapping->host;
	unsigned int blks = i_blocks_per_folio(inode, folio);

	return find_next_zero_bit(ifs->state, blks + end_blk + 1,
			blks + start_blk) - blks;
}

static unsigned ifs_find_dirty_range(struct folio *folio,
		struct iomap_folio_state *ifs, u64 *range_start, u64 range_end)
{
	struct inode *inode = folio->mapping->host;
	unsigned start_blk =
		offset_in_folio(folio, *range_start) >> inode->i_blkbits;
	unsigned end_blk = min_not_zero(
		offset_in_folio(folio, range_end) >> inode->i_blkbits,
		i_blocks_per_folio(inode, folio)) - 1;
	unsigned nblks;

	start_blk = ifs_next_dirty_block(folio, start_blk, end_blk);
	if (start_blk > end_blk)
		return 0;
	if (start_blk == end_blk)
		nblks = 1;
	else
		nblks = ifs_next_clean_block(folio, start_blk + 1, end_blk) -
			start_blk;

	*range_start = folio_pos(folio) + (start_blk << inode->i_blkbits);
	return nblks << inode->i_blkbits;
}

static unsigned iomap_find_dirty_range(struct folio *folio, u64 *range_start,
		u64 range_end)
{
	struct iomap_folio_state *ifs = folio->private;

	if (*range_start >= range_end)
		return 0;

	if (ifs)
		return ifs_find_dirty_range(folio, ifs, range_start, range_end);
	return range_end - *range_start;
}
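
/*
 * Example (illustrative only): continuing the 16K folio / 4K block case
 * above, if only blocks 1 and 2 are dirty, ifs_find_dirty_range() with
 * *range_start at the folio start returns 8192 and advances *range_start
 * to folio_pos(folio) + 4096, i.e. one contiguous two-block dirty range.
 * A second call starting past block 2 then returns 0.
 */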

static void ifs_clear_range_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
	unsigned int first_blk = (off >> inode->i_blkbits);
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_clear_range_dirty(folio, ifs, off, len);
}

static void ifs_set_range_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
	unsigned int first_blk = (off >> inode->i_blkbits);
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_set_range_dirty(folio, ifs, off, len);
}

static struct iomap_folio_state *ifs_alloc(struct inode *inode,
		struct folio *folio, unsigned int flags)
{
	struct iomap_folio_state *ifs = folio->private;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
	gfp_t gfp;

	if (ifs || nr_blocks <= 1)
		return ifs;

	if (flags & IOMAP_NOWAIT)
		gfp = GFP_NOWAIT;
	else
		gfp = GFP_NOFS | __GFP_NOFAIL;

	/*
	 * ifs->state tracks two sets of state flags when the
	 * filesystem block size is smaller than the folio size.
	 * The first state tracks per-block uptodate and the
	 * second tracks per-block dirty state.
	 */
	ifs = kzalloc(struct_size(ifs, state,
			BITS_TO_LONGS(2 * nr_blocks)), gfp);
	if (!ifs)
		return ifs;

	spin_lock_init(&ifs->state_lock);
	if (folio_test_uptodate(folio))
		bitmap_set(ifs->state, 0, nr_blocks);
	if (folio_test_dirty(folio))
		bitmap_set(ifs->state, nr_blocks, nr_blocks);
	folio_attach_private(folio, ifs);

	return ifs;
}

static void ifs_free(struct folio *folio)
{
	struct iomap_folio_state *ifs = folio_detach_private(folio);

	if (!ifs)
		return;
	WARN_ON_ONCE(ifs->read_bytes_pending != 0);
	WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
	WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
			folio_test_uptodate(folio));
	kfree(ifs);
}

/*
 * Calculate how many bytes to truncate based off the number of blocks to
 * truncate and the end position to start truncating from.
 */
static size_t iomap_bytes_to_truncate(loff_t end_pos, unsigned block_bits,
		unsigned blocks_truncated)
{
	unsigned block_size = 1 << block_bits;
	unsigned block_offset = end_pos & (block_size - 1);

	if (!block_offset)
		return blocks_truncated << block_bits;

	return ((blocks_truncated - 1) << block_bits) + block_offset;
}
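
/*
 * Example (illustrative only): with 4K blocks (block_bits == 12),
 * iomap_bytes_to_truncate(10240, 12, 2) computes block_offset == 2048,
 * so it returns ((2 - 1) << 12) + 2048 == 6144: one whole block plus the
 * 2048 byte partial block that ends at end_pos.  If end_pos were block
 * aligned, the result would simply be 2 << 12 == 8192.
 */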

/*
 * Calculate the range inside the folio that we actually need to read.
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
	struct iomap_folio_state *ifs = folio->private;
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	size_t orig_plen = plen;
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (ifs) {
		unsigned int next, blocks_skipped;

		next = ifs_next_nonuptodate_block(folio, first, last);
		blocks_skipped = next - first;

		if (blocks_skipped) {
			unsigned long block_offset = *pos & (block_size - 1);
			unsigned bytes_skipped =
				(blocks_skipped << block_bits) - block_offset;

			*pos += bytes_skipped;
			poff += bytes_skipped;
			plen -= bytes_skipped;
		}
		first = next;

		/* truncate len if we find any trailing uptodate block(s) */
		if (++next <= last) {
			next = ifs_next_uptodate_block(folio, next, last);
			if (next <= last) {
				plen -= iomap_bytes_to_truncate(*pos + plen,
						block_bits, last - next + 1);
				last = next - 1;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + orig_plen > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= iomap_bytes_to_truncate(*pos + plen, block_bits,
					last - end);
	}

	*offp = poff;
	*lenp = plen;
}
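
/*
 * Example (illustrative only): for a 16K folio with 4K blocks where block
 * 0 is already uptodate and blocks 1-3 are not, a full-folio read request
 * (*pos at the folio start, length == 16384) is adjusted to *pos += 4096,
 * poff == 4096 and plen == 12288, so the leading uptodate block is never
 * re-read.  A trailing uptodate block would shrink plen the same way.
 */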

static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
		loff_t pos)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}

/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t offset = offset_in_folio(folio, iomap->offset);

	if (WARN_ON_ONCE(!iomap->inline_data))
		return -EIO;

	if (folio_test_uptodate(folio))
		return 0;

	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (offset > 0)
		ifs_alloc(iter->inode, folio, iter->flags);

	folio_fill_tail(folio, offset, iomap->inline_data, size);
	iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
	return 0;
}

void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
		int error)
{
	struct iomap_folio_state *ifs = folio->private;
	bool uptodate = !error;
	bool finished = true;

	if (ifs) {
		unsigned long flags;

		spin_lock_irqsave(&ifs->state_lock, flags);
		if (!error)
			uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
		ifs->read_bytes_pending -= len;
		finished = !ifs->read_bytes_pending;
		spin_unlock_irqrestore(&ifs->state_lock, flags);
	}

	if (finished)
		folio_end_read(folio, uptodate);
}
EXPORT_SYMBOL_GPL(iomap_finish_folio_read);

static void iomap_read_init(struct folio *folio)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs) {
		size_t len = folio_size(folio);

		/*
		 * ifs->read_bytes_pending is used to track how many bytes are
		 * read in asynchronously by the IO helper. We need to track
		 * this so that we can know when the IO helper has finished
		 * reading in all the necessary ranges of the folio and can end
		 * the read.
		 *
		 * Increase ->read_bytes_pending by the folio size to start, and
		 * add a +1 bias. We'll subtract the bias and any uptodate /
		 * zeroed ranges that did not require IO in iomap_read_end()
		 * after we're done processing the folio.
		 *
		 * We do this because otherwise, we would have to increment
		 * ifs->read_bytes_pending every time a range in the folio needs
		 * to be read in, which can get expensive since the spinlock
		 * needs to be held whenever modifying ifs->read_bytes_pending.
		 *
		 * We add the bias to ensure the read has not been ended on the
		 * folio when iomap_read_end() is called, even if the IO helper
		 * has already finished reading in the entire folio.
		 */
		spin_lock_irq(&ifs->state_lock);
		WARN_ON_ONCE(ifs->read_bytes_pending != 0);
		ifs->read_bytes_pending = len + 1;
		spin_unlock_irq(&ifs->state_lock);
	}
}
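
/*
 * Example (illustrative only): for a 16K folio where 4K was already
 * uptodate and 12K is submitted for IO, iomap_read_init() sets
 * read_bytes_pending to 16385 (16384 + 1 bias).  Completions subtract
 * 12288 in total, and iomap_read_end() subtracts the remaining
 * 16384 + 1 - 12288 == 4097, so whichever side reaches zero last ends
 * the read - without taking the spinlock once per submitted range.
 */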

/*
 * This ends IO if no bytes were submitted to an IO helper.
 *
 * Otherwise, this calibrates ifs->read_bytes_pending to represent only the
 * submitted bytes (see comment in iomap_read_init()). If all bytes submitted
 * have already been completed by the IO helper, then this will end the read.
 * Else the IO helper will end the read after all submitted ranges have been
 * read.
 */
static void iomap_read_end(struct folio *folio, size_t bytes_submitted)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs) {
		bool end_read, uptodate;

		spin_lock_irq(&ifs->state_lock);
		if (!ifs->read_bytes_pending) {
			WARN_ON_ONCE(bytes_submitted);
			spin_unlock_irq(&ifs->state_lock);
			folio_unlock(folio);
			return;
		}

		/*
		 * Subtract any bytes that were initially accounted to
		 * read_bytes_pending but skipped for IO. The +1 accounts for
		 * the bias we added in iomap_read_init().
		 */
		ifs->read_bytes_pending -=
			(folio_size(folio) + 1 - bytes_submitted);

		/*
		 * If !ifs->read_bytes_pending, this means all pending reads by
		 * the IO helper have already completed, which means we need to
		 * end the folio read here. If ifs->read_bytes_pending != 0,
		 * the IO helper will end the folio read.
		 */
		end_read = !ifs->read_bytes_pending;
		if (end_read)
			uptodate = ifs_is_fully_uptodate(folio, ifs);
		spin_unlock_irq(&ifs->state_lock);
		if (end_read)
			folio_end_read(folio, uptodate);
	} else if (!bytes_submitted) {
		/*
		 * If there were no bytes submitted, this means we are
		 * responsible for unlocking the folio here, since no IO helper
		 * has taken ownership of it. If there were bytes submitted,
		 * then the IO helper will end the read via
		 * iomap_finish_folio_read().
		 */
		folio_unlock(folio);
	}
}

static int iomap_read_folio_iter(struct iomap_iter *iter,
		struct iomap_read_folio_ctx *ctx, size_t *bytes_submitted)
{
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	struct folio *folio = ctx->cur_folio;
	size_t poff, plen;
	loff_t pos_diff;
	int ret;

	if (iomap->type == IOMAP_INLINE) {
		ret = iomap_read_inline_data(iter, folio);
		if (ret)
			return ret;
		return iomap_iter_advance(iter, length);
	}

	ifs_alloc(iter->inode, folio, iter->flags);

	length = min_t(loff_t, length,
			folio_size(folio) - offset_in_folio(folio, pos));
	while (length) {
		iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff,
				&plen);

		pos_diff = pos - iter->pos;
		if (WARN_ON_ONCE(pos_diff + plen > length))
			return -EIO;

		ret = iomap_iter_advance(iter, pos_diff);
		if (ret)
			return ret;

		if (plen == 0)
			return 0;

		/* zero post-eof blocks as the page may be mapped */
		if (iomap_block_needs_zeroing(iter, pos)) {
			folio_zero_range(folio, poff, plen);
			iomap_set_range_uptodate(folio, poff, plen);
		} else {
			if (!*bytes_submitted)
				iomap_read_init(folio);
			ret = ctx->ops->read_folio_range(iter, ctx, plen);
			if (ret)
				return ret;
			*bytes_submitted += plen;
		}

		ret = iomap_iter_advance(iter, plen);
		if (ret)
			return ret;
		length -= pos_diff + plen;
		pos = iter->pos;
	}
	return 0;
}

void iomap_read_folio(const struct iomap_ops *ops,
		struct iomap_read_folio_ctx *ctx)
{
	struct folio *folio = ctx->cur_folio;
	struct iomap_iter iter = {
		.inode		= folio->mapping->host,
		.pos		= folio_pos(folio),
		.len		= folio_size(folio),
	};
	size_t bytes_submitted = 0;
	int ret;

	trace_iomap_readpage(iter.inode, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.status = iomap_read_folio_iter(&iter, ctx,
				&bytes_submitted);

	if (ctx->ops->submit_read)
		ctx->ops->submit_read(ctx);

	iomap_read_end(folio, bytes_submitted);
}
EXPORT_SYMBOL_GPL(iomap_read_folio);
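
/*
 * Usage sketch (hedged, with hypothetical names): a filesystem's
 * ->read_folio aop would typically wrap the helper above like this, where
 * myfs_iomap_ops and myfs_read_ctx_ops stand in for the filesystem's
 * iomap_ops and read context ops:
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		struct iomap_read_folio_ctx ctx = {
 *			.ops		= &myfs_read_ctx_ops,
 *			.cur_folio	= folio,
 *		};
 *
 *		iomap_read_folio(&myfs_iomap_ops, &ctx);
 *		return 0;
 *	}
 */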

static int iomap_readahead_iter(struct iomap_iter *iter,
		struct iomap_read_folio_ctx *ctx, size_t *cur_bytes_submitted)
{
	int ret;

	while (iomap_length(iter)) {
		if (ctx->cur_folio &&
		    offset_in_folio(ctx->cur_folio, iter->pos) == 0) {
			iomap_read_end(ctx->cur_folio, *cur_bytes_submitted);
			ctx->cur_folio = NULL;
		}
		if (!ctx->cur_folio) {
			ctx->cur_folio = readahead_folio(ctx->rac);
			if (WARN_ON_ONCE(!ctx->cur_folio))
				return -EINVAL;
			*cur_bytes_submitted = 0;
		}
		ret = iomap_read_folio_iter(iter, ctx, cur_bytes_submitted);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @ops: The operations vector for the filesystem.
 * @ctx: The ctx used for issuing readahead.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it. The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations. This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(const struct iomap_ops *ops,
		struct iomap_read_folio_ctx *ctx)
{
	struct readahead_control *rac = ctx->rac;
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
	};
	size_t cur_bytes_submitted;

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.status = iomap_readahead_iter(&iter, ctx,
				&cur_bytes_submitted);

	if (ctx->ops->submit_read)
		ctx->ops->submit_read(ctx);

	if (ctx->cur_folio)
		iomap_read_end(ctx->cur_folio, cur_bytes_submitted);
}
EXPORT_SYMBOL_GPL(iomap_readahead);

/*
 * iomap_is_partially_uptodate checks whether blocks within a folio are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the specified part
 * of the folio are uptodate.
 */
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
	struct iomap_folio_state *ifs = folio->private;
	struct inode *inode = folio->mapping->host;
	unsigned first, last;

	if (!ifs)
		return false;

	/* Caller's range may extend past the end of this folio */
	count = min(folio_size(folio) - from, count);

	/* First and last blocks in range within folio */
	first = from >> inode->i_blkbits;
	last = (from + count - 1) >> inode->i_blkbits;

	return ifs_next_nonuptodate_block(folio, first, last) > last;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

/**
 * iomap_get_folio - get a folio reference for writing
 * @iter: iteration structure
 * @pos: start offset of write
 * @len: Suggested size of folio to create.
 *
 * Returns a locked reference to the folio at @pos, or an error pointer if the
 * folio could not be obtained.
 */
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
{
	fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;

	if (iter->flags & IOMAP_NOWAIT)
		fgp |= FGP_NOWAIT;
	if (iter->flags & IOMAP_DONTCACHE)
		fgp |= FGP_DONTCACHE;
	fgp |= fgf_set_order(len);

	return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
			fgp, mapping_gfp_mask(iter->inode->i_mapping));
}
EXPORT_SYMBOL_GPL(iomap_get_folio);

bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * If the folio is dirty, we refuse to release our metadata because
	 * it may be partially dirty. Once we track per-block dirty state,
	 * we can release the metadata if every block is dirty.
	 */
	if (folio_test_dirty(folio))
		return false;
	ifs_free(folio);
	return true;
}
EXPORT_SYMBOL_GPL(iomap_release_folio);

void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	trace_iomap_invalidate_folio(folio->mapping->host,
			folio_pos(folio) + offset, len);

	/*
	 * If we're invalidating the entire folio, clear the dirty state
	 * from it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == folio_size(folio)) {
		WARN_ON_ONCE(folio_test_writeback(folio));
		folio_cancel_dirty(folio);
		ifs_free(folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);

bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	size_t len = folio_size(folio);

	ifs_alloc(inode, folio, 0);
	iomap_set_range_dirty(folio, 0, len);
	return filemap_dirty_folio(mapping, folio);
}
EXPORT_SYMBOL_GPL(iomap_dirty_folio);

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size),
				pos + len - 1);
}

static int __iomap_write_begin(const struct iomap_iter *iter,
		const struct iomap_write_ops *write_ops, size_t len,
		struct folio *folio)
{
	struct iomap_folio_state *ifs;
	loff_t pos = iter->pos;
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
	size_t from = offset_in_folio(folio, pos), to = from + len;
	size_t poff, plen;

	/*
	 * If the write or zeroing completely overlaps the current folio, then
	 * the entire folio will be dirtied so there is no need for
	 * per-block state tracking structures to be attached to this folio.
	 * For the unshare case, we must read in the ondisk contents because we
	 * are not changing pagecache contents.
	 */
	if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
	    pos + len >= folio_next_pos(folio))
		return 0;

	ifs = ifs_alloc(iter->inode, folio, iter->flags);
	if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
		return -EAGAIN;

	if (folio_test_uptodate(folio))
		return 0;

	do {
		iomap_adjust_read_range(iter->inode, folio, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		/*
		 * If the read range will be entirely overwritten by the write,
		 * we can skip having to zero/read it in.
		 */
		if (!(iter->flags & IOMAP_UNSHARE) && from <= poff &&
		    to >= poff + plen)
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			folio_zero_segments(folio, poff, from, to, poff + plen);
		} else {
			int status;

			if (iter->flags & IOMAP_NOWAIT)
				return -EAGAIN;

			if (write_ops && write_ops->read_folio_range)
				status = write_ops->read_folio_range(iter,
						folio, block_start, plen);
			else
				status = iomap_bio_read_folio_range_sync(iter,
						folio, block_start, plen);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(folio, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}
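
/*
 * Example (illustrative only): a 100 byte write at offset 50 into a
 * not-uptodate 4K block cannot be applied directly, because a later
 * read_folio would have to fill the whole block and could overwrite the
 * copied bytes.  __iomap_write_begin() therefore either zeroes the
 * surrounding bytes (for blocks that need zeroing, e.g. holes) or reads
 * the block in first; a write covering whole blocks skips both steps.
 */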

static struct folio *__iomap_get_folio(struct iomap_iter *iter,
		const struct iomap_write_ops *write_ops, size_t len)
{
	loff_t pos = iter->pos;

	if (!mapping_large_folio_support(iter->inode->i_mapping))
		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

	if (iter->iomap.flags & IOMAP_F_FOLIO_BATCH) {
		struct folio *folio = folio_batch_next(iter->fbatch);

		if (!folio)
			return NULL;

		/*
		 * The folio mapping generally shouldn't have changed based on
		 * fs locks, but be consistent with filemap lookup and retry
		 * the iter if it does.
		 */
		folio_lock(folio);
		if (unlikely(folio->mapping != iter->inode->i_mapping)) {
			iter->iomap.flags |= IOMAP_F_STALE;
			folio_unlock(folio);
			return NULL;
		}

		folio_get(folio);
		folio_wait_stable(folio);
		return folio;
	}

	if (write_ops && write_ops->get_folio)
		return write_ops->get_folio(iter, pos, len);
	return iomap_get_folio(iter, pos, len);
}

static void __iomap_put_folio(struct iomap_iter *iter,
		const struct iomap_write_ops *write_ops, size_t ret,
		struct folio *folio)
{
	loff_t pos = iter->pos;

	if (write_ops && write_ops->put_folio) {
		write_ops->put_folio(iter->inode, pos, ret, folio);
	} else {
		folio_unlock(folio);
		folio_put(folio);
	}
}

/* trim pos and bytes to within a given folio */
static loff_t iomap_trim_folio_range(struct iomap_iter *iter,
		struct folio *folio, size_t *offset, u64 *bytes)
{
	loff_t pos = iter->pos;
	size_t fsize = folio_size(folio);

	WARN_ON_ONCE(pos < folio_pos(folio));
	WARN_ON_ONCE(pos >= folio_pos(folio) + fsize);

	*offset = offset_in_folio(folio, pos);
	*bytes = min(*bytes, fsize - *offset);

	return pos;
}

static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct folio *folio)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, folio);
}

/*
 * Grab and prepare a folio for write based on iter state. Returns the folio,
 * offset, and length. Callers can optionally pass a max length *plen,
 * otherwise init to zero.
 */
static int iomap_write_begin(struct iomap_iter *iter,
		const struct iomap_write_ops *write_ops, struct folio **foliop,
		size_t *poffset, u64 *plen)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos;
	u64 len = min_t(u64, SIZE_MAX, iomap_length(iter));
	struct folio *folio;
	int status = 0;

	len = min_not_zero(len, *plen);
	*foliop = NULL;
	*plen = 0;

	if (fatal_signal_pending(current))
		return -EINTR;

	folio = __iomap_get_folio(iter, write_ops, len);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/*
	 * No folio means we're done with a batch. We still have range to
	 * process so return and let the caller iterate and refill the batch.
	 */
	if (!folio) {
		WARN_ON_ONCE(!(iter->iomap.flags & IOMAP_F_FOLIO_BATCH));
		return 0;
	}

	/*
	 * Now we have a locked folio, before we do anything with it we need to
	 * check that the iomap we have cached is not stale. The inode extent
	 * mapping can change due to concurrent IO in flight (e.g.
	 * IOMAP_UNWRITTEN state can change and memory reclaim could have
	 * reclaimed a previously partially written page at this index after IO
	 * completion before this write reaches this file offset) and hence we
	 * could do the wrong thing here (zero a page range incorrectly or fail
	 * to zero) and corrupt data.
	 */
	if (write_ops && write_ops->iomap_valid) {
		bool iomap_valid = write_ops->iomap_valid(iter->inode,
							  &iter->iomap);
		if (!iomap_valid) {
			iter->iomap.flags |= IOMAP_F_STALE;
			status = 0;
			goto out_unlock;
		}
	}

	/*
	 * The folios in a batch may not be contiguous. If we've skipped
	 * forward, advance the iter to the pos of the current folio. If the
	 * folio starts beyond the end of the mapping, it may have been trimmed
	 * since the lookup for whatever reason. Return a NULL folio to
	 * terminate the op.
	 */
	if (folio_pos(folio) > iter->pos) {
		len = min_t(u64, folio_pos(folio) - iter->pos,
				iomap_length(iter));
		status = iomap_iter_advance(iter, len);
		len = iomap_length(iter);
		if (status || !len)
			goto out_unlock;
	}

	pos = iomap_trim_folio_range(iter, folio, poffset, &len);

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, folio);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, write_ops, len, folio);

	if (unlikely(status))
		goto out_unlock;

	*foliop = folio;
	*plen = len;
	return 0;

out_unlock:
	__iomap_put_folio(iter, write_ops, 0, folio);
	return status;
}
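
/*
 * Calling pattern (a hedged sketch, not a verbatim caller): write paths in
 * this file pair iomap_write_begin() with iomap_write_end() around the
 * data copy, roughly:
 *
 *	u64 bytes = 0;		// 0 means no caller-imposed max length
 *	size_t offset;
 *	struct folio *folio;
 *
 *	status = iomap_write_begin(iter, write_ops, &folio, &offset, &bytes);
 *	if (!status && folio) {
 *		// copy up to "bytes" bytes into the folio at "offset" ...
 *		iomap_write_end(iter, bytes, copied, folio);
 *		__iomap_put_folio(iter, write_ops, copied, folio);
 *	}
 */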

static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	flush_dcache_folio(folio);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a read_folio reading them and overwriting a
	 * partial write. However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * read_folio might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !folio_test_uptodate(folio)))
		return false;
	iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
	iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
	filemap_dirty_folio(inode->i_mapping, folio);
	return true;
}

static bool iomap_write_end_inline(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!folio_test_uptodate(folio));
	BUG_ON(!iomap_inline_data_valid(iomap));

	if (WARN_ON_ONCE(!iomap->inline_data))
		return false;

	flush_dcache_folio(folio);
	addr = kmap_local_folio(folio, pos);
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
	return true;
}

/*
 * Returns true if all copied bytes have been written to the pagecache,
 * otherwise returns false.
 */
static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied,
		struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;

	if (srcmap->type == IOMAP_INLINE)
		return iomap_write_end_inline(iter, folio, pos, copied);

	if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		size_t bh_written;

		bh_written = block_write_end(pos, len, copied, folio);
		WARN_ON_ONCE(bh_written != copied && bh_written != 0);
		return bh_written == copied;
	}

	return __iomap_write_end(iter->inode, pos, len, copied, folio);
}
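
/*
 * Example (illustrative only): if a 300 byte copy into a not-yet-uptodate
 * block faults after 100 bytes, __iomap_write_end() returns false and the
 * 100 partial bytes are discarded rather than marked uptodate; the caller
 * (iomap_write_iter() below) reverts the iov_iter and retries with a
 * shorter length.  Once the folio is fully uptodate, a short copy can be
 * accepted instead, because a read_folio can no longer destroy the
 * partially written data.
 */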

static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i,
		const struct iomap_write_ops *write_ops)
{
	ssize_t total_written = 0;
	int status = 0;
	struct address_space *mapping = iter->inode->i_mapping;
	size_t chunk = mapping_max_folio_size(mapping);
	unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;

	do {
		struct folio *folio;
		loff_t old_size;
		size_t offset;		/* Offset into folio */
		u64 bytes;		/* Bytes to write to folio */
		size_t copied;		/* Bytes copied from user */
		u64 written;		/* Bytes that have been written */
		loff_t pos;

		bytes = iov_iter_count(i);
retry:
		offset = iter->pos & (chunk - 1);
		bytes = min(chunk - offset, bytes);
		status = balance_dirty_pages_ratelimited_flags(mapping,
				bdp_flags);
		if (unlikely(status))
			break;

		if (bytes > iomap_length(iter))
			bytes = iomap_length(iter);

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * For async buffered writes the assumption is that the user
		 * page has already been faulted in. This can be optimized by
		 * faulting the user page.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, write_ops, &folio, &offset,
					   &bytes);
		if (unlikely(status)) {
			iomap_write_failed(iter->inode, iter->pos, bytes);
			break;
		}
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		pos = iter->pos;

		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
		written = iomap_write_end(iter, bytes, copied, folio) ?
			  copied : 0;

		/*
		 * Update the in-memory inode size after copying the data into
		 * the page cache. It's up to the file system to write the
		 * updated size to disk, preferably after I/O completion so that
		 * no stale data is exposed. Only once that's done can we
		 * unlock and release the folio.
		 */
		old_size = iter->inode->i_size;
		if (pos + written > old_size) {
			i_size_write(iter->inode, pos + written);
			iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
		}
		__iomap_put_folio(iter, write_ops, written, folio);

		if (old_size < pos)
			pagecache_isize_extended(iter->inode, old_size, pos);

		cond_resched();
		if (unlikely(written == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely. Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			iomap_write_failed(iter->inode, pos, bytes);
			iov_iter_revert(i, copied);

			if (chunk > PAGE_SIZE)
				chunk /= 2;
			if (copied) {
				bytes = copied;
				goto retry;
			}
		} else {
			total_written += written;
			iomap_iter_advance(iter, written);
		}
	} while (iov_iter_count(i) && iomap_length(iter));

	return total_written ? 0 : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops,
		const struct iomap_write_ops *write_ops, void *private)
{
	struct iomap_iter iter = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(i),
		.flags		= IOMAP_WRITE,
		.private	= private,
	};
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT)
		iter.flags |= IOMAP_NOWAIT;
	if (iocb->ki_flags & IOCB_DONTCACHE)
		iter.flags |= IOMAP_DONTCACHE;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.status = iomap_write_iter(&iter, i, write_ops);

	if (unlikely(iter.pos == iocb->ki_pos))
		return ret;
	ret = iter.pos - iocb->ki_pos;
	iocb->ki_pos = iter.pos;
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
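
/*
 * Usage sketch (hedged, with hypothetical names): a filesystem's
 * ->write_iter typically takes the inode lock, runs the generic checks,
 * and then hands off to iomap_file_buffered_write(), e.g.:
 *
 *	static ssize_t myfs_file_write_iter(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&myfs_iomap_ops, NULL, NULL);
 *		inode_unlock(inode);
 *		return ret;
 *	}
 */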

static void iomap_write_delalloc_ifs_punch(struct inode *inode,
		struct folio *folio, loff_t start_byte, loff_t end_byte,
		struct iomap *iomap, iomap_punch_t punch)
{
	unsigned int first_blk, last_blk;
	loff_t last_byte;
	u8 blkbits = inode->i_blkbits;
	struct iomap_folio_state *ifs;

	/*
	 * When we have per-block dirty tracking, there can be
	 * blocks within a folio which are marked uptodate
	 * but not dirty. In that case it is necessary to punch
	 * out such blocks to avoid leaking any delalloc blocks.
	 */
	ifs = folio->private;
	if (!ifs)
		return;

	last_byte = min_t(loff_t, end_byte - 1, folio_next_pos(folio) - 1);
	first_blk = offset_in_folio(folio, start_byte) >> blkbits;
	last_blk = offset_in_folio(folio, last_byte) >> blkbits;
	while ((first_blk = ifs_next_clean_block(folio, first_blk, last_blk))
			<= last_blk) {
		punch(inode, folio_pos(folio) + (first_blk << blkbits),
				1 << blkbits, iomap);
		first_blk++;
	}
}

static void iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
		struct iomap *iomap, iomap_punch_t punch)
{
	if (!folio_test_dirty(folio))
		return;

	/* if dirty, punch up to offset */
	if (start_byte > *punch_start_byte) {
		punch(inode, *punch_start_byte, start_byte - *punch_start_byte,
				iomap);
	}

	/* Punch non-dirty blocks within folio */
	iomap_write_delalloc_ifs_punch(inode, folio, start_byte, end_byte,
			iomap, punch);

	/*
	 * Make sure the next punch start is correctly bound to
	 * the end of this data range, not the end of the folio.
	 */
	*punch_start_byte = min_t(loff_t, end_byte, folio_next_pos(folio));
}

/*
 * Scan the data range passed to us for dirty page cache folios. If we find a
 * dirty folio, punch out the preceding range and update the offset from which
 * the next punch will start from.
 *
 * We can punch out storage reservations under clean pages because they either
 * contain data that has been written back - in which case the delalloc punch
 * over that range is a no-op - or they were instantiated by read faults, in
 * which case they contain zeroes and we can remove the delalloc backing range
 * and any new writes to those pages will do the normal hole filling operation...
 *
 * This makes the logic simple: we need to keep the delalloc extents only over
 * the dirty ranges of the page cache.
 *
 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
 * simplify range iterations.
 */
static void iomap_write_delalloc_scan(struct inode *inode,
		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
		struct iomap *iomap, iomap_punch_t punch)
{
	while (start_byte < end_byte) {
		struct folio *folio;

		/* grab locked page */
		folio = filemap_lock_folio(inode->i_mapping,
				start_byte >> PAGE_SHIFT);
		if (IS_ERR(folio)) {
			start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
					PAGE_SIZE;
			continue;
		}

		iomap_write_delalloc_punch(inode, folio, punch_start_byte,
				start_byte, end_byte, iomap, punch);

		/* move offset to start of next folio in range */
		start_byte = folio_next_pos(folio);
		folio_unlock(folio);
		folio_put(folio);
	}
}

/*
 * When a short write occurs, the filesystem might need to use ->iomap_end
 * to remove space reservations created in ->iomap_begin.
 *
 * For filesystems that use delayed allocation, there can be dirty pages over
 * the delalloc extent outside the range of a short write but still within the
 * delalloc extent allocated for this iomap if the write raced with page
 * faults.
 *
 * Punch out all the delalloc blocks in the range given except for those that
 * have dirty data still pending in the page cache - those are going to be
 * written and so must still retain the delalloc backing for writeback.
 *
 * The punch() callback *must* only punch delalloc extents in the range passed
 * to it. It must skip over all other types of extents in the range and leave
 * them completely unchanged. It must do this punch atomically with respect to
 * other extent modifications.
 *
 * The punch() callback may be called with a folio locked to prevent writeback
 * extent allocation racing at the edge of the range we are currently punching.
 * The locked folio may or may not cover the range being punched, so it is not
 * safe for the punch() callback to lock folios itself.
 *
 * Lock order is:
 *
 * inode->i_rwsem (shared or exclusive)
 *   inode->i_mapping->invalidate_lock (exclusive)
 *     folio_lock()
 *       ->punch
 *         internal filesystem allocation lock
 *
 * As we are scanning the page cache for data, we don't need to reimplement the
 * wheel - mapping_seek_hole_data() does exactly what we need to identify the
 * start and end of data ranges correctly even for sub-folio block sizes. This
 * byte range based iteration is especially convenient because it means we
 * don't have to care about variable size folios, nor where the start or end of
 * the data range lies within a folio, if they lie within the same folio or even
 * if there are multiple discontiguous data ranges within the folio.
 *
 * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
 * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
 * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
 * date. A write page fault can then mark it dirty. If we then fail a write()
 * beyond EOF into that up to date cached range, we allocate a delalloc block
 * beyond EOF and then have to punch it out. Because the range is up to date,
 * mapping_seek_hole_data() will return it, and we will skip the punch because
 * the folio is dirty. This is incorrect - we always need to punch out delalloc
 * beyond EOF in this case as writeback will never write back and convert that
 * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF,
 * resulting in always punching out the range from the EOF to the end of the
 * range the iomap spans.
 *
 * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it
 * matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA
 * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
 * returns the end of the data range (data_end). Using closed intervals would
 * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
 * the code to subtle off-by-one bugs....
 */
void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
		loff_t end_byte, unsigned flags, struct iomap *iomap,
		iomap_punch_t punch)
{
	loff_t punch_start_byte = start_byte;
	loff_t scan_end_byte = min(i_size_read(inode), end_byte);

	/*
	 * The caller must hold invalidate_lock to avoid races with page faults
	 * re-instantiating folios and dirtying them via ->page_mkwrite whilst
	 * we walk the cache and perform delalloc extent removal. Failing to do
	 * this can leave dirty pages with no space reservation in the cache.
	 */
	lockdep_assert_held_write(&inode->i_mapping->invalidate_lock);

	while (start_byte < scan_end_byte) {
		loff_t data_end;

		start_byte = mapping_seek_hole_data(inode->i_mapping,
				start_byte, scan_end_byte, SEEK_DATA);
		/*
		 * If there is no more data to scan, all that is left is to
		 * punch out the remaining range.
		 *
		 * Note that mapping_seek_hole_data is only supposed to return
		 * either an offset or -ENXIO, so WARN on any other error as
		 * that would be an API change without updating the callers.
		 */
		if (start_byte == -ENXIO || start_byte == scan_end_byte)
			break;
		if (WARN_ON_ONCE(start_byte < 0))
			return;
		WARN_ON_ONCE(start_byte < punch_start_byte);
		WARN_ON_ONCE(start_byte > scan_end_byte);

		/*
		 * We find the end of this contiguous cached data range by
		 * seeking from start_byte to the beginning of the next hole.
		 */
		data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
				scan_end_byte, SEEK_HOLE);
		if (WARN_ON_ONCE(data_end < 0))
			return;

		/*
		 * If we race with post-direct I/O invalidation of the page cache,
		 * there might be no data left at start_byte.
		 */
		if (data_end == start_byte)
			continue;

		WARN_ON_ONCE(data_end < start_byte);
		WARN_ON_ONCE(data_end > scan_end_byte);

		iomap_write_delalloc_scan(inode, &punch_start_byte, start_byte,
				data_end, iomap, punch);

		/* The next data search starts at the end of this one. */
		start_byte = data_end;
	}

	if (punch_start_byte < end_byte)
		punch(inode, punch_start_byte, end_byte - punch_start_byte,
				iomap);
}
EXPORT_SYMBOL_GPL(iomap_write_delalloc_release);
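
/*
 * Example (illustrative only): suppose a write reserved delalloc over
 * [0, 64K) and the cache holds 16K folios, where the folios at 0 and 32K
 * are dirty and the folio at 16K is uptodate but clean.  The scan visits
 * each folio in the cached data range: the dirty folio at 0 advances the
 * punch start to 16K, the clean folio at 16K is skipped, and the dirty
 * folio at 32K first punches the clean gap [16K, 32K) and then advances
 * the punch start to 48K.  The final tail punch removes [48K, 64K), so
 * only the dirty ranges keep their delalloc backing for writeback.
 */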

static int iomap_unshare_iter(struct iomap_iter *iter,
		const struct iomap_write_ops *write_ops)
{
	struct iomap *iomap = &iter->iomap;
	u64 bytes = iomap_length(iter);
	int status;

	if (!iomap_want_unshare_iter(iter))
		return iomap_iter_advance(iter, bytes);

	do {
		struct folio *folio;
		size_t offset;
		bool ret;

		bytes = min_t(u64, SIZE_MAX, bytes);
		status = iomap_write_begin(iter, write_ops, &folio, &offset,
				&bytes);
		if (unlikely(status))
			return status;
		if (iomap->flags & IOMAP_F_STALE)
			break;

		ret = iomap_write_end(iter, bytes, bytes, folio);
		__iomap_put_folio(iter, write_ops, bytes, folio);
		if (WARN_ON_ONCE(!ret))
			return -EIO;

		cond_resched();

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);

		status = iomap_iter_advance(iter, bytes);
		if (status)
			break;
	} while ((bytes = iomap_length(iter)) > 0);

	return status;
}

int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops,
		const struct iomap_write_ops *write_ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.flags		= IOMAP_WRITE | IOMAP_UNSHARE,
	};
	loff_t size = i_size_read(inode);
	int ret;

	if (pos < 0 || pos >= size)
		return 0;

	iter.len = min(len, size - pos);
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.status = iomap_unshare_iter(&iter, write_ops);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);

/*
 * Flush the remaining range of the iter and mark the current mapping stale.
 * This is used when zero range sees an unwritten mapping that may have had
 * dirty pagecache over it.
 */
static inline int iomap_zero_iter_flush_and_stale(struct iomap_iter *i)
{
	struct address_space *mapping = i->inode->i_mapping;
	loff_t end = i->pos + i->len - 1;

	i->iomap.flags |= IOMAP_F_STALE;
	return filemap_write_and_wait_range(mapping, i->pos, end);
}

static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
		const struct iomap_write_ops *write_ops)
{
	u64 bytes = iomap_length(iter);
	int status;

	do {
		struct folio *folio;
		size_t offset;
		bool ret;

		bytes = min_t(u64, SIZE_MAX, bytes);
		status = iomap_write_begin(iter, write_ops, &folio, &offset,
				&bytes);
		if (status)
			return status;
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		/* a NULL folio means we're done with a folio batch */
		if (!folio) {
			status = iomap_iter_advance_full(iter);
			break;
		}

		/* warn about zeroing folios beyond eof that won't write back */
		WARN_ON_ONCE(folio_pos(folio) > iter->inode->i_size);

		trace_iomap_zero_iter(iter->inode, folio_pos(folio) + offset,
				bytes);

		folio_zero_range(folio, offset, bytes);
		folio_mark_accessed(folio);

		ret = iomap_write_end(iter, bytes, bytes, folio);
		__iomap_put_folio(iter, write_ops, bytes, folio);
		if (WARN_ON_ONCE(!ret))
			return -EIO;

		status = iomap_iter_advance(iter, bytes);
		if (status)
			break;
	} while ((bytes = iomap_length(iter)) > 0);

	if (did_zero)
		*did_zero = true;
	return status;
}

/**
 * iomap_fill_dirty_folios - fill a folio batch with dirty folios
 * @iter: Iteration structure
 * @start: Start offset of range. Updated based on lookup progress.
 * @end: End offset of range
 * @iomap_flags: Flags to set on the associated iomap to track the batch.
 *
 * Returns the folio count directly. Also returns, via the out parameters, the
 * control flag associated with the batch lookup (if one was performed) and the
 * expected start offset of a subsequent lookup. The caller is responsible for
 * setting the flag on the associated iomap.
 */
unsigned int
iomap_fill_dirty_folios(
	struct iomap_iter	*iter,
	loff_t			*start,
	loff_t			end,
	unsigned int		*iomap_flags)
{
	struct address_space *mapping = iter->inode->i_mapping;
	pgoff_t pstart = *start >> PAGE_SHIFT;
	pgoff_t pend = (end - 1) >> PAGE_SHIFT;
	unsigned int count;

	if (!iter->fbatch) {
		*start = end;
		return 0;
	}

	count = filemap_get_folios_dirty(mapping, &pstart, pend, iter->fbatch);
	*start = (pstart << PAGE_SHIFT);
	*iomap_flags |= IOMAP_F_FOLIO_BATCH;
	return count;
}
EXPORT_SYMBOL_GPL(iomap_fill_dirty_folios);

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops,
		const struct iomap_write_ops *write_ops, void *private)
{
	struct folio_batch fbatch;
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_ZERO,
		.private	= private,
		.fbatch		= &fbatch,
	};
	struct address_space *mapping = inode->i_mapping;
	int ret;
	bool range_dirty;

	folio_batch_init(&fbatch);

	/*
	 * To avoid an unconditional flush, check pagecache state and only flush
	 * if dirty and the fs returns a mapping that might convert on
	 * writeback.
	 */
	range_dirty = filemap_range_needs_writeback(mapping, iter.pos,
					iter.pos + iter.len - 1);
	while ((ret = iomap_iter(&iter, ops)) > 0) {
		const struct iomap *srcmap = iomap_iter_srcmap(&iter);

		if (WARN_ON_ONCE((iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
				 srcmap->type != IOMAP_UNWRITTEN))
			return -EIO;

		if (!(iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
		    (srcmap->type == IOMAP_HOLE ||
		     srcmap->type == IOMAP_UNWRITTEN)) {
			s64 status;

			if (range_dirty) {
				range_dirty = false;
				status = iomap_zero_iter_flush_and_stale(&iter);
			} else {
				status = iomap_iter_advance_full(&iter);
			}
			iter.status = status;
			continue;
		}

		iter.status = iomap_zero_iter(&iter, did_zero, write_ops);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops,
		const struct iomap_write_ops *write_ops, void *private)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops,
			write_ops, private);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);

static int iomap_folio_mkwrite_iter(struct iomap_iter *iter,
		struct folio *folio)
{
	loff_t length = iomap_length(iter);
	int ret;

	if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(folio, iter->pos, length, NULL,
				&iter->iomap);
		if (ret)
			return ret;
		block_commit_write(folio, 0, length);
	} else {
		WARN_ON_ONCE(!folio_test_uptodate(folio));
		folio_mark_dirty(folio);
	}

	return iomap_iter_advance(iter, length);
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops,
		void *private)
{
	struct iomap_iter iter = {
		.inode		= file_inode(vmf->vma->vm_file),
		.flags		= IOMAP_WRITE | IOMAP_FAULT,
		.private	= private,
	};
	struct folio *folio = page_folio(vmf->page);
	ssize_t ret;

	folio_lock(folio);
	ret = folio_mkwrite_check_truncate(folio, iter.inode);
	if (ret < 0)
		goto out_unlock;
	iter.pos = folio_pos(folio);
	iter.len = ret;
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.status = iomap_folio_mkwrite_iter(&iter, folio);

	if (ret < 0)
		goto out_unlock;
	folio_wait_stable(folio);
	return VM_FAULT_LOCKED;
out_unlock:
	folio_unlock(folio);
	return vmf_fs_error(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);

static void iomap_writeback_init(struct inode *inode, struct folio *folio)
{
	struct iomap_folio_state *ifs = folio->private;

	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
	if (ifs) {
		WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending) != 0);
		/*
		 * Set this to the folio size. After processing the folio for
		 * writeback in iomap_writeback_folio(), we'll subtract any
		 * ranges not written back.
		 *
		 * We do this because otherwise, we would have to atomically
		 * increment ifs->write_bytes_pending every time a range in the
		 * folio needs to be written back.
		 */
		atomic_set(&ifs->write_bytes_pending, folio_size(folio));
	}
}

void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
		size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
	WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);

	if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
		folio_end_writeback(folio);
}
EXPORT_SYMBOL_GPL(iomap_finish_folio_write);
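
/*
 * Example (illustrative only): for a 16K folio with only 8K dirty,
 * write_bytes_pending starts at 16384.  iomap_writeback_folio() submits
 * the 8K dirty range and then subtracts the 8K that was never submitted,
 * so the IO completion's atomic_sub_and_test() of the remaining 8192 is
 * what finally ends writeback - regardless of whether the completion runs
 * before or after the submission path finishes processing the folio.
 */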

static int iomap_writeback_range(struct iomap_writepage_ctx *wpc,
		struct folio *folio, u64 pos, u32 rlen, u64 end_pos,
		size_t *bytes_submitted)
{
	do {
		ssize_t ret;

		ret = wpc->ops->writeback_range(wpc, folio, pos, rlen, end_pos);
		if (WARN_ON_ONCE(ret == 0 || ret > rlen))
			return -EIO;
		if (ret < 0)
			return ret;
		rlen -= ret;
		pos += ret;

		/*
		 * Holes are not written back by ->writeback_range, so track
		 * if we did handle anything that is not a hole here.
		 */
		if (wpc->iomap.type != IOMAP_HOLE)
			*bytes_submitted += ret;
	} while (rlen);

	return 0;
}

/*
 * Check interaction of the folio with the file end.
 *
 * If the folio is entirely beyond i_size, return false. If it straddles
 * i_size, adjust end_pos and zero all data beyond i_size.
 */
static bool iomap_writeback_handle_eof(struct folio *folio, struct inode *inode,
		u64 *end_pos)
{
	u64 isize = i_size_read(inode);

	if (*end_pos > isize) {
		size_t poff = offset_in_folio(folio, isize);
		pgoff_t end_index = isize >> PAGE_SHIFT;

		/*
		 * If the folio is entirely outside of i_size, skip it.
		 *
		 * This can happen due to a truncate operation that is in
		 * progress and in that case truncate will finish it off once
		 * we've dropped the folio lock.
		 *
		 * Note that the pgoff_t used for end_index is an unsigned long.
		 * If the given offset is greater than 16TB on a 32-bit system,
		 * then if we checked if the folio is fully outside i_size with
		 * "if (folio->index >= end_index + 1)", "end_index + 1" would
		 * overflow and evaluate to 0. Hence this folio would be
		 * redirtied and written out repeatedly, which would result in
		 * an infinite loop; the user program performing this operation
		 * would hang. Instead, we can detect this situation by
		 * checking if the folio is totally beyond i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (folio->index > end_index ||
		    (folio->index == end_index && poff == 0))
			return false;

		/*
		 * The folio straddles i_size.
		 *
		 * It must be zeroed out on each and every writepage invocation
		 * because it may be mmapped:
		 *
		 *    A file is mapped in multiples of the page size. For a
		 *    file that is not a multiple of the page size, the
		 *    remaining memory is zeroed when mapped, and writes to
		 *    that region are not written out to the file.
		 *
		 * Also adjust the end_pos to the end of file and skip writeback
		 * for all blocks entirely beyond i_size.
		 */
		folio_zero_segment(folio, poff, folio_size(folio));
		*end_pos = isize;
	}

	return true;
}

int iomap_writeback_folio(struct iomap_writepage_ctx *wpc, struct folio *folio)
{
	struct iomap_folio_state *ifs = folio->private;
	struct inode *inode = wpc->inode;
	u64 pos = folio_pos(folio);
	u64 end_pos = pos + folio_size(folio);
	u64 end_aligned = 0;
	size_t bytes_submitted = 0;
	int error = 0;
	u32 rlen;

	WARN_ON_ONCE(!folio_test_locked(folio));
	WARN_ON_ONCE(folio_test_dirty(folio));
	WARN_ON_ONCE(folio_test_writeback(folio));

	trace_iomap_writeback_folio(inode, pos, folio_size(folio));

	if (!iomap_writeback_handle_eof(folio, inode, &end_pos))
		return 0;
	WARN_ON_ONCE(end_pos <= pos);

	if (i_blocks_per_folio(inode, folio) > 1) {
		if (!ifs) {
			ifs = ifs_alloc(inode, folio, 0);
			iomap_set_range_dirty(folio, 0, end_pos - pos);
		}

		iomap_writeback_init(inode, folio);
	}

	/*
	 * Set the writeback bit ASAP, as the I/O completion for the single
	 * block per folio case can happen as soon as we're submitting the bio.
	 */
	folio_start_writeback(folio);

	/*
	 * Walk through the folio to find dirty areas to write back.
	 */
	end_aligned = round_up(end_pos, i_blocksize(inode));
	while ((rlen = iomap_find_dirty_range(folio, &pos, end_aligned))) {
		error = iomap_writeback_range(wpc, folio, pos, rlen, end_pos,
				&bytes_submitted);
		if (error)
			break;
		pos += rlen;
	}

	if (bytes_submitted)
		wpc->nr_folios++;

	/*
	 * We can have dirty bits set past end of file in page_mkwrite path
	 * while mapping the last partial folio. Hence it's better to clear
	 * all the dirty bits in the folio here.
	 */
	iomap_clear_range_dirty(folio, 0, folio_size(folio));

	/*
	 * Usually the writeback bit is cleared by the I/O completion handler.
	 * But we may end up either not actually writing any blocks, or (when
	 * there are multiple blocks in a folio) all I/O might have finished
	 * already at this point. In that case we need to clear the writeback
	 * bit ourselves right after unlocking the page.
	 */
	if (ifs) {
		/*
		 * Subtract any bytes that were initially accounted to
		 * write_bytes_pending but skipped for writeback.
		 */
		size_t bytes_not_submitted = folio_size(folio) -
				bytes_submitted;

		if (bytes_not_submitted)
			iomap_finish_folio_write(inode, folio,
					bytes_not_submitted);
	} else if (!bytes_submitted) {
		folio_end_writeback(folio);
	}

	mapping_set_error(inode->i_mapping, error);
	return error;
}
EXPORT_SYMBOL_GPL(iomap_writeback_folio);
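
/*
 * Usage sketch (hedged, with hypothetical names): a filesystem's
 * ->writepages aop typically builds a writepage ctx around its writeback
 * ops and calls the iomap_writepages() helper below, e.g.:
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct iomap_writepage_ctx wpc = {
 *			.inode	= mapping->host,
 *			.wbc	= wbc,
 *			.ops	= &myfs_writeback_ops,
 *		};
 *
 *		return iomap_writepages(&wpc);
 *	}
 */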

int
iomap_writepages(struct iomap_writepage_ctx *wpc)
{
	struct address_space *mapping = wpc->inode->i_mapping;
	struct folio *folio = NULL;
	int error;

	/*
	 * Writeback from reclaim context should never happen except in the case
	 * of a VM regression so warn about it and refuse to write the data.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC | PF_KSWAPD)) ==
			PF_MEMALLOC))
		return -EIO;

	while ((folio = writeback_iter(mapping, wpc->wbc, folio, &error))) {
		error = iomap_writeback_folio(wpc, folio);
		folio_unlock(folio);
	}

	/*
	 * If @error is non-zero, it means that we have a situation where some
	 * part of the submission process has failed after we've marked pages
	 * for writeback.
	 *
	 * We cannot cancel the writeback directly in that case, so always call
	 * ->writeback_submit to run the I/O completion handler to clear the
	 * writeback bit and let the file system process the errors.
	 */
	if (wpc->wb_ctx)
		return wpc->ops->writeback_submit(wpc, error);
	return error;
}
EXPORT_SYMBOL_GPL(iomap_writepages);