// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

#define IOEND_BATCH_SIZE	4096

/*
 * Structure allocated for each folio when block size < folio size
 * to track sub-folio uptodate status and I/O completions.
 */
struct iomap_page {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		uptodate_lock;
	unsigned long		uptodate[];
};

static inline struct iomap_page *to_iomap_page(struct folio *folio)
{
	if (folio_test_private(folio))
		return folio_get_private(folio);
	return NULL;
}

static struct bio_set iomap_ioend_bioset;

static struct iomap_page *
iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags)
{
	struct iomap_page *iop = to_iomap_page(folio);
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
	gfp_t gfp;

	if (iop || nr_blocks <= 1)
		return iop;

	if (flags & IOMAP_NOWAIT)
		gfp = GFP_NOWAIT;
	else
		gfp = GFP_NOFS | __GFP_NOFAIL;

	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
		      gfp);
	if (iop) {
		spin_lock_init(&iop->uptodate_lock);
		if (folio_test_uptodate(folio))
			bitmap_fill(iop->uptodate, nr_blocks);
		folio_attach_private(folio, iop);
	}
	return iop;
}

static void iomap_page_release(struct folio *folio)
{
	struct iomap_page *iop = folio_detach_private(folio);
	struct inode *inode = folio->mapping->host;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
			folio_test_uptodate(folio));
	kfree(iop);
}
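/*
 * Illustration of the bookkeeping above: a 16kB folio on a filesystem with
 * 4kB blocks has i_blocks_per_folio() == 4, so the uptodate bitmap needs
 * four bits, which BITS_TO_LONGS() rounds up to a single unsigned long
 * appended to the allocation by struct_size().
 */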
/*
 * Calculate the range inside the folio that we actually need to read.
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
	struct iomap_page *iop = to_iomap_page(folio);
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

static void iomap_iop_set_range_uptodate(struct folio *folio,
		struct iomap_page *iop, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned long flags;

	spin_lock_irqsave(&iop->uptodate_lock, flags);
	bitmap_set(iop->uptodate, first, last - first + 1);
	if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}

static void iomap_set_range_uptodate(struct folio *folio,
		struct iomap_page *iop, size_t off, size_t len)
{
	if (iop)
		iomap_iop_set_range_uptodate(folio, iop, off, len);
	else
		folio_mark_uptodate(folio);
}

static void iomap_finish_folio_read(struct folio *folio, size_t offset,
		size_t len, int error)
{
	struct iomap_page *iop = to_iomap_page(folio);

	if (unlikely(error)) {
		folio_clear_uptodate(folio);
		folio_set_error(folio);
	} else {
		iomap_set_range_uptodate(folio, iop, offset, len);
	}

	if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
		folio_unlock(folio);
}

static void iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}
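/*
 * State carried across iomap_iter() calls while filling one or more folios
 * with read I/O: the folio currently being read, whether a bio was built for
 * it (if not, the folio must be unlocked by hand), the bio under
 * construction, and the readahead state when called from iomap_readahead().
 */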
struct iomap_readpage_ctx {
	struct folio		*cur_folio;
	bool			cur_folio_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	struct iomap_page *iop;
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t poff = offset_in_page(iomap->offset);
	size_t offset = offset_in_folio(folio, iomap->offset);
	void *addr;

	if (folio_test_uptodate(folio))
		return 0;

	if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
		return -EIO;
	if (WARN_ON_ONCE(size > PAGE_SIZE -
			 offset_in_page(iomap->inline_data)))
		return -EIO;
	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (offset > 0)
		iop = iomap_page_create(iter->inode, folio, iter->flags);
	else
		iop = to_iomap_page(folio);

	addr = kmap_local_folio(folio, offset);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - poff - size);
	kunmap_local(addr);
	iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff);
	return 0;
}

static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
		loff_t pos)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}

static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx, loff_t offset)
{
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos + offset;
	loff_t length = iomap_length(iter) - offset;
	struct folio *folio = ctx->cur_folio;
	struct iomap_page *iop;
	loff_t orig_pos = pos;
	size_t poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE)
		return iomap_read_inline_data(iter, folio);

	/* zero post-eof blocks as the page may be mapped */
	iop = iomap_page_create(iter->inode, folio, iter->flags);
	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(iter, pos)) {
		folio_zero_range(folio, poff, plen);
		iomap_set_range_uptodate(folio, iop, poff, plen);
		goto done;
	}

	ctx->cur_folio_in_bio = true;
	if (iop)
		atomic_add(plen, &iop->read_bytes_pending);

	sector = iomap_sector(iomap, pos);
	if (!ctx->bio ||
	    bio_end_sector(ctx->bio) != sector ||
	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
				     REQ_OP_READ, gfp);
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_read_folio does.
		 */
		if (!ctx->bio) {
			ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
					     orig_gfp);
		}
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		ctx->bio->bi_end_io = iomap_read_end_io;
		bio_add_folio(ctx->bio, folio, plen, poff);
	}

done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that, we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}
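/*
 * Read in a locked folio.  The folio is unlocked either here (fully
 * uptodate, inline or zeroed data, so no bio was needed) or from the read
 * completion once all bios built for it have finished.
 */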
int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= folio->mapping->host,
		.pos		= folio_pos(folio),
		.len		= folio_size(folio),
	};
	struct iomap_readpage_ctx ctx = {
		.cur_folio	= folio,
	};
	int ret;

	trace_iomap_readpage(iter.inode, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_readpage_iter(&iter, &ctx, 0);

	if (ret < 0)
		folio_set_error(folio);

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_folio_in_bio);
		folio_unlock(folio);
	}

	/*
	 * Just like mpage_readahead and block_read_full_folio, we always
	 * return 0 and just set the folio error flag on errors.  This
	 * should be cleaned up throughout the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_read_folio);

static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	loff_t length = iomap_length(iter);
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_folio &&
		    offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
			if (!ctx->cur_folio_in_bio)
				folio_unlock(ctx->cur_folio);
			ctx->cur_folio = NULL;
		}
		if (!ctx->cur_folio) {
			ctx->cur_folio = readahead_folio(ctx->rac);
			ctx->cur_folio_in_bio = false;
		}
		ret = iomap_readpage_iter(iter, ctx, done);
		if (ret <= 0)
			return ret;
	}

	return done;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (e.g. to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
	};
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.processed = iomap_readahead_iter(&iter, &ctx);

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_folio) {
		if (!ctx.cur_folio_in_bio)
			folio_unlock(ctx.cur_folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);
/*
 * iomap_is_partially_uptodate checks whether blocks within a folio are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the specified part
 * of the folio are uptodate.
 */
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
	struct iomap_page *iop = to_iomap_page(folio);
	struct inode *inode = folio->mapping->host;
	unsigned first, last, i;

	if (!iop)
		return false;

	/* Caller's range may extend past the end of this folio */
	count = min(folio_size(folio) - from, count);

	/* First and last blocks in range within folio */
	first = from >> inode->i_blkbits;
	last = (from + count - 1) >> inode->i_blkbits;

	for (i = first; i <= last; i++)
		if (!test_bit(i, iop->uptodate))
			return false;
	return true;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

/**
 * iomap_get_folio - get a folio reference for writing
 * @iter: iteration structure
 * @pos: start offset of write
 *
 * Returns a locked reference to the folio at @pos, or an error pointer if the
 * folio could not be obtained.
 */
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos)
{
	unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
	struct folio *folio;

	if (iter->flags & IOMAP_NOWAIT)
		fgp |= FGP_NOWAIT;

	folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
			fgp, mapping_gfp_mask(iter->inode->i_mapping));
	if (folio)
		return folio;

	if (iter->flags & IOMAP_NOWAIT)
		return ERR_PTR(-EAGAIN);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(iomap_get_folio);

bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list();
	 * skip those here.
	 */
	if (folio_test_dirty(folio) || folio_test_writeback(folio))
		return false;
	iomap_page_release(folio);
	return true;
}
EXPORT_SYMBOL_GPL(iomap_release_folio);

void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	trace_iomap_invalidate_folio(folio->mapping->host,
			folio_pos(folio) + offset, len);

	/*
	 * If we're invalidating the entire folio, clear the dirty state
	 * from it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == folio_size(folio)) {
		WARN_ON_ONCE(folio_test_writeback(folio));
		folio_cancel_dirty(folio);
		iomap_page_release(folio);
	} else if (folio_test_large(folio)) {
		/* Must release the iop so the page can be split */
		WARN_ON_ONCE(!folio_test_uptodate(folio) &&
			     folio_test_dirty(folio));
		iomap_page_release(folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
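/*
 * Undo the page cache effects of a failed or short write: folios that were
 * instantiated entirely beyond the old EOF carry no useful data, so truncate
 * them back out of the page cache.
 */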
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size),
					 pos + len - 1);
}

static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
		size_t poff, size_t plen, const struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_add_folio(&bio, folio, plen, poff);
	return submit_bio_wait(&bio);
}

static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct iomap_page *iop;
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
	size_t from = offset_in_folio(folio, pos), to = from + len;
	size_t poff, plen;

	if (folio_test_uptodate(folio))
		return 0;
	folio_clear_error(folio);

	iop = iomap_page_create(iter->inode, folio, iter->flags);
	if ((iter->flags & IOMAP_NOWAIT) && !iop && nr_blocks > 1)
		return -EAGAIN;

	do {
		iomap_adjust_read_range(iter->inode, folio, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(iter->flags & IOMAP_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			folio_zero_segments(folio, poff, from, to, poff + plen);
		} else {
			int status;

			if (iter->flags & IOMAP_NOWAIT)
				return -EAGAIN;

			status = iomap_read_folio_sync(block_start, folio,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(folio, iop, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}

static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
		size_t len)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

	if (folio_ops && folio_ops->get_folio)
		return folio_ops->get_folio(iter, pos, len);
	else
		return iomap_get_folio(iter, pos);
}

static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
		struct folio *folio)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

	if (folio_ops && folio_ops->put_folio) {
		folio_ops->put_folio(iter->inode, pos, ret, folio);
	} else {
		folio_unlock(folio);
		folio_put(folio);
	}
}

static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct folio *folio)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, folio);
}
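/*
 * Prepare a locked folio for a write of up to @len bytes at @pos: get the
 * folio, revalidate the cached iomap against it, and bring any blocks that
 * will only be partially overwritten uptodate first.  On success the folio
 * is returned in @foliop and remains locked.
 */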
static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio **foliop)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct folio *folio;
	int status = 0;

	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
	if (srcmap != &iter->iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (!mapping_large_folio_support(iter->inode->i_mapping))
		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

	folio = __iomap_get_folio(iter, pos, len);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/*
	 * Now we have a locked folio, before we do anything with it we need to
	 * check that the iomap we have cached is not stale. The inode extent
	 * mapping can change due to concurrent IO in flight (e.g.
	 * IOMAP_UNWRITTEN state can change and memory reclaim could have
	 * reclaimed a previously partially written page at this index after IO
	 * completion before this write reaches this file offset) and hence we
	 * could do the wrong thing here (zero a page range incorrectly or fail
	 * to zero) and corrupt data.
	 */
	if (folio_ops && folio_ops->iomap_valid) {
		bool iomap_valid = folio_ops->iomap_valid(iter->inode,
							  &iter->iomap);
		if (!iomap_valid) {
			iter->iomap.flags |= IOMAP_F_STALE;
			status = 0;
			goto out_unlock;
		}
	}

	if (pos + len > folio_pos(folio) + folio_size(folio))
		len = folio_pos(folio) + folio_size(folio) - pos;

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, folio);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, pos, len, folio);

	if (unlikely(status))
		goto out_unlock;

	*foliop = folio;
	return 0;

out_unlock:
	__iomap_put_folio(iter, pos, 0, folio);
	iomap_write_failed(iter->inode, pos, len);

	return status;
}

static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	struct iomap_page *iop = to_iomap_page(folio);
	flush_dcache_folio(folio);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a read_folio reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * read_folio might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !folio_test_uptodate(folio)))
		return 0;
	iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
	filemap_dirty_folio(inode->i_mapping, folio);
	return copied;
}
static size_t iomap_write_end_inline(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!folio_test_uptodate(folio));
	BUG_ON(!iomap_inline_data_valid(iomap));

	flush_dcache_folio(folio);
	addr = kmap_local_folio(folio, pos);
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
	return copied;
}

/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t old_size = iter->inode->i_size;
	size_t ret;

	if (srcmap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(iter, folio, pos, copied);
	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
				copied, &folio->page, NULL);
	} else {
		ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(iter->inode, pos + ret);
		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
	}
	__iomap_put_folio(iter, pos, ret, folio);

	if (old_size < pos)
		pagecache_isize_extended(iter->inode, old_size, pos);
	if (ret < len)
		iomap_write_failed(iter->inode, pos + ret, len - ret);
	return ret;
}

static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	ssize_t written = 0;
	long status = 0;
	struct address_space *mapping = iter->inode->i_mapping;
	unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;

	do {
		struct folio *folio;
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		status = balance_dirty_pages_ratelimited_flags(mapping,
							       bdp_flags);
		if (unlikely(status))
			break;

		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * For async buffered writes the assumption is that the user
		 * page has already been faulted in. This can be optimized by
		 * faulting the user page.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			break;
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		page = folio_file_page(folio, pos >> PAGE_SHIFT);
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		copied = copy_page_from_iter_atomic(page, offset, bytes, i);

		status = iomap_write_end(iter, pos, bytes, copied, folio);

		if (unlikely(copied != status))
			iov_iter_revert(i, copied - status);

		cond_resched();
		if (unlikely(status == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (copied)
				bytes = copied;
			goto again;
		}
		pos += status;
		written += status;
		length -= status;
	} while (iov_iter_count(i) && length);

	if (status == -EAGAIN) {
		iov_iter_revert(i, written);
		return -EAGAIN;
	}
	return written ? written : status;
}
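/*
 * Returns the number of bytes written on (partial) success, or, if no
 * progress was made at all, the error from the last iteration.
 */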
ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(i),
		.flags		= IOMAP_WRITE,
	};
	int ret;

	if (iocb->ki_flags & IOCB_NOWAIT)
		iter.flags |= IOMAP_NOWAIT;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_write_iter(&iter, i);
	if (iter.pos == iocb->ki_pos)
		return ret;
	return iter.pos - iocb->ki_pos;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);

/*
 * Scan the data range passed to us for dirty page cache folios. If we find a
 * dirty folio, punch out the preceding range and update the offset from which
 * the next punch will start.
 *
 * We can punch out storage reservations under clean pages because they either
 * contain data that has been written back - in which case the delalloc punch
 * over that range is a no-op - or they were instantiated by read faults, in
 * which case they contain zeroes and we can remove the delalloc backing range
 * and any new writes to those pages will do the normal hole filling
 * operation...
 *
 * This makes the logic simple: we only need to keep delalloc extents over the
 * dirty ranges of the page cache.
 *
 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
 * simplify range iterations.
 */
static int iomap_write_delalloc_scan(struct inode *inode,
		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
		int (*punch)(struct inode *inode, loff_t offset, loff_t length))
{
	while (start_byte < end_byte) {
		struct folio *folio;

		/* grab locked page */
		folio = filemap_lock_folio(inode->i_mapping,
				start_byte >> PAGE_SHIFT);
		if (!folio) {
			start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
					PAGE_SIZE;
			continue;
		}

		/* if dirty, punch up to offset */
		if (folio_test_dirty(folio)) {
			if (start_byte > *punch_start_byte) {
				int error;

				error = punch(inode, *punch_start_byte,
						start_byte - *punch_start_byte);
				if (error) {
					folio_unlock(folio);
					folio_put(folio);
					return error;
				}
			}

			/*
			 * Make sure the next punch start is correctly bound to
			 * the end of this data range, not the end of the folio.
			 */
			*punch_start_byte = min_t(loff_t, end_byte,
					folio_next_index(folio) << PAGE_SHIFT);
		}

		/* move offset to start of next folio in range */
		start_byte = folio_next_index(folio) << PAGE_SHIFT;
		folio_unlock(folio);
		folio_put(folio);
	}
	return 0;
}
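/*
 * As an illustration: with 4kB folios and a range covering folios at byte
 * offsets 0, 4096 and 8192 of which only the middle one is dirty, the scan
 * above punches [*punch_start_byte, 4096), advances *punch_start_byte to
 * 8192, and leaves the trailing clean folio to the caller's final punch.
 */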
/*
 * Punch out all the delalloc blocks in the range given except for those that
 * have dirty data still pending in the page cache - those are going to be
 * written and so must still retain the delalloc backing for writeback.
 *
 * As we are scanning the page cache for data, we don't need to reimplement the
 * wheel - mapping_seek_hole_data() does exactly what we need to identify the
 * start and end of data ranges correctly even for sub-folio block sizes.  This
 * byte range based iteration is especially convenient because it means we
 * don't have to care about variable size folios, nor where the start or end of
 * the data range lies within a folio, if they lie within the same folio or even
 * if there are multiple discontiguous data ranges within the folio.
 *
 * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
 * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
 * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
 * date.  A write page fault can then mark it dirty.  If we then fail a write()
 * beyond EOF into that up to date cached range, we allocate a delalloc block
 * beyond EOF and then have to punch it out.  Because the range is up to date,
 * mapping_seek_hole_data() will return it, and we will skip the punch because
 * the folio is dirty.  This is incorrect - we always need to punch out delalloc
 * beyond EOF in this case as writeback will never write back and convert that
 * delalloc block beyond EOF.  Hence we limit the cached data scan range to EOF,
 * resulting in always punching out the range from the EOF to the end of the
 * range the iomap spans.
 *
 * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it
 * matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA
 * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
 * returns the end of the data range (data_end).  Using closed intervals would
 * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
 * the code to subtle off-by-one bugs....
 */
static int iomap_write_delalloc_release(struct inode *inode,
		loff_t start_byte, loff_t end_byte,
		int (*punch)(struct inode *inode, loff_t pos, loff_t length))
{
	loff_t punch_start_byte = start_byte;
	loff_t scan_end_byte = min(i_size_read(inode), end_byte);
	int error = 0;

	/*
	 * Lock the mapping to avoid races with page faults re-instantiating
	 * folios and dirtying them via ->page_mkwrite whilst we walk the
	 * cache and perform delalloc extent removal. Failing to do this can
	 * leave dirty pages with no space reservation in the cache.
	 */
	filemap_invalidate_lock(inode->i_mapping);
	while (start_byte < scan_end_byte) {
		loff_t data_end;

		start_byte = mapping_seek_hole_data(inode->i_mapping,
				start_byte, scan_end_byte, SEEK_DATA);
		/*
		 * If there is no more data to scan, all that is left is to
		 * punch out the remaining range.
		 */
		if (start_byte == -ENXIO || start_byte == scan_end_byte)
			break;
		if (start_byte < 0) {
			error = start_byte;
			goto out_unlock;
		}
		WARN_ON_ONCE(start_byte < punch_start_byte);
		WARN_ON_ONCE(start_byte > scan_end_byte);

		/*
		 * We find the end of this contiguous cached data range by
		 * seeking from start_byte to the beginning of the next hole.
		 */
		data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
				scan_end_byte, SEEK_HOLE);
		if (data_end < 0) {
			error = data_end;
			goto out_unlock;
		}
		WARN_ON_ONCE(data_end <= start_byte);
		WARN_ON_ONCE(data_end > scan_end_byte);

		error = iomap_write_delalloc_scan(inode, &punch_start_byte,
				start_byte, data_end, punch);
		if (error)
			goto out_unlock;

		/* The next data search starts at the end of this one. */
		start_byte = data_end;
	}

	if (punch_start_byte < end_byte)
		error = punch(inode, punch_start_byte,
				end_byte - punch_start_byte);
out_unlock:
	filemap_invalidate_unlock(inode->i_mapping);
	return error;
}
/*
 * When a short write occurs, the filesystem may need to remove reserved space
 * that was allocated in ->iomap_begin from its ->iomap_end method. For
 * filesystems that use delayed allocation, we need to punch out delalloc
 * extents from the range that are not dirty in the page cache. As the write can
 * race with page faults, there can be dirty pages over the delalloc extent
 * outside the range of a short write but still within the delalloc extent
 * allocated for this iomap.
 *
 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
 * simplify range iterations.
 *
 * The punch() callback *must* only punch delalloc extents in the range passed
 * to it. It must skip over all other types of extents in the range and leave
 * them completely unchanged. It must do this punch atomically with respect to
 * other extent modifications.
 *
 * The punch() callback may be called with a folio locked to prevent writeback
 * extent allocation racing at the edge of the range we are currently punching.
 * The locked folio may or may not cover the range being punched, so it is not
 * safe for the punch() callback to lock folios itself.
 *
 * Lock order is:
 *
 * inode->i_rwsem (shared or exclusive)
 *   inode->i_mapping->invalidate_lock (exclusive)
 *     folio_lock()
 *       ->punch
 *         internal filesystem allocation lock
 */
int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
		struct iomap *iomap, loff_t pos, loff_t length,
		ssize_t written,
		int (*punch)(struct inode *inode, loff_t pos, loff_t length))
{
	loff_t start_byte;
	loff_t end_byte;
	int blocksize = i_blocksize(inode);

	if (iomap->type != IOMAP_DELALLOC)
		return 0;

	/* If we didn't reserve the blocks, we're not allowed to punch them. */
	if (!(iomap->flags & IOMAP_F_NEW))
		return 0;

	/*
	 * start_byte refers to the first unused block after a short write. If
	 * nothing was written, round offset down to point at the first block in
	 * the range.
	 */
	if (unlikely(!written))
		start_byte = round_down(pos, blocksize);
	else
		start_byte = round_up(pos + written, blocksize);
	end_byte = round_up(pos + length, blocksize);

	/* Nothing to do if we've written the entire delalloc extent */
	if (start_byte >= end_byte)
		return 0;

	return iomap_write_delalloc_release(inode, start_byte, end_byte,
					punch);
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc);
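/*
 * To illustrate the rounding in iomap_file_buffered_write_punch_delalloc()
 * above with a 4096 byte block size: a write at pos 6000 for length 10000
 * that wrote nothing considers [4096, 16384) for punching; if 3000 bytes
 * had been written first, start_byte rounds up to 12288 and only
 * [12288, 16384) is considered.
 */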
static loff_t iomap_unshare_iter(struct iomap_iter *iter)
{
	struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	long status = 0;
	loff_t written = 0;

	/* don't bother with blocks that are not shared to start with */
	if (!(iomap->flags & IOMAP_F_SHARED))
		return length;
	/* don't bother with holes or unwritten extents */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		unsigned long offset = offset_in_page(pos);
		unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
		struct folio *folio;

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			return status;
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		status = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(status == 0))
			return -EIO;

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_WRITE | IOMAP_UNSHARE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_unshare_iter(&iter);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);
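/*
 * Zero a range through the page cache.  iomap_write_begin() brings any
 * partially covered blocks uptodate first, so the folio can simply be zeroed
 * and dirtied like an ordinary buffered write.
 */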
static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	loff_t written = 0;

	/* already zeroed?  we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		struct folio *folio;
		int status;
		size_t offset;
		size_t bytes = min_t(u64, SIZE_MAX, length);

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (status)
			return status;
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		folio_zero_range(folio, offset, bytes);
		folio_mark_accessed(folio);

		bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(bytes == 0))
			return -EIO;

		pos += bytes;
		length -= bytes;
		written += bytes;
	} while (length > 0);

	if (did_zero)
		*did_zero = true;
	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_ZERO,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_zero_iter(&iter, did_zero);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);

static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
		struct folio *folio)
{
	loff_t length = iomap_length(iter);
	int ret;

	if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(folio, iter->pos, length, NULL,
					      &iter->iomap);
		if (ret)
			return ret;
		block_commit_write(&folio->page, 0, length);
	} else {
		WARN_ON_ONCE(!folio_test_uptodate(folio));
		folio_mark_dirty(folio);
	}

	return length;
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= file_inode(vmf->vma->vm_file),
		.flags		= IOMAP_WRITE | IOMAP_FAULT,
	};
	struct folio *folio = page_folio(vmf->page);
	ssize_t ret;

	folio_lock(folio);
	ret = folio_mkwrite_check_truncate(folio, iter.inode);
	if (ret < 0)
		goto out_unlock;
	iter.pos = folio_pos(folio);
	iter.len = ret;
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_folio_mkwrite_iter(&iter, folio);

	if (ret < 0)
		goto out_unlock;
	folio_wait_stable(folio);
	return VM_FAULT_LOCKED;
out_unlock:
	folio_unlock(folio);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
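/*
 * Writeback completion counterpart to iomap_finish_folio_read(): record any
 * error, and end writeback on the folio once all bytes outstanding against
 * it have completed.
 */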
static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
		size_t len, int error)
{
	struct iomap_page *iop = to_iomap_page(folio);

	if (error) {
		folio_set_error(folio);
		mapping_set_error(inode->i_mapping, error);
	}

	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !iop);
	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);

	if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
		folio_end_writeback(folio);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
static u32
iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
	struct inode *inode = ioend->io_inode;
	struct bio *bio = &ioend->io_inline_bio;
	struct bio *last = ioend->io_bio, *next;
	u64 start = bio->bi_iter.bi_sector;
	loff_t offset = ioend->io_offset;
	bool quiet = bio_flagged(bio, BIO_QUIET);
	u32 folio_count = 0;

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct folio_iter fi;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk all folios in bio, ending page IO on them */
		bio_for_each_folio_all(fi, bio) {
			iomap_finish_folio_write(inode, fi.folio, fi.length,
					error);
			folio_count++;
		}
		bio_put(bio);
	}
	/* The ioend has been freed by bio_put() */

	if (unlikely(error && !quiet)) {
		printk_ratelimited(KERN_ERR
"%s: writeback error on inode %lu, offset %lld, sector %llu",
			inode->i_sb->s_id, inode->i_ino, offset, start);
	}
	return folio_count;
}

/*
 * Ioend completion routine for merged bios. This can only be called from task
 * contexts as merged ioends can be of unbounded length. Hence we have to break
 * up the writeback completions into manageable chunks to avoid long scheduler
 * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
 * good batch processing throughput without creating adverse scheduler latency
 * conditions.
 */
void
iomap_finish_ioends(struct iomap_ioend *ioend, int error)
{
	struct list_head tmp;
	u32 completions;

	might_sleep();

	list_replace_init(&ioend->io_list, &tmp);
	completions = iomap_finish_ioend(ioend, error);

	while (!list_empty(&tmp)) {
		if (completions > IOEND_BATCH_SIZE * 8) {
			cond_resched();
			completions = 0;
		}
		ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
		list_del_init(&ioend->io_list);
		completions += iomap_finish_ioend(ioend, error);
	}
}
EXPORT_SYMBOL_GPL(iomap_finish_ioends);

/*
 * We can merge two adjacent ioends if they have the same set of work to do.
 */
static bool
iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
{
	if (ioend->io_bio->bi_status != next->io_bio->bi_status)
		return false;
	if ((ioend->io_flags & IOMAP_F_SHARED) ^
	    (next->io_flags & IOMAP_F_SHARED))
		return false;
	if ((ioend->io_type == IOMAP_UNWRITTEN) ^
	    (next->io_type == IOMAP_UNWRITTEN))
		return false;
	if (ioend->io_offset + ioend->io_size != next->io_offset)
		return false;
	/*
	 * Do not merge physically discontiguous ioends. The filesystem
	 * completion functions will have to iterate the physical
	 * discontiguities even if we merge the ioends at a logical level, so
	 * we don't gain anything by merging physical discontiguities here.
	 *
	 * We cannot use bio->bi_iter.bi_sector here as it is modified during
	 * submission so does not point to the start sector of the bio at
	 * completion.
	 */
	if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
		return false;
	return true;
}
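/*
 * Merge as many contiguous ioends from @more_ioends into @ioend as
 * iomap_ioend_can_merge() allows, stopping at the first ioend that cannot be
 * merged.  Callers typically sort the list first, e.g. via
 * iomap_sort_ioends() below.
 */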
void
iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
{
	struct iomap_ioend *next;

	INIT_LIST_HEAD(&ioend->io_list);

	while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
			io_list))) {
		if (!iomap_ioend_can_merge(ioend, next))
			break;
		list_move_tail(&next->io_list, &ioend->io_list);
		ioend->io_size += next->io_size;
	}
}
EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);

static int
iomap_ioend_compare(void *priv, const struct list_head *a,
		const struct list_head *b)
{
	struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
	struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);

	if (ia->io_offset < ib->io_offset)
		return -1;
	if (ia->io_offset > ib->io_offset)
		return 1;
	return 0;
}

void
iomap_sort_ioends(struct list_head *ioend_list)
{
	list_sort(NULL, ioend_list, iomap_ioend_compare);
}
EXPORT_SYMBOL_GPL(iomap_sort_ioends);

static void iomap_writepage_end_bio(struct bio *bio)
{
	struct iomap_ioend *ioend = bio->bi_private;

	iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

/*
 * Submit the final bio for an ioend.
 *
 * If @error is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we've marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio instead of
 * submitting it. This typically only happens on a filesystem shutdown.
 */
static int
iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
		int error)
{
	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = iomap_writepage_end_bio;

	if (wpc->ops->prepare_ioend)
		error = wpc->ops->prepare_ioend(ioend, error);
	if (error) {
		/*
		 * If we're failing the IO now, just mark the ioend with an
		 * error and finish it.  This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
		 * time.
		 */
		ioend->io_bio->bi_status = errno_to_blk_status(error);
		bio_endio(ioend->io_bio);
		return error;
	}

	submit_bio(ioend->io_bio);
	return 0;
}
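/*
 * The ioend is allocated from iomap_ioend_bioset and embedded in its first
 * bio (io_inline_bio), so putting the final bio reference in
 * iomap_finish_ioend() also frees the ioend itself.
 */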
static struct iomap_ioend *
iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
		loff_t offset, sector_t sector, struct writeback_control *wbc)
{
	struct iomap_ioend *ioend;
	struct bio *bio;

	bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
			       REQ_OP_WRITE | wbc_to_write_flags(wbc),
			       GFP_NOFS, &iomap_ioend_bioset);
	bio->bi_iter.bi_sector = sector;
	wbc_init_bio(wbc, bio);

	ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = wpc->iomap.type;
	ioend->io_flags = wpc->iomap.flags;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_folios = 0;
	ioend->io_offset = offset;
	ioend->io_bio = bio;
	ioend->io_sector = sector;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in iomap_finish_ioend().
 */
static struct bio *
iomap_chain_bio(struct bio *prev)
{
	struct bio *new;

	new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS);
	bio_clone_blkg_association(new, prev);
	new->bi_iter.bi_sector = bio_end_sector(prev);

	bio_chain(prev, new);
	bio_get(prev);		/* for iomap_finish_ioend */
	submit_bio(prev);
	return new;
}

static bool
iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
		sector_t sector)
{
	if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
	    (wpc->ioend->io_flags & IOMAP_F_SHARED))
		return false;
	if (wpc->iomap.type != wpc->ioend->io_type)
		return false;
	if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
		return false;
	if (sector != bio_end_sector(wpc->ioend->io_bio))
		return false;
	/*
	 * Limit ioend bio chain lengths to minimise IO completion latency. This
	 * also prevents long tight loops ending page writeback on all the
	 * folios in the ioend.
	 */
	if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE)
		return false;
	return true;
}
/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
 */
static void
iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
		struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct list_head *iolist)
{
	sector_t sector = iomap_sector(&wpc->iomap, pos);
	unsigned len = i_blocksize(inode);
	size_t poff = offset_in_folio(folio, pos);

	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
	}

	if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
		wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
		bio_add_folio(wpc->ioend->io_bio, folio, len, poff);
	}

	if (iop)
		atomic_add(len, &iop->write_bytes_pending);
	wpc->ioend->io_size += len;
	wbc_account_cgroup_owner(wbc, &folio->page, len);
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * the forward progress guarantees we need to provide. The current ioend we're
 * adding blocks to is cached in the writepage context, and if the new block
 * doesn't append to the cached ioend, it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected. While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
iomap_writepage_map(struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct inode *inode,
		struct folio *folio, u64 end_pos)
{
	struct iomap_page *iop = iomap_page_create(inode, folio, 0);
	struct iomap_ioend *ioend, *next;
	unsigned len = i_blocksize(inode);
	unsigned nblocks = i_blocks_per_folio(inode, folio);
	u64 pos = folio_pos(folio);
	int error = 0, count = 0, i;
	LIST_HEAD(submit_list);

	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);

	/*
	 * Walk through the folio to find areas to write back. If we
	 * run off the end of the current map or find the current map
	 * invalid, grab a new one.
	 */
	for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
		if (iop && !test_bit(i, iop->uptodate))
			continue;

		error = wpc->ops->map_blocks(wpc, inode, pos);
		if (error)
			break;
		trace_iomap_writepage_map(inode, &wpc->iomap);
		if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
			continue;
		if (wpc->iomap.type == IOMAP_HOLE)
			continue;
		iomap_add_to_ioend(inode, pos, folio, iop, wpc, wbc,
				&submit_list);
		count++;
	}
	if (count)
		wpc->ioend->io_folios++;

	WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
	WARN_ON_ONCE(!folio_test_locked(folio));
	WARN_ON_ONCE(folio_test_writeback(folio));
	WARN_ON_ONCE(folio_test_dirty(folio));

	/*
	 * We cannot cancel the ioend directly here on error.  We may have
	 * already set other pages under writeback and hence we have to run I/O
	 * completion to mark the error state of the pages under writeback
	 * appropriately.
	 */
	if (unlikely(error)) {
		/*
		 * Let the filesystem know what portion of the current page
		 * failed to map. If the page hasn't been added to ioend, it
		 * won't be affected by I/O completion and we must unlock it
		 * now.
		 */
		if (wpc->ops->discard_folio)
			wpc->ops->discard_folio(folio, pos);
		if (!count) {
			folio_unlock(folio);
			goto done;
		}
	}

	folio_start_writeback(folio);
	folio_unlock(folio);

	/*
	 * Preserve the original error if there was one; catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
		int error2;

		list_del_init(&ioend->io_list);
		error2 = iomap_submit_ioend(wpc, ioend, error);
		if (error2 && !error)
			error = error2;
	}

	/*
	 * We can end up here with no error and nothing to write only if we race
	 * with a partial page truncate on a sub-page block sized filesystem.
	 */
	if (!count)
		folio_end_writeback(folio);
done:
	mapping_set_error(inode->i_mapping, error);
	return error;
}
/*
 * Write out a dirty page.
 *
 * For delalloc space on the page, we need to allocate space and flush it.
 * For unwritten space on the page, we need to start the conversion to
 * regular allocated space.
 */
static int iomap_do_writepage(struct folio *folio,
		struct writeback_control *wbc, void *data)
{
	struct iomap_writepage_ctx *wpc = data;
	struct inode *inode = folio->mapping->host;
	u64 end_pos, isize;

	trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));

	/*
	 * Refuse to write the folio out if we're called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Is this folio beyond the end of the file?
	 *
	 * The folio index is less than the end_index, adjust the end_pos
	 * to the highest offset that this folio should represent.
	 * -----------------------------------------------------
	 * |			file mapping	      | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	isize = i_size_read(inode);
	end_pos = folio_pos(folio) + folio_size(folio);
	if (end_pos > isize) {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		       | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N  | Beyond |
		 * ^--------------------------------^----------|---------
		 * |				    | Straddles |
		 * ---------------------------------^----------|--------|
		 */
		size_t poff = offset_in_folio(folio, isize);
		pgoff_t end_index = isize >> PAGE_SHIFT;

		/*
		 * Skip the page if it's fully outside i_size, e.g.
		 * due to a truncate operation that's in progress.  We've
		 * cleaned this page and truncate will finish things off for
		 * us.
		 *
		 * Note that the end_index is unsigned long.  If the given
		 * offset is greater than 16TB on a 32-bit system then if we
		 * checked if the page is fully outside i_size with
		 * "if (page->index >= end_index + 1)", "end_index + 1" would
		 * overflow and evaluate to 0.  Hence this page would be
		 * redirtied and written out repeatedly, which would result in
		 * an infinite loop; the user program performing this operation
		 * would hang.  Instead, we can detect this situation by
		 * checking if the page is totally beyond i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (folio->index > end_index ||
		    (folio->index == end_index && poff == 0))
			goto unlock;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		folio_zero_segment(folio, poff, folio_size(folio));
		end_pos = isize;
	}

	return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);

redirty:
	folio_redirty_for_writepage(wbc, folio);
unlock:
	folio_unlock(folio);
	return 0;
}

int
iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)
{
	int ret;

	wpc->ops = ops;
	ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
	if (!wpc->ioend)
		return ret;
	return iomap_submit_ioend(wpc, wpc->ioend, ret);
}
EXPORT_SYMBOL_GPL(iomap_writepages);

static int __init iomap_init(void)
{
	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
			   offsetof(struct iomap_ioend, io_inline_bio),
			   BIOSET_NEED_BVECS);
}
fs_initcall(iomap_init);