// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads.  SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/blk-crypto.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
#include <linux/sched/isolation.h>

#include "internal.h"

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
			  enum rw_hint hint, struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns whether the folio has dirty or writeback buffers.  If all the
 * buffers are unlocked and clean then the folio_test_dirty information is
 * stale.  If any of the buffers are locked, it is assumed they are locked
 * for IO.
 */
void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;
	*dirty = false;
	*writeback = false;

	BUG_ON(!folio_test_locked(folio));

	head = folio_buffers(folio);
	if (!head)
		return;

	if (folio_test_writeback(folio))
		*writeback = true;

	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
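 *
 * Illustrative sketch (not part of this file): the canonical "read a buffer
 * and wait for it" pattern built from these locking helpers, mirroring
 * __bread_slow() further down:
 *
 *	lock_buffer(bh);
 *	if (!buffer_uptodate(bh)) {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_read_sync;
 *		submit_bh(REQ_OP_READ, bh);
 *		wait_on_buffer(bh);
 *	} else {
 *		unlock_buffer(bh);
 *	}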
121 */ 122 void __wait_on_buffer(struct buffer_head * bh) 123 { 124 wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); 125 } 126 EXPORT_SYMBOL(__wait_on_buffer); 127 128 static void buffer_io_error(struct buffer_head *bh, char *msg) 129 { 130 if (!test_bit(BH_Quiet, &bh->b_state)) 131 printk_ratelimited(KERN_ERR 132 "Buffer I/O error on dev %pg, logical block %llu%s\n", 133 bh->b_bdev, (unsigned long long)bh->b_blocknr, msg); 134 } 135 136 /* 137 * End-of-IO handler helper function which does not touch the bh after 138 * unlocking it. 139 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but 140 * a race there is benign: unlock_buffer() only use the bh's address for 141 * hashing after unlocking the buffer, so it doesn't actually touch the bh 142 * itself. 143 */ 144 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) 145 { 146 if (uptodate) { 147 set_buffer_uptodate(bh); 148 } else { 149 /* This happens, due to failed read-ahead attempts. */ 150 clear_buffer_uptodate(bh); 151 } 152 unlock_buffer(bh); 153 } 154 155 /* 156 * Default synchronous end-of-IO handler.. Just mark it up-to-date and 157 * unlock the buffer. 158 */ 159 void end_buffer_read_sync(struct buffer_head *bh, int uptodate) 160 { 161 put_bh(bh); 162 __end_buffer_read_notouch(bh, uptodate); 163 } 164 EXPORT_SYMBOL(end_buffer_read_sync); 165 166 void end_buffer_write_sync(struct buffer_head *bh, int uptodate) 167 { 168 if (uptodate) { 169 set_buffer_uptodate(bh); 170 } else { 171 buffer_io_error(bh, ", lost sync page write"); 172 mark_buffer_write_io_error(bh); 173 clear_buffer_uptodate(bh); 174 } 175 unlock_buffer(bh); 176 put_bh(bh); 177 } 178 EXPORT_SYMBOL(end_buffer_write_sync); 179 180 static struct buffer_head * 181 __find_get_block_slow(struct block_device *bdev, sector_t block, bool atomic) 182 { 183 struct address_space *bd_mapping = bdev->bd_mapping; 184 const int blkbits = bd_mapping->host->i_blkbits; 185 struct buffer_head *ret = NULL; 186 pgoff_t index; 187 struct buffer_head *bh; 188 struct buffer_head *head; 189 struct folio *folio; 190 int all_mapped = 1; 191 static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1); 192 193 index = ((loff_t)block << blkbits) / PAGE_SIZE; 194 folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0); 195 if (IS_ERR(folio)) 196 goto out; 197 198 /* 199 * Folio lock protects the buffers. Callers that cannot block 200 * will fallback to serializing vs try_to_free_buffers() via 201 * the i_private_lock. 202 */ 203 if (atomic) 204 spin_lock(&bd_mapping->i_private_lock); 205 else 206 folio_lock(folio); 207 208 head = folio_buffers(folio); 209 if (!head) 210 goto out_unlock; 211 /* 212 * Upon a noref migration, the folio lock serializes here; 213 * otherwise bail. 214 */ 215 if (test_bit_acquire(BH_Migrate, &head->b_state)) { 216 WARN_ON(!atomic); 217 goto out_unlock; 218 } 219 220 bh = head; 221 do { 222 if (!buffer_mapped(bh)) 223 all_mapped = 0; 224 else if (bh->b_blocknr == block) { 225 ret = bh; 226 get_bh(bh); 227 goto out_unlock; 228 } 229 bh = bh->b_this_page; 230 } while (bh != head); 231 232 /* we might be here because some of the buffers on this page are 233 * not mapped. This is due to various races between 234 * file io on the block device and getblk. It gets dealt with 235 * elsewhere, don't buffer_error if we had some unmapped buffers 236 */ 237 ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE); 238 if (all_mapped && __ratelimit(&last_warned)) { 239 printk("__find_get_block_slow() failed. 
block=%llu, " 240 "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, " 241 "device %pg blocksize: %d\n", 242 (unsigned long long)block, 243 (unsigned long long)bh->b_blocknr, 244 bh->b_state, bh->b_size, bdev, 245 1 << blkbits); 246 } 247 out_unlock: 248 if (atomic) 249 spin_unlock(&bd_mapping->i_private_lock); 250 else 251 folio_unlock(folio); 252 folio_put(folio); 253 out: 254 return ret; 255 } 256 257 static void end_buffer_async_read(struct buffer_head *bh, int uptodate) 258 { 259 unsigned long flags; 260 struct buffer_head *first; 261 struct buffer_head *tmp; 262 struct folio *folio; 263 int folio_uptodate = 1; 264 265 BUG_ON(!buffer_async_read(bh)); 266 267 folio = bh->b_folio; 268 if (uptodate) { 269 set_buffer_uptodate(bh); 270 } else { 271 clear_buffer_uptodate(bh); 272 buffer_io_error(bh, ", async page read"); 273 } 274 275 /* 276 * Be _very_ careful from here on. Bad things can happen if 277 * two buffer heads end IO at almost the same time and both 278 * decide that the page is now completely done. 279 */ 280 first = folio_buffers(folio); 281 spin_lock_irqsave(&first->b_uptodate_lock, flags); 282 clear_buffer_async_read(bh); 283 unlock_buffer(bh); 284 tmp = bh; 285 do { 286 if (!buffer_uptodate(tmp)) 287 folio_uptodate = 0; 288 if (buffer_async_read(tmp)) { 289 BUG_ON(!buffer_locked(tmp)); 290 goto still_busy; 291 } 292 tmp = tmp->b_this_page; 293 } while (tmp != bh); 294 spin_unlock_irqrestore(&first->b_uptodate_lock, flags); 295 296 folio_end_read(folio, folio_uptodate); 297 return; 298 299 still_busy: 300 spin_unlock_irqrestore(&first->b_uptodate_lock, flags); 301 } 302 303 struct postprocess_bh_ctx { 304 struct work_struct work; 305 struct buffer_head *bh; 306 struct fsverity_info *vi; 307 }; 308 309 static void verify_bh(struct work_struct *work) 310 { 311 struct postprocess_bh_ctx *ctx = 312 container_of(work, struct postprocess_bh_ctx, work); 313 struct buffer_head *bh = ctx->bh; 314 bool valid; 315 316 valid = fsverity_verify_blocks(ctx->vi, bh->b_folio, bh->b_size, 317 bh_offset(bh)); 318 end_buffer_async_read(bh, valid); 319 kfree(ctx); 320 } 321 322 static void decrypt_bh(struct work_struct *work) 323 { 324 struct postprocess_bh_ctx *ctx = 325 container_of(work, struct postprocess_bh_ctx, work); 326 struct buffer_head *bh = ctx->bh; 327 int err; 328 329 err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size, 330 bh_offset(bh)); 331 if (err == 0 && ctx->vi) { 332 /* 333 * We use different work queues for decryption and for verity 334 * because verity may require reading metadata pages that need 335 * decryption, and we shouldn't recurse to the same workqueue. 336 */ 337 INIT_WORK(&ctx->work, verify_bh); 338 fsverity_enqueue_verify_work(&ctx->work); 339 return; 340 } 341 end_buffer_async_read(bh, err == 0); 342 kfree(ctx); 343 } 344 345 /* 346 * I/O completion handler for block_read_full_folio() - pages 347 * which come unlocked at the end of I/O. 348 */ 349 static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate) 350 { 351 struct inode *inode = bh->b_folio->mapping->host; 352 bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode); 353 struct fsverity_info *vi = NULL; 354 355 /* needed by ext4 */ 356 if (bh->b_folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE)) 357 vi = fsverity_get_info(inode); 358 359 /* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. 
 */
	if (uptodate && (decrypt || vi)) {
		struct postprocess_bh_ctx *ctx =
			kmalloc(sizeof(*ctx), GFP_ATOMIC);

		if (ctx) {
			ctx->bh = bh;
			ctx->vi = vi;
			if (decrypt) {
				INIT_WORK(&ctx->work, decrypt_bh);
				fscrypt_enqueue_decrypt_work(&ctx->work);
			} else {
				INIT_WORK(&ctx->work, verify_bh);
				fsverity_enqueue_verify_work(&ctx->work);
			}
			return;
		}
		uptodate = 0;
	}
	end_buffer_async_read(bh, uptodate);
}

/*
 * Completion handler for block_write_full_folio() - folios which are unlocked
 * during I/O, and which have the writeback flag cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;

	BUG_ON(!buffer_async_write(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost async page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}

	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	folio_end_writeback(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O against any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read_io;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
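 *
 * Illustrative sketch (hypothetical, not taken from any particular
 * filesystem): dependent metadata is tagged as it is dirtied, e.g.
 *
 *	mark_buffer_dirty_inode(bh, inode);
 *
 * and the accumulated list is later flushed from that filesystem's fsync
 * method via
 *
 *	err = sync_mapping_buffers(inode->i_mapping);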
 *
 * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->i_private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for i_private_list is via the i_private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->i_private_lock does *not* protect mapping->i_private_list!  In fact,
 * mapping->i_private_list will always be protected by the backing blockdev's
 * ->i_private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->i_private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->i_private_list via these
 * utility functions are free to use i_private_lock and i_private_list for
 * whatever they want.  The only requirement is that list_empty(i_private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's i_private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.i_private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 * as you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->i_private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->i_private_data;

	if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->i_private_lock,
					&mapping->i_private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/**
 * generic_buffers_fsync_noflush - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.
 */
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
				  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;
	int ret;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode_state_read_once(inode) & I_DIRTY_ALL))
		goto out;
	if (datasync && !(inode_state_read_once(inode) & I_DIRTY_DATASYNC))
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (ret == 0)
		ret = err;

out:
	/* check and advance again to catch errors after syncing out buffers */
	err = file_check_and_advance_wb_err(file);
	if (ret == 0)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync_noflush);

/**
 * generic_buffers_fsync - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.  This also makes sure that
 * a device cache flush operation is called at the end.
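 *
 * Illustrative sketch (hypothetical filesystem, not part of this file): a
 * simple buffer_head based filesystem can use this directly as its fsync
 * method:
 *
 *	static int examplefs_fsync(struct file *file, loff_t start,
 *				   loff_t end, int datasync)
 *	{
 *		return generic_buffers_fsync(file, start, end, datasync);
 *	}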
 */
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
	if (!ret)
		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			  sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh;

	bh = __find_get_block_nonatomic(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			write_dirty_buffer(bh, 0);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_folio->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->i_private_data) {
		mapping->i_private_data = buffer_mapping;
	} else {
		BUG_ON(mapping->i_private_data != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->i_private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->i_private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->i_private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

/**
 * block_dirty_folio - Mark a folio as dirty.
 * @mapping: The address space containing this folio.
 * @folio: The folio to mark dirty.
 *
 * Filesystems which use buffer_heads can use this function as their
 * ->dirty_folio implementation.  Some filesystems need to do a little
 * work before calling this function.  Filesystems which do not use
 * buffer_heads should call filemap_dirty_folio() instead.
 *
 * If the folio has buffers, the uptodate buffers are set dirty, to
 * preserve dirty-state coherency between the folio and the buffers.
 * Buffers added to a dirty folio are created dirty.
 *
 * The buffers are dirtied before the folio is dirtied.  There's a small
 * race window in which writeback may see the folio cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the folio
 * dirty before the buffers, writeback could clear the folio dirty flag,
 * see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * folio on the dirty folio list.
 *
 * We use i_private_lock to lock against try_to_free_buffers() while
 * using the folio's buffer list.  This also prevents clean buffers
 * being added to the folio after it was set dirty.
 *
 * Context: May only be called from process context.  Does not sleep.
 * Caller must ensure that @folio cannot be truncated during this call,
 * typically by holding the folio lock or having a page in the folio
 * mapped and holding the page table lock.
 *
 * Return: True if the folio was dirtied; false if it was already dirtied.
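 *
 * Illustrative sketch (hypothetical, not part of this file): a buffer_head
 * based filesystem typically wires this up in its address_space_operations,
 * e.g.
 *
 *	static const struct address_space_operations examplefs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		...
 *	};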
728 */ 729 bool block_dirty_folio(struct address_space *mapping, struct folio *folio) 730 { 731 struct buffer_head *head; 732 bool newly_dirty; 733 734 spin_lock(&mapping->i_private_lock); 735 head = folio_buffers(folio); 736 if (head) { 737 struct buffer_head *bh = head; 738 739 do { 740 set_buffer_dirty(bh); 741 bh = bh->b_this_page; 742 } while (bh != head); 743 } 744 /* 745 * Lock out page's memcg migration to keep PageDirty 746 * synchronized with per-memcg dirty page counters. 747 */ 748 newly_dirty = !folio_test_set_dirty(folio); 749 spin_unlock(&mapping->i_private_lock); 750 751 if (newly_dirty) 752 __folio_mark_dirty(folio, mapping, 1); 753 754 if (newly_dirty) 755 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 756 757 return newly_dirty; 758 } 759 EXPORT_SYMBOL(block_dirty_folio); 760 761 /* 762 * Write out and wait upon a list of buffers. 763 * 764 * We have conflicting pressures: we want to make sure that all 765 * initially dirty buffers get waited on, but that any subsequently 766 * dirtied buffers don't. After all, we don't want fsync to last 767 * forever if somebody is actively writing to the file. 768 * 769 * Do this in two main stages: first we copy dirty buffers to a 770 * temporary inode list, queueing the writes as we go. Then we clean 771 * up, waiting for those writes to complete. 772 * 773 * During this second stage, any subsequent updates to the file may end 774 * up refiling the buffer on the original inode's dirty list again, so 775 * there is a chance we will end up with a buffer queued for write but 776 * not yet completed on that list. So, as a final cleanup we go through 777 * the osync code to catch these locked, dirty buffers without requeuing 778 * any newly dirty buffers for write. 779 */ 780 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list) 781 { 782 struct buffer_head *bh; 783 struct address_space *mapping; 784 int err = 0, err2; 785 struct blk_plug plug; 786 LIST_HEAD(tmp); 787 788 blk_start_plug(&plug); 789 790 spin_lock(lock); 791 while (!list_empty(list)) { 792 bh = BH_ENTRY(list->next); 793 mapping = bh->b_assoc_map; 794 __remove_assoc_queue(bh); 795 /* Avoid race with mark_buffer_dirty_inode() which does 796 * a lockless check and we rely on seeing the dirty bit */ 797 smp_mb(); 798 if (buffer_dirty(bh) || buffer_locked(bh)) { 799 list_add(&bh->b_assoc_buffers, &tmp); 800 bh->b_assoc_map = mapping; 801 if (buffer_dirty(bh)) { 802 get_bh(bh); 803 spin_unlock(lock); 804 /* 805 * Ensure any pending I/O completes so that 806 * write_dirty_buffer() actually writes the 807 * current contents - it is a noop if I/O is 808 * still in flight on potentially older 809 * contents. 810 */ 811 write_dirty_buffer(bh, REQ_SYNC); 812 813 /* 814 * Kick off IO for the previous mapping. Note 815 * that we will not run the very last mapping, 816 * wait_on_buffer() will do that for us 817 * through sync_buffer(). 
				 */
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	spin_unlock(lock);
	blk_finish_plug(&plug);
	spin_lock(lock);

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->i_private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's i_private_lock.  Which
 * assumes that all the buffers are against the blockdev.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->i_private_list;
		struct address_space *buffer_mapping = mapping->i_private_data;

		spin_lock(&buffer_mapping->i_private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->i_private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->i_private_list;
		struct address_space *buffer_mapping = mapping->i_private_data;

		spin_lock(&buffer_mapping->i_private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->i_private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers for the data area of a folio, given the
 * size of each buffer.  Use the bh->b_this_page linked list to follow the
 * buffers created.  Return NULL if unable to create more buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
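 *
 * Buffers hang off a folio as a circular, singly linked list.  The usual
 * way to walk them (an illustrative sketch of the pattern used throughout
 * this file) is:
 *
 *	bh = head = folio_buffers(folio);
 *	do {
 *		...
 *		bh = bh->b_this_page;
 *	} while (bh != head);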
918 */ 919 struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size, 920 gfp_t gfp) 921 { 922 struct buffer_head *bh, *head; 923 long offset; 924 struct mem_cgroup *memcg, *old_memcg; 925 926 /* The folio lock pins the memcg */ 927 memcg = folio_memcg(folio); 928 old_memcg = set_active_memcg(memcg); 929 930 head = NULL; 931 offset = folio_size(folio); 932 while ((offset -= size) >= 0) { 933 bh = alloc_buffer_head(gfp); 934 if (!bh) 935 goto no_grow; 936 937 bh->b_this_page = head; 938 bh->b_blocknr = -1; 939 head = bh; 940 941 bh->b_size = size; 942 943 /* Link the buffer to its folio */ 944 folio_set_bh(bh, folio, offset); 945 } 946 out: 947 set_active_memcg(old_memcg); 948 return head; 949 /* 950 * In case anything failed, we just free everything we got. 951 */ 952 no_grow: 953 if (head) { 954 do { 955 bh = head; 956 head = head->b_this_page; 957 free_buffer_head(bh); 958 } while (head); 959 } 960 961 goto out; 962 } 963 EXPORT_SYMBOL_GPL(folio_alloc_buffers); 964 965 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size) 966 { 967 gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT; 968 969 return folio_alloc_buffers(page_folio(page), size, gfp); 970 } 971 EXPORT_SYMBOL_GPL(alloc_page_buffers); 972 973 static inline void link_dev_buffers(struct folio *folio, 974 struct buffer_head *head) 975 { 976 struct buffer_head *bh, *tail; 977 978 bh = head; 979 do { 980 tail = bh; 981 bh = bh->b_this_page; 982 } while (bh); 983 tail->b_this_page = head; 984 folio_attach_private(folio, head); 985 } 986 987 static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size) 988 { 989 sector_t retval = ~((sector_t)0); 990 loff_t sz = bdev_nr_bytes(bdev); 991 992 if (sz) { 993 unsigned int sizebits = blksize_bits(size); 994 retval = (sz >> sizebits); 995 } 996 return retval; 997 } 998 999 /* 1000 * Initialise the state of a blockdev folio's buffers. 1001 */ 1002 static sector_t folio_init_buffers(struct folio *folio, 1003 struct block_device *bdev, unsigned size) 1004 { 1005 struct buffer_head *head = folio_buffers(folio); 1006 struct buffer_head *bh = head; 1007 bool uptodate = folio_test_uptodate(folio); 1008 sector_t block = div_u64(folio_pos(folio), size); 1009 sector_t end_block = blkdev_max_block(bdev, size); 1010 1011 do { 1012 if (!buffer_mapped(bh)) { 1013 bh->b_end_io = NULL; 1014 bh->b_private = NULL; 1015 bh->b_bdev = bdev; 1016 bh->b_blocknr = block; 1017 if (uptodate) 1018 set_buffer_uptodate(bh); 1019 if (block < end_block) 1020 set_buffer_mapped(bh); 1021 } 1022 block++; 1023 bh = bh->b_this_page; 1024 } while (bh != head); 1025 1026 /* 1027 * Caller needs to validate requested block against end of device. 1028 */ 1029 return end_block; 1030 } 1031 1032 /* 1033 * Create the page-cache folio that contains the requested block. 1034 * 1035 * This is used purely for blockdev mappings. 1036 * 1037 * Returns false if we have a failure which cannot be cured by retrying 1038 * without sleeping. Returns true if we succeeded, or the caller should retry. 
1039 */ 1040 static bool grow_dev_folio(struct block_device *bdev, sector_t block, 1041 pgoff_t index, unsigned size, gfp_t gfp) 1042 { 1043 struct address_space *mapping = bdev->bd_mapping; 1044 struct folio *folio; 1045 struct buffer_head *bh; 1046 sector_t end_block = 0; 1047 1048 folio = __filemap_get_folio(mapping, index, 1049 FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp); 1050 if (IS_ERR(folio)) 1051 return false; 1052 1053 bh = folio_buffers(folio); 1054 if (bh) { 1055 if (bh->b_size == size) { 1056 end_block = folio_init_buffers(folio, bdev, size); 1057 goto unlock; 1058 } 1059 1060 /* 1061 * Retrying may succeed; for example the folio may finish 1062 * writeback, or buffers may be cleaned. This should not 1063 * happen very often; maybe we have old buffers attached to 1064 * this blockdev's page cache and we're trying to change 1065 * the block size? 1066 */ 1067 if (!try_to_free_buffers(folio)) { 1068 end_block = ~0ULL; 1069 goto unlock; 1070 } 1071 } 1072 1073 bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT); 1074 if (!bh) 1075 goto unlock; 1076 1077 /* 1078 * Link the folio to the buffers and initialise them. Take the 1079 * lock to be atomic wrt __find_get_block(), which does not 1080 * run under the folio lock. 1081 */ 1082 spin_lock(&mapping->i_private_lock); 1083 link_dev_buffers(folio, bh); 1084 end_block = folio_init_buffers(folio, bdev, size); 1085 spin_unlock(&mapping->i_private_lock); 1086 unlock: 1087 folio_unlock(folio); 1088 folio_put(folio); 1089 return block < end_block; 1090 } 1091 1092 /* 1093 * Create buffers for the specified block device block's folio. If 1094 * that folio was dirty, the buffers are set dirty also. Returns false 1095 * if we've hit a permanent error. 1096 */ 1097 static bool grow_buffers(struct block_device *bdev, sector_t block, 1098 unsigned size, gfp_t gfp) 1099 { 1100 loff_t pos; 1101 1102 /* 1103 * Check for a block which lies outside our maximum possible 1104 * pagecache index. 1105 */ 1106 if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) { 1107 printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n", 1108 __func__, (unsigned long long)block, 1109 bdev); 1110 return false; 1111 } 1112 1113 /* Create a folio with the proper size buffers */ 1114 return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp); 1115 } 1116 1117 static struct buffer_head * 1118 __getblk_slow(struct block_device *bdev, sector_t block, 1119 unsigned size, gfp_t gfp) 1120 { 1121 bool blocking = gfpflags_allow_blocking(gfp); 1122 1123 if (WARN_ON_ONCE(!IS_ALIGNED(size, bdev_logical_block_size(bdev)))) { 1124 printk(KERN_ERR "getblk(): block size %d not aligned to logical block size %d\n", 1125 size, bdev_logical_block_size(bdev)); 1126 return NULL; 1127 } 1128 1129 for (;;) { 1130 struct buffer_head *bh; 1131 1132 if (!grow_buffers(bdev, block, size, gfp)) 1133 return NULL; 1134 1135 if (blocking) 1136 bh = __find_get_block_nonatomic(bdev, block, size); 1137 else 1138 bh = __find_get_block(bdev, block, size); 1139 if (bh) 1140 return bh; 1141 } 1142 } 1143 1144 /* 1145 * The relationship between dirty buffers and dirty pages: 1146 * 1147 * Whenever a page has any dirty buffers, the page's dirty bit is set, and 1148 * the page is tagged dirty in the page cache. 1149 * 1150 * At all times, the dirtiness of the buffers represents the dirtiness of 1151 * subsections of the page. If the page has buffers, the page dirty bit is 1152 * merely a hint about the true dirty state. 
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_folio() against that folio will discover all the uptodate
 * buffers, will set the folio uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
 * its backing page dirty, then tag the page as dirty in the page cache
 * and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->i_private_lock,
 * i_pages lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	trace_block_dirty_buffer(bh);

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct folio *folio = bh->b_folio;
		struct address_space *mapping = NULL;

		if (!folio_test_set_dirty(folio)) {
			mapping = folio->mapping;
			if (mapping)
				__folio_mark_dirty(folio, mapping, 0);
		}
		if (mapping)
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty);

void mark_buffer_write_io_error(struct buffer_head *bh)
{
	set_buffer_write_io_error(bh);
	/* FIXME: do we need to set this in both places? */
	if (bh->b_folio && bh->b_folio->mapping)
		mapping_set_error(bh->b_folio->mapping, -EIO);
	if (bh->b_assoc_map)
		mapping_set_error(bh->b_assoc_map, -EIO);
}
EXPORT_SYMBOL(mark_buffer_write_io_error);

/**
 * __brelse - Release a buffer.
 * @bh: The buffer to release.
 *
 * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
 */
void __brelse(struct buffer_head *bh)
{
	if (atomic_read(&bh->b_count)) {
		put_bh(bh);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/**
 * __bforget - Discard any dirty data in a buffer.
 * @bh: The buffer to forget.
 *
 * This variant of bforget() can be called if @bh is guaranteed to not
 * be NULL.
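 *
 * Illustrative sketch (hypothetical, not part of this file): the typical
 * life cycle of a metadata buffer that a filesystem modifies is
 *
 *	bh = sb_getblk(sb, blocknr);
 *	if (unlikely(!bh))
 *		return -ENOMEM;
 *	lock_buffer(bh);
 *	memcpy(bh->b_data, data, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 *
 * bforget() is used instead of brelse() when the caller knows the dirty
 * data will never be needed again, e.g. because the block was just freed.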
1245 */ 1246 void __bforget(struct buffer_head *bh) 1247 { 1248 clear_buffer_dirty(bh); 1249 if (bh->b_assoc_map) { 1250 struct address_space *buffer_mapping = bh->b_folio->mapping; 1251 1252 spin_lock(&buffer_mapping->i_private_lock); 1253 list_del_init(&bh->b_assoc_buffers); 1254 bh->b_assoc_map = NULL; 1255 spin_unlock(&buffer_mapping->i_private_lock); 1256 } 1257 __brelse(bh); 1258 } 1259 EXPORT_SYMBOL(__bforget); 1260 1261 static struct buffer_head *__bread_slow(struct buffer_head *bh) 1262 { 1263 lock_buffer(bh); 1264 if (buffer_uptodate(bh)) { 1265 unlock_buffer(bh); 1266 return bh; 1267 } else { 1268 get_bh(bh); 1269 bh->b_end_io = end_buffer_read_sync; 1270 submit_bh(REQ_OP_READ, bh); 1271 wait_on_buffer(bh); 1272 if (buffer_uptodate(bh)) 1273 return bh; 1274 } 1275 brelse(bh); 1276 return NULL; 1277 } 1278 1279 /* 1280 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block(). 1281 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their 1282 * refcount elevated by one when they're in an LRU. A buffer can only appear 1283 * once in a particular CPU's LRU. A single buffer can be present in multiple 1284 * CPU's LRUs at the same time. 1285 * 1286 * This is a transparent caching front-end to sb_bread(), sb_getblk() and 1287 * sb_find_get_block(). 1288 * 1289 * The LRUs themselves only need locking against invalidate_bh_lrus. We use 1290 * a local interrupt disable for that. 1291 */ 1292 1293 #define BH_LRU_SIZE 16 1294 1295 struct bh_lru { 1296 struct buffer_head *bhs[BH_LRU_SIZE]; 1297 }; 1298 1299 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }}; 1300 1301 #ifdef CONFIG_SMP 1302 #define bh_lru_lock() local_irq_disable() 1303 #define bh_lru_unlock() local_irq_enable() 1304 #else 1305 #define bh_lru_lock() preempt_disable() 1306 #define bh_lru_unlock() preempt_enable() 1307 #endif 1308 1309 static inline void check_irqs_on(void) 1310 { 1311 #ifdef irqs_disabled 1312 BUG_ON(irqs_disabled()); 1313 #endif 1314 } 1315 1316 /* 1317 * Install a buffer_head into this cpu's LRU. If not already in the LRU, it is 1318 * inserted at the front, and the buffer_head at the back if any is evicted. 1319 * Or, if already in the LRU it is moved to the front. 1320 */ 1321 static void bh_lru_install(struct buffer_head *bh) 1322 { 1323 struct buffer_head *evictee = bh; 1324 struct bh_lru *b; 1325 int i; 1326 1327 check_irqs_on(); 1328 bh_lru_lock(); 1329 1330 /* 1331 * the refcount of buffer_head in bh_lru prevents dropping the 1332 * attached page(i.e., try_to_free_buffers) so it could cause 1333 * failing page migration. 1334 * Skip putting upcoming bh into bh_lru until migration is done. 1335 */ 1336 if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) { 1337 bh_lru_unlock(); 1338 return; 1339 } 1340 1341 b = this_cpu_ptr(&bh_lrus); 1342 for (i = 0; i < BH_LRU_SIZE; i++) { 1343 swap(evictee, b->bhs[i]); 1344 if (evictee == bh) { 1345 bh_lru_unlock(); 1346 return; 1347 } 1348 } 1349 1350 get_bh(bh); 1351 bh_lru_unlock(); 1352 brelse(evictee); 1353 } 1354 1355 /* 1356 * Look up the bh in this cpu's LRU. If it's there, move it to the head. 
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	if (cpu_is_isolated(smp_processor_id())) {
		bh_lru_unlock();
		return NULL;
	}
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
		    bh->b_size == size) {
			if (i) {
				while (i) {
					__this_cpu_write(bh_lrus.bhs[i],
						__this_cpu_read(bh_lrus.bhs[i - 1]));
					i--;
				}
				__this_cpu_write(bh_lrus.bhs[0], bh);
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL.  Atomic context callers may also return NULL if the buffer is being
 * migrated; similarly the page is not marked accessed either.
 */
static struct buffer_head *
find_get_block_common(struct block_device *bdev, sector_t block,
			unsigned size, bool atomic)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		/* __find_get_block_slow will mark the page accessed */
		bh = __find_get_block_slow(bdev, block, atomic);
		if (bh)
			bh_lru_install(bh);
	} else
		touch_buffer(bh);

	return bh;
}

struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	return find_get_block_common(bdev, block, size, true);
}
EXPORT_SYMBOL(__find_get_block);

/* same as __find_get_block() but allows sleeping contexts */
struct buffer_head *
__find_get_block_nonatomic(struct block_device *bdev, sector_t block,
			   unsigned size)
{
	return find_get_block_common(bdev, block, size, false);
}
EXPORT_SYMBOL(__find_get_block_nonatomic);

/**
 * bdev_getblk - Get a buffer_head in a block device's buffer cache.
 * @bdev: The block device.
 * @block: The block number.
 * @size: The size of buffer_heads for this @bdev.
 * @gfp: The memory allocation flags to use.
 *
 * The returned buffer head has its reference count incremented, but is
 * not locked.  The caller should call brelse() when it has finished
 * with the buffer.  The buffer may not be uptodate.  If needed, the
 * caller can bring it uptodate either by reading it or overwriting it.
 *
 * Return: The buffer head, or NULL if memory could not be allocated.
 */
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp)
{
	struct buffer_head *bh;

	if (gfpflags_allow_blocking(gfp))
		bh = __find_get_block_nonatomic(bdev, block, size);
	else
		bh = __find_get_block(bdev, block, size);

	might_alloc(gfp);
	if (bh)
		return bh;

	return __getblk_slow(bdev, block, size, gfp);
}
EXPORT_SYMBOL(bdev_getblk);

/*
 * Do async read-ahead on a buffer..
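 *
 * Illustrative sketch (hypothetical, not part of this file): a filesystem
 * walking metadata blocks might hint at the next block while reading the
 * current one:
 *
 *	sb_breadahead(sb, blocknr + 1);
 *	bh = sb_bread(sb, blocknr);
 *	if (!bh)
 *		return -EIO;
 *	...
 *	brelse(bh);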
1465 */ 1466 void __breadahead(struct block_device *bdev, sector_t block, unsigned size) 1467 { 1468 struct buffer_head *bh = bdev_getblk(bdev, block, size, 1469 GFP_NOWAIT | __GFP_MOVABLE); 1470 1471 if (likely(bh)) { 1472 bh_readahead(bh, REQ_RAHEAD); 1473 brelse(bh); 1474 } 1475 } 1476 EXPORT_SYMBOL(__breadahead); 1477 1478 /** 1479 * __bread_gfp() - Read a block. 1480 * @bdev: The block device to read from. 1481 * @block: Block number in units of block size. 1482 * @size: The block size of this device in bytes. 1483 * @gfp: Not page allocation flags; see below. 1484 * 1485 * You are not expected to call this function. You should use one of 1486 * sb_bread(), sb_bread_unmovable() or __bread(). 1487 * 1488 * Read a specified block, and return the buffer head that refers to it. 1489 * If @gfp is 0, the memory will be allocated using the block device's 1490 * default GFP flags. If @gfp is __GFP_MOVABLE, the memory may be 1491 * allocated from a movable area. Do not pass in a complete set of 1492 * GFP flags. 1493 * 1494 * The returned buffer head has its refcount increased. The caller should 1495 * call brelse() when it has finished with the buffer. 1496 * 1497 * Context: May sleep waiting for I/O. 1498 * Return: NULL if the block was unreadable. 1499 */ 1500 struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block, 1501 unsigned size, gfp_t gfp) 1502 { 1503 struct buffer_head *bh; 1504 1505 gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS); 1506 1507 /* 1508 * Prefer looping in the allocator rather than here, at least that 1509 * code knows what it's doing. 1510 */ 1511 gfp |= __GFP_NOFAIL; 1512 1513 bh = bdev_getblk(bdev, block, size, gfp); 1514 1515 if (likely(bh) && !buffer_uptodate(bh)) 1516 bh = __bread_slow(bh); 1517 return bh; 1518 } 1519 EXPORT_SYMBOL(__bread_gfp); 1520 1521 static void __invalidate_bh_lrus(struct bh_lru *b) 1522 { 1523 int i; 1524 1525 for (i = 0; i < BH_LRU_SIZE; i++) { 1526 brelse(b->bhs[i]); 1527 b->bhs[i] = NULL; 1528 } 1529 } 1530 /* 1531 * invalidate_bh_lrus() is called rarely - but not only at unmount. 1532 * This doesn't race because it runs in each cpu either in irq 1533 * or with preempt disabled. 1534 */ 1535 static void invalidate_bh_lru(void *arg) 1536 { 1537 struct bh_lru *b = &get_cpu_var(bh_lrus); 1538 1539 __invalidate_bh_lrus(b); 1540 put_cpu_var(bh_lrus); 1541 } 1542 1543 bool has_bh_in_lru(int cpu, void *dummy) 1544 { 1545 struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu); 1546 int i; 1547 1548 for (i = 0; i < BH_LRU_SIZE; i++) { 1549 if (b->bhs[i]) 1550 return true; 1551 } 1552 1553 return false; 1554 } 1555 1556 void invalidate_bh_lrus(void) 1557 { 1558 on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1); 1559 } 1560 EXPORT_SYMBOL_GPL(invalidate_bh_lrus); 1561 1562 /* 1563 * It's called from workqueue context so we need a bh_lru_lock to close 1564 * the race with preemption/irq. 
 */
void invalidate_bh_lrus_cpu(void)
{
	struct bh_lru *b;

	bh_lru_lock();
	b = this_cpu_ptr(&bh_lrus);
	__invalidate_bh_lrus(b);
	bh_lru_unlock();
}

void folio_set_bh(struct buffer_head *bh, struct folio *folio,
		  unsigned long offset)
{
	bh->b_folio = folio;
	BUG_ON(offset >= folio_size(folio));
	if (folio_test_highmem(folio))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = folio_address(folio) + offset;
}
EXPORT_SYMBOL(folio_set_bh);

/*
 * Called when truncating a buffer on a page completely.
 */

/* Bits that are cleared during an invalidate */
#define BUFFER_FLAGS_DISCARD \
	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
	 1 << BH_Delay | 1 << BH_Unwritten)

static void discard_buffer(struct buffer_head * bh)
{
	unsigned long b_state;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	b_state = READ_ONCE(bh->b_state);
	do {
	} while (!try_cmpxchg_relaxed(&bh->b_state, &b_state,
				      b_state & ~BUFFER_FLAGS_DISCARD));
	unlock_buffer(bh);
}

/**
 * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
 * @folio: The folio which is affected.
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * block_invalidate_folio() is called when all or part of the folio has been
 * invalidated by a truncate operation.
 *
 * block_invalidate_folio() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct buffer_head *head, *bh, *next;
	size_t curr_off = 0;
	size_t stop = length + offset;

	BUG_ON(!folio_test_locked(folio));

	/*
	 * Check for overflow
	 */
	BUG_ON(stop > folio_size(folio) || stop < length);

	head = folio_buffers(folio);
	if (!head)
		return;

	bh = head;
	do {
		size_t next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * Are we still fully in range ?
		 */
		if (next_off > stop)
			goto out;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire folio is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (length == folio_size(folio))
		filemap_release_folio(folio, 0);
out:
	folio_clear_mappedtodisk(folio);
}
EXPORT_SYMBOL(block_invalidate_folio);

/*
 * We attach and possibly dirty the buffers atomically wrt
 * block_dirty_folio() via i_private_lock.  try_to_free_buffers
 * is already excluded via the folio lock.
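 *
 * Illustrative sketch (mirroring folio_create_buffers() below): a caller
 * that needs buffers attached to a locked folio typically does
 *
 *	head = folio_buffers(folio);
 *	if (!head)
 *		head = create_empty_buffers(folio,
 *				1 << inode->i_blkbits, 0);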
1682 */ 1683 struct buffer_head *create_empty_buffers(struct folio *folio, 1684 unsigned long blocksize, unsigned long b_state) 1685 { 1686 struct buffer_head *bh, *head, *tail; 1687 gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL; 1688 1689 head = folio_alloc_buffers(folio, blocksize, gfp); 1690 bh = head; 1691 do { 1692 bh->b_state |= b_state; 1693 tail = bh; 1694 bh = bh->b_this_page; 1695 } while (bh); 1696 tail->b_this_page = head; 1697 1698 spin_lock(&folio->mapping->i_private_lock); 1699 if (folio_test_uptodate(folio) || folio_test_dirty(folio)) { 1700 bh = head; 1701 do { 1702 if (folio_test_dirty(folio)) 1703 set_buffer_dirty(bh); 1704 if (folio_test_uptodate(folio)) 1705 set_buffer_uptodate(bh); 1706 bh = bh->b_this_page; 1707 } while (bh != head); 1708 } 1709 folio_attach_private(folio, head); 1710 spin_unlock(&folio->mapping->i_private_lock); 1711 1712 return head; 1713 } 1714 EXPORT_SYMBOL(create_empty_buffers); 1715 1716 /** 1717 * clean_bdev_aliases: clean a range of buffers in block device 1718 * @bdev: Block device to clean buffers in 1719 * @block: Start of a range of blocks to clean 1720 * @len: Number of blocks to clean 1721 * 1722 * We are taking a range of blocks for data and we don't want writeback of any 1723 * buffer-cache aliases starting from return from this function and until the 1724 * moment when something will explicitly mark the buffer dirty (hopefully that 1725 * will not happen until we will free that block ;-) We don't even need to mark 1726 * it not-uptodate - nobody can expect anything from a newly allocated buffer 1727 * anyway. We used to use unmap_buffer() for such invalidation, but that was 1728 * wrong. We definitely don't want to mark the alias unmapped, for example - it 1729 * would confuse anyone who might pick it with bread() afterwards... 1730 * 1731 * Also.. Note that bforget() doesn't lock the buffer. So there can be 1732 * writeout I/O going on against recently-freed buffers. We don't wait on that 1733 * I/O in bforget() - it's more efficient to wait on the I/O only if we really 1734 * need to. That happens here. 1735 */ 1736 void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len) 1737 { 1738 struct address_space *bd_mapping = bdev->bd_mapping; 1739 const int blkbits = bd_mapping->host->i_blkbits; 1740 struct folio_batch fbatch; 1741 pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE; 1742 pgoff_t end; 1743 int i, count; 1744 struct buffer_head *bh; 1745 struct buffer_head *head; 1746 1747 end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE; 1748 folio_batch_init(&fbatch); 1749 while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) { 1750 count = folio_batch_count(&fbatch); 1751 for (i = 0; i < count; i++) { 1752 struct folio *folio = fbatch.folios[i]; 1753 1754 if (!folio_buffers(folio)) 1755 continue; 1756 /* 1757 * We use folio lock instead of bd_mapping->i_private_lock 1758 * to pin buffers here since we can afford to sleep and 1759 * it scales better than a global spinlock lock. 
1760 */ 1761 folio_lock(folio); 1762 /* Recheck when the folio is locked which pins bhs */ 1763 head = folio_buffers(folio); 1764 if (!head) 1765 goto unlock_page; 1766 bh = head; 1767 do { 1768 if (!buffer_mapped(bh) || (bh->b_blocknr < block)) 1769 goto next; 1770 if (bh->b_blocknr >= block + len) 1771 break; 1772 clear_buffer_dirty(bh); 1773 wait_on_buffer(bh); 1774 clear_buffer_req(bh); 1775 next: 1776 bh = bh->b_this_page; 1777 } while (bh != head); 1778 unlock_page: 1779 folio_unlock(folio); 1780 } 1781 folio_batch_release(&fbatch); 1782 cond_resched(); 1783 /* End of range already reached? */ 1784 if (index > end || !index) 1785 break; 1786 } 1787 } 1788 EXPORT_SYMBOL(clean_bdev_aliases); 1789 1790 static struct buffer_head *folio_create_buffers(struct folio *folio, 1791 struct inode *inode, 1792 unsigned int b_state) 1793 { 1794 struct buffer_head *bh; 1795 1796 BUG_ON(!folio_test_locked(folio)); 1797 1798 bh = folio_buffers(folio); 1799 if (!bh) 1800 bh = create_empty_buffers(folio, 1801 1 << READ_ONCE(inode->i_blkbits), b_state); 1802 return bh; 1803 } 1804 1805 /* 1806 * NOTE! All mapped/uptodate combinations are valid: 1807 * 1808 * Mapped Uptodate Meaning 1809 * 1810 * No No "unknown" - must do get_block() 1811 * No Yes "hole" - zero-filled 1812 * Yes No "allocated" - allocated on disk, not read in 1813 * Yes Yes "valid" - allocated and up-to-date in memory. 1814 * 1815 * "Dirty" is valid only with the last case (mapped+uptodate). 1816 */ 1817 1818 /* 1819 * While block_write_full_folio is writing back the dirty buffers under 1820 * the page lock, whoever dirtied the buffers may decide to clean them 1821 * again at any time. We handle that by only looking at the buffer 1822 * state inside lock_buffer(). 1823 * 1824 * If block_write_full_folio() is called for regular writeback 1825 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a 1826 * locked buffer. This only can happen if someone has written the buffer 1827 * directly, with submit_bh(). At the address_space level PageWriteback 1828 * prevents this contention from occurring. 1829 * 1830 * If block_write_full_folio() is called with wbc->sync_mode == 1831 * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this 1832 * causes the writes to be flagged as synchronous writes. 1833 */ 1834 int __block_write_full_folio(struct inode *inode, struct folio *folio, 1835 get_block_t *get_block, struct writeback_control *wbc) 1836 { 1837 int err; 1838 sector_t block; 1839 sector_t last_block; 1840 struct buffer_head *bh, *head; 1841 size_t blocksize; 1842 int nr_underway = 0; 1843 blk_opf_t write_flags = wbc_to_write_flags(wbc); 1844 1845 head = folio_create_buffers(folio, inode, 1846 (1 << BH_Dirty) | (1 << BH_Uptodate)); 1847 1848 /* 1849 * Be very careful. We have no exclusion from block_dirty_folio 1850 * here, and the (potentially unmapped) buffers may become dirty at 1851 * any time. If a buffer becomes dirty here after we've inspected it 1852 * then we just miss that fact, and the folio stays dirty. 1853 * 1854 * Buffers outside i_size may be dirtied by block_dirty_folio; 1855 * handle that here by just cleaning them. 1856 */ 1857 1858 bh = head; 1859 blocksize = bh->b_size; 1860 1861 block = div_u64(folio_pos(folio), blocksize); 1862 last_block = div_u64(i_size_read(inode) - 1, blocksize); 1863 1864 /* 1865 * Get all the dirty buffers mapped to disk addresses and 1866 * handle any aliases from the underlying blockdev's mapping. 
1867 */ 1868 do { 1869 if (block > last_block) { 1870 /* 1871 * mapped buffers outside i_size will occur, because 1872 * this folio can be outside i_size when there is a 1873 * truncate in progress. 1874 */ 1875 /* 1876 * The buffer was zeroed by block_write_full_folio() 1877 */ 1878 clear_buffer_dirty(bh); 1879 set_buffer_uptodate(bh); 1880 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) && 1881 buffer_dirty(bh)) { 1882 WARN_ON(bh->b_size != blocksize); 1883 err = get_block(inode, block, bh, 1); 1884 if (err) 1885 goto recover; 1886 clear_buffer_delay(bh); 1887 if (buffer_new(bh)) { 1888 /* blockdev mappings never come here */ 1889 clear_buffer_new(bh); 1890 clean_bdev_bh_alias(bh); 1891 } 1892 } 1893 bh = bh->b_this_page; 1894 block++; 1895 } while (bh != head); 1896 1897 do { 1898 if (!buffer_mapped(bh)) 1899 continue; 1900 /* 1901 * If it's a fully non-blocking write attempt and we cannot 1902 * lock the buffer then redirty the folio. Note that this can 1903 * potentially cause a busy-wait loop from writeback threads 1904 * and kswapd activity, but those code paths have their own 1905 * higher-level throttling. 1906 */ 1907 if (wbc->sync_mode != WB_SYNC_NONE) { 1908 lock_buffer(bh); 1909 } else if (!trylock_buffer(bh)) { 1910 folio_redirty_for_writepage(wbc, folio); 1911 continue; 1912 } 1913 if (test_clear_buffer_dirty(bh)) { 1914 mark_buffer_async_write_endio(bh, 1915 end_buffer_async_write); 1916 } else { 1917 unlock_buffer(bh); 1918 } 1919 } while ((bh = bh->b_this_page) != head); 1920 1921 /* 1922 * The folio and its buffers are protected by the writeback flag, 1923 * so we can drop the bh refcounts early. 1924 */ 1925 BUG_ON(folio_test_writeback(folio)); 1926 folio_start_writeback(folio); 1927 1928 do { 1929 struct buffer_head *next = bh->b_this_page; 1930 if (buffer_async_write(bh)) { 1931 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, 1932 inode->i_write_hint, wbc); 1933 nr_underway++; 1934 } 1935 bh = next; 1936 } while (bh != head); 1937 folio_unlock(folio); 1938 1939 err = 0; 1940 done: 1941 if (nr_underway == 0) { 1942 /* 1943 * The folio was marked dirty, but the buffers were 1944 * clean. Someone wrote them back by hand with 1945 * write_dirty_buffer/submit_bh. A rare case. 1946 */ 1947 folio_end_writeback(folio); 1948 1949 /* 1950 * The folio and buffer_heads can be released at any time from 1951 * here on. 1952 */ 1953 } 1954 return err; 1955 1956 recover: 1957 /* 1958 * ENOSPC, or some other error. We may already have added some 1959 * blocks to the file, so we need to write these out to avoid 1960 * exposing stale data. 1961 * The folio is currently locked and not marked for writeback 1962 */ 1963 bh = head; 1964 /* Recovery: lock and submit the mapped buffers */ 1965 do { 1966 if (buffer_mapped(bh) && buffer_dirty(bh) && 1967 !buffer_delay(bh)) { 1968 lock_buffer(bh); 1969 mark_buffer_async_write_endio(bh, 1970 end_buffer_async_write); 1971 } else { 1972 /* 1973 * The buffer may have been set dirty during 1974 * attachment to a dirty folio. 
1975 */ 1976 clear_buffer_dirty(bh); 1977 } 1978 } while ((bh = bh->b_this_page) != head); 1979 BUG_ON(folio_test_writeback(folio)); 1980 mapping_set_error(folio->mapping, err); 1981 folio_start_writeback(folio); 1982 do { 1983 struct buffer_head *next = bh->b_this_page; 1984 if (buffer_async_write(bh)) { 1985 clear_buffer_dirty(bh); 1986 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, 1987 inode->i_write_hint, wbc); 1988 nr_underway++; 1989 } 1990 bh = next; 1991 } while (bh != head); 1992 folio_unlock(folio); 1993 goto done; 1994 } 1995 EXPORT_SYMBOL(__block_write_full_folio); 1996 1997 /* 1998 * If a folio has any new buffers, zero them out here, and mark them uptodate 1999 * and dirty so they'll be written out (in order to prevent uninitialised 2000 * block data from leaking). And clear the new bit. 2001 */ 2002 void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to) 2003 { 2004 size_t block_start, block_end; 2005 struct buffer_head *head, *bh; 2006 2007 BUG_ON(!folio_test_locked(folio)); 2008 head = folio_buffers(folio); 2009 if (!head) 2010 return; 2011 2012 bh = head; 2013 block_start = 0; 2014 do { 2015 block_end = block_start + bh->b_size; 2016 2017 if (buffer_new(bh)) { 2018 if (block_end > from && block_start < to) { 2019 if (!folio_test_uptodate(folio)) { 2020 size_t start, xend; 2021 2022 start = max(from, block_start); 2023 xend = min(to, block_end); 2024 2025 folio_zero_segment(folio, start, xend); 2026 set_buffer_uptodate(bh); 2027 } 2028 2029 clear_buffer_new(bh); 2030 mark_buffer_dirty(bh); 2031 } 2032 } 2033 2034 block_start = block_end; 2035 bh = bh->b_this_page; 2036 } while (bh != head); 2037 } 2038 EXPORT_SYMBOL(folio_zero_new_buffers); 2039 2040 static int 2041 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, 2042 const struct iomap *iomap) 2043 { 2044 loff_t offset = (loff_t)block << inode->i_blkbits; 2045 2046 bh->b_bdev = iomap->bdev; 2047 2048 /* 2049 * Block points to offset in file we need to map, iomap contains 2050 * the offset at which the map starts. If the map ends before the 2051 * current block, then do not map the buffer and let the caller 2052 * handle it. 2053 */ 2054 if (offset >= iomap->offset + iomap->length) 2055 return -EIO; 2056 2057 switch (iomap->type) { 2058 case IOMAP_HOLE: 2059 /* 2060 * If the buffer is not up to date or beyond the current EOF, 2061 * we need to mark it as new to ensure sub-block zeroing is 2062 * executed if necessary. 2063 */ 2064 if (!buffer_uptodate(bh) || 2065 (offset >= i_size_read(inode))) 2066 set_buffer_new(bh); 2067 return 0; 2068 case IOMAP_DELALLOC: 2069 if (!buffer_uptodate(bh) || 2070 (offset >= i_size_read(inode))) 2071 set_buffer_new(bh); 2072 set_buffer_uptodate(bh); 2073 set_buffer_mapped(bh); 2074 set_buffer_delay(bh); 2075 return 0; 2076 case IOMAP_UNWRITTEN: 2077 /* 2078 * For unwritten regions, we always need to ensure that regions 2079 * in the block we are not writing to are zeroed. Mark the 2080 * buffer as new to ensure this. 2081 */ 2082 set_buffer_new(bh); 2083 set_buffer_unwritten(bh); 2084 fallthrough; 2085 case IOMAP_MAPPED: 2086 if ((iomap->flags & IOMAP_F_NEW) || 2087 offset >= i_size_read(inode)) { 2088 /* 2089 * This can happen if truncating the block device races 2090 * with the check in the caller as i_size updates on 2091 * block devices aren't synchronized by i_rwsem for 2092 * block devices. 
2093 */ 2094 if (S_ISBLK(inode->i_mode)) 2095 return -EIO; 2096 set_buffer_new(bh); 2097 } 2098 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >> 2099 inode->i_blkbits; 2100 set_buffer_mapped(bh); 2101 return 0; 2102 default: 2103 WARN_ON_ONCE(1); 2104 return -EIO; 2105 } 2106 } 2107 2108 int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len, 2109 get_block_t *get_block, const struct iomap *iomap) 2110 { 2111 size_t from = offset_in_folio(folio, pos); 2112 size_t to = from + len; 2113 struct inode *inode = folio->mapping->host; 2114 size_t block_start, block_end; 2115 sector_t block; 2116 int err = 0; 2117 size_t blocksize; 2118 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; 2119 2120 BUG_ON(!folio_test_locked(folio)); 2121 BUG_ON(to > folio_size(folio)); 2122 BUG_ON(from > to); 2123 2124 head = folio_create_buffers(folio, inode, 0); 2125 blocksize = head->b_size; 2126 block = div_u64(folio_pos(folio), blocksize); 2127 2128 for (bh = head, block_start = 0; bh != head || !block_start; 2129 block++, block_start=block_end, bh = bh->b_this_page) { 2130 block_end = block_start + blocksize; 2131 if (block_end <= from || block_start >= to) { 2132 if (folio_test_uptodate(folio)) { 2133 if (!buffer_uptodate(bh)) 2134 set_buffer_uptodate(bh); 2135 } 2136 continue; 2137 } 2138 if (buffer_new(bh)) 2139 clear_buffer_new(bh); 2140 if (!buffer_mapped(bh)) { 2141 WARN_ON(bh->b_size != blocksize); 2142 if (get_block) 2143 err = get_block(inode, block, bh, 1); 2144 else 2145 err = iomap_to_bh(inode, block, bh, iomap); 2146 if (err) 2147 break; 2148 2149 if (buffer_new(bh)) { 2150 clean_bdev_bh_alias(bh); 2151 if (folio_test_uptodate(folio)) { 2152 clear_buffer_new(bh); 2153 set_buffer_uptodate(bh); 2154 mark_buffer_dirty(bh); 2155 continue; 2156 } 2157 if (block_end > to || block_start < from) 2158 folio_zero_segments(folio, 2159 to, block_end, 2160 block_start, from); 2161 continue; 2162 } 2163 } 2164 if (folio_test_uptodate(folio)) { 2165 if (!buffer_uptodate(bh)) 2166 set_buffer_uptodate(bh); 2167 continue; 2168 } 2169 if (!buffer_uptodate(bh) && !buffer_delay(bh) && 2170 !buffer_unwritten(bh) && 2171 (block_start < from || block_end > to)) { 2172 bh_read_nowait(bh, 0); 2173 *wait_bh++=bh; 2174 } 2175 } 2176 /* 2177 * If we issued read requests - let them complete. 
2178 */ 2179 while(wait_bh > wait) { 2180 wait_on_buffer(*--wait_bh); 2181 if (!buffer_uptodate(*wait_bh)) 2182 err = -EIO; 2183 } 2184 if (unlikely(err)) 2185 folio_zero_new_buffers(folio, from, to); 2186 return err; 2187 } 2188 2189 int __block_write_begin(struct folio *folio, loff_t pos, unsigned len, 2190 get_block_t *get_block) 2191 { 2192 return __block_write_begin_int(folio, pos, len, get_block, NULL); 2193 } 2194 EXPORT_SYMBOL(__block_write_begin); 2195 2196 void block_commit_write(struct folio *folio, size_t from, size_t to) 2197 { 2198 size_t block_start, block_end; 2199 bool partial = false; 2200 unsigned blocksize; 2201 struct buffer_head *bh, *head; 2202 2203 bh = head = folio_buffers(folio); 2204 if (!bh) 2205 return; 2206 blocksize = bh->b_size; 2207 2208 block_start = 0; 2209 do { 2210 block_end = block_start + blocksize; 2211 if (block_end <= from || block_start >= to) { 2212 if (!buffer_uptodate(bh)) 2213 partial = true; 2214 } else { 2215 set_buffer_uptodate(bh); 2216 mark_buffer_dirty(bh); 2217 } 2218 if (buffer_new(bh)) 2219 clear_buffer_new(bh); 2220 2221 block_start = block_end; 2222 bh = bh->b_this_page; 2223 } while (bh != head); 2224 2225 /* 2226 * If this is a partial write which happened to make all buffers 2227 * uptodate then we can optimize away a bogus read_folio() for 2228 * the next read(). Here we 'discover' whether the folio went 2229 * uptodate as a result of this (potentially partial) write. 2230 */ 2231 if (!partial) 2232 folio_mark_uptodate(folio); 2233 } 2234 EXPORT_SYMBOL(block_commit_write); 2235 2236 /* 2237 * block_write_begin takes care of the basic task of block allocation and 2238 * bringing partial write blocks uptodate first. 2239 * 2240 * The filesystem needs to handle block truncation upon failure. 2241 */ 2242 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, 2243 struct folio **foliop, get_block_t *get_block) 2244 { 2245 pgoff_t index = pos >> PAGE_SHIFT; 2246 struct folio *folio; 2247 int status; 2248 2249 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, 2250 mapping_gfp_mask(mapping)); 2251 if (IS_ERR(folio)) 2252 return PTR_ERR(folio); 2253 2254 status = __block_write_begin_int(folio, pos, len, get_block, NULL); 2255 if (unlikely(status)) { 2256 folio_unlock(folio); 2257 folio_put(folio); 2258 folio = NULL; 2259 } 2260 2261 *foliop = folio; 2262 return status; 2263 } 2264 EXPORT_SYMBOL(block_write_begin); 2265 2266 int block_write_end(loff_t pos, unsigned len, unsigned copied, 2267 struct folio *folio) 2268 { 2269 size_t start = pos - folio_pos(folio); 2270 2271 if (unlikely(copied < len)) { 2272 /* 2273 * The buffers that were written will now be uptodate, so 2274 * we don't have to worry about a read_folio reading them 2275 * and overwriting a partial write. However if we have 2276 * encountered a short write and only partially written 2277 * into a buffer, it will not be marked uptodate, so a 2278 * read_folio might come in and destroy our partial write. 2279 * 2280 * Do the simplest thing, and just treat any short write to a 2281 * non uptodate folio as a zero-length write, and force the 2282 * caller to redo the whole thing. 
2283 */ 2284 if (!folio_test_uptodate(folio)) 2285 copied = 0; 2286 2287 folio_zero_new_buffers(folio, start+copied, start+len); 2288 } 2289 flush_dcache_folio(folio); 2290 2291 /* This could be a short (even 0-length) commit */ 2292 block_commit_write(folio, start, start + copied); 2293 2294 return copied; 2295 } 2296 EXPORT_SYMBOL(block_write_end); 2297 2298 int generic_write_end(const struct kiocb *iocb, struct address_space *mapping, 2299 loff_t pos, unsigned len, unsigned copied, 2300 struct folio *folio, void *fsdata) 2301 { 2302 struct inode *inode = mapping->host; 2303 loff_t old_size = inode->i_size; 2304 bool i_size_changed = false; 2305 2306 copied = block_write_end(pos, len, copied, folio); 2307 2308 /* 2309 * No need to use i_size_read() here, the i_size cannot change under us 2310 * because we hold i_rwsem. 2311 * 2312 * But it's important to update i_size while still holding folio lock: 2313 * page writeout could otherwise come in and zero beyond i_size. 2314 */ 2315 if (pos + copied > inode->i_size) { 2316 i_size_write(inode, pos + copied); 2317 i_size_changed = true; 2318 } 2319 2320 folio_unlock(folio); 2321 folio_put(folio); 2322 2323 if (old_size < pos) 2324 pagecache_isize_extended(inode, old_size, pos); 2325 /* 2326 * Don't mark the inode dirty under page lock. First, it unnecessarily 2327 * makes the holding time of page lock longer. Second, it forces lock 2328 * ordering of page lock and transaction start for journaling 2329 * filesystems. 2330 */ 2331 if (i_size_changed) 2332 mark_inode_dirty(inode); 2333 return copied; 2334 } 2335 EXPORT_SYMBOL(generic_write_end); 2336 2337 /* 2338 * block_is_partially_uptodate checks whether buffers within a folio are 2339 * uptodate or not. 2340 * 2341 * Returns true if all buffers which correspond to the specified part 2342 * of the folio are uptodate. 2343 */ 2344 bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count) 2345 { 2346 unsigned block_start, block_end, blocksize; 2347 unsigned to; 2348 struct buffer_head *bh, *head; 2349 bool ret = true; 2350 2351 head = folio_buffers(folio); 2352 if (!head) 2353 return false; 2354 blocksize = head->b_size; 2355 to = min(folio_size(folio) - from, count); 2356 to = from + to; 2357 if (from < blocksize && to > folio_size(folio) - blocksize) 2358 return false; 2359 2360 bh = head; 2361 block_start = 0; 2362 do { 2363 block_end = block_start + blocksize; 2364 if (block_end > from && block_start < to) { 2365 if (!buffer_uptodate(bh)) { 2366 ret = false; 2367 break; 2368 } 2369 if (block_end >= to) 2370 break; 2371 } 2372 block_start = block_end; 2373 bh = bh->b_this_page; 2374 } while (bh != head); 2375 2376 return ret; 2377 } 2378 EXPORT_SYMBOL(block_is_partially_uptodate); 2379 2380 /* 2381 * Generic "read_folio" function for block devices that have the normal 2382 * get_block functionality. This is most of the block device filesystems. 2383 * Reads the folio asynchronously --- the unlock_buffer() and 2384 * set/clear_buffer_uptodate() functions propagate buffer state into the 2385 * folio once IO has completed. 2386 */ 2387 int block_read_full_folio(struct folio *folio, get_block_t *get_block) 2388 { 2389 struct inode *inode = folio->mapping->host; 2390 sector_t iblock, lblock; 2391 struct buffer_head *bh, *head, *prev = NULL; 2392 size_t blocksize; 2393 int fully_mapped = 1; 2394 bool page_error = false; 2395 loff_t limit = i_size_read(inode); 2396 2397 /* This is needed for ext4. 
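	 * With fsverity enabled, ext4 keeps the Merkle tree blocks past EOF
	 * in the file and reads them back through this path, so the read
	 * limit must not be clamped to i_size in that case.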
*/ 2398 if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode)) 2399 limit = inode->i_sb->s_maxbytes; 2400 2401 head = folio_create_buffers(folio, inode, 0); 2402 blocksize = head->b_size; 2403 2404 iblock = div_u64(folio_pos(folio), blocksize); 2405 lblock = div_u64(limit + blocksize - 1, blocksize); 2406 bh = head; 2407 2408 do { 2409 if (buffer_uptodate(bh)) 2410 continue; 2411 2412 if (!buffer_mapped(bh)) { 2413 int err = 0; 2414 2415 fully_mapped = 0; 2416 if (iblock < lblock) { 2417 WARN_ON(bh->b_size != blocksize); 2418 err = get_block(inode, iblock, bh, 0); 2419 if (err) 2420 page_error = true; 2421 } 2422 if (!buffer_mapped(bh)) { 2423 folio_zero_range(folio, bh_offset(bh), 2424 blocksize); 2425 if (!err) 2426 set_buffer_uptodate(bh); 2427 continue; 2428 } 2429 /* 2430 * get_block() might have updated the buffer 2431 * synchronously 2432 */ 2433 if (buffer_uptodate(bh)) 2434 continue; 2435 } 2436 2437 lock_buffer(bh); 2438 if (buffer_uptodate(bh)) { 2439 unlock_buffer(bh); 2440 continue; 2441 } 2442 2443 mark_buffer_async_read(bh); 2444 if (prev) 2445 submit_bh(REQ_OP_READ, prev); 2446 prev = bh; 2447 } while (iblock++, (bh = bh->b_this_page) != head); 2448 2449 if (fully_mapped) 2450 folio_set_mappedtodisk(folio); 2451 2452 /* 2453 * All buffers are uptodate or get_block() returned an error 2454 * when trying to map them - we must finish the read because 2455 * end_buffer_async_read() will never be called on any buffer 2456 * in this folio. 2457 */ 2458 if (prev) 2459 submit_bh(REQ_OP_READ, prev); 2460 else 2461 folio_end_read(folio, !page_error); 2462 2463 return 0; 2464 } 2465 EXPORT_SYMBOL(block_read_full_folio); 2466 2467 /* utility function for filesystems that need to do work on expanding 2468 * truncates. Uses filesystem pagecache writes to allow the filesystem to 2469 * deal with the hole. 
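 *
 * A sketch of typical use (illustrative only, not taken verbatim from any
 * filesystem): an ->setattr() implementation may grow the file before the
 * new i_size is set, e.g.
 *
 *	if (attr->ia_size > inode->i_size) {
 *		error = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (error)
 *			return error;
 *	}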
2470 */ 2471 int generic_cont_expand_simple(struct inode *inode, loff_t size) 2472 { 2473 struct address_space *mapping = inode->i_mapping; 2474 const struct address_space_operations *aops = mapping->a_ops; 2475 struct folio *folio; 2476 void *fsdata = NULL; 2477 int err; 2478 2479 err = inode_newsize_ok(inode, size); 2480 if (err) 2481 goto out; 2482 2483 err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata); 2484 if (err) 2485 goto out; 2486 2487 err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata); 2488 BUG_ON(err > 0); 2489 2490 out: 2491 return err; 2492 } 2493 EXPORT_SYMBOL(generic_cont_expand_simple); 2494 2495 static int cont_expand_zero(const struct kiocb *iocb, 2496 struct address_space *mapping, 2497 loff_t pos, loff_t *bytes) 2498 { 2499 struct inode *inode = mapping->host; 2500 const struct address_space_operations *aops = mapping->a_ops; 2501 unsigned int blocksize = i_blocksize(inode); 2502 struct folio *folio; 2503 void *fsdata = NULL; 2504 pgoff_t index, curidx; 2505 loff_t curpos; 2506 unsigned zerofrom, offset, len; 2507 int err = 0; 2508 2509 index = pos >> PAGE_SHIFT; 2510 offset = pos & ~PAGE_MASK; 2511 2512 while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) { 2513 zerofrom = curpos & ~PAGE_MASK; 2514 if (zerofrom & (blocksize-1)) { 2515 *bytes |= (blocksize-1); 2516 (*bytes)++; 2517 } 2518 len = PAGE_SIZE - zerofrom; 2519 2520 err = aops->write_begin(iocb, mapping, curpos, len, 2521 &folio, &fsdata); 2522 if (err) 2523 goto out; 2524 folio_zero_range(folio, offset_in_folio(folio, curpos), len); 2525 err = aops->write_end(iocb, mapping, curpos, len, len, 2526 folio, fsdata); 2527 if (err < 0) 2528 goto out; 2529 BUG_ON(err != len); 2530 err = 0; 2531 2532 balance_dirty_pages_ratelimited(mapping); 2533 2534 if (fatal_signal_pending(current)) { 2535 err = -EINTR; 2536 goto out; 2537 } 2538 } 2539 2540 /* page covers the boundary, find the boundary offset */ 2541 if (index == curidx) { 2542 zerofrom = curpos & ~PAGE_MASK; 2543 /* if we will expand the thing last block will be filled */ 2544 if (offset <= zerofrom) { 2545 goto out; 2546 } 2547 if (zerofrom & (blocksize-1)) { 2548 *bytes |= (blocksize-1); 2549 (*bytes)++; 2550 } 2551 len = offset - zerofrom; 2552 2553 err = aops->write_begin(iocb, mapping, curpos, len, 2554 &folio, &fsdata); 2555 if (err) 2556 goto out; 2557 folio_zero_range(folio, offset_in_folio(folio, curpos), len); 2558 err = aops->write_end(iocb, mapping, curpos, len, len, 2559 folio, fsdata); 2560 if (err < 0) 2561 goto out; 2562 BUG_ON(err != len); 2563 err = 0; 2564 } 2565 out: 2566 return err; 2567 } 2568 2569 /* 2570 * For moronic filesystems that do not allow holes in file. 2571 * We may have to extend the file. 
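 * (FAT is the classic case: it cannot represent sparse files, so a write
 * past EOF must first zero-fill everything between the old and the new end
 * of file; cont_expand_zero() above does exactly that through the
 * write_begin/write_end pagecache path.)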
2572 */ 2573 int cont_write_begin(const struct kiocb *iocb, struct address_space *mapping, 2574 loff_t pos, unsigned len, struct folio **foliop, 2575 void **fsdata, get_block_t *get_block, loff_t *bytes) 2576 { 2577 struct inode *inode = mapping->host; 2578 unsigned int blocksize = i_blocksize(inode); 2579 unsigned int zerofrom; 2580 int err; 2581 2582 err = cont_expand_zero(iocb, mapping, pos, bytes); 2583 if (err) 2584 return err; 2585 2586 zerofrom = *bytes & ~PAGE_MASK; 2587 if (pos+len > *bytes && zerofrom & (blocksize-1)) { 2588 *bytes |= (blocksize-1); 2589 (*bytes)++; 2590 } 2591 2592 return block_write_begin(mapping, pos, len, foliop, get_block); 2593 } 2594 EXPORT_SYMBOL(cont_write_begin); 2595 2596 /* 2597 * block_page_mkwrite() is not allowed to change the file size as it gets 2598 * called from a page fault handler when a page is first dirtied. Hence we must 2599 * be careful to check for EOF conditions here. We set the page up correctly 2600 * for a written page which means we get ENOSPC checking when writing into 2601 * holes and correct delalloc and unwritten extent mapping on filesystems that 2602 * support these features. 2603 * 2604 * We are not allowed to take the i_rwsem here so we have to play games to 2605 * protect against truncate races as the page could now be beyond EOF. Because 2606 * truncate writes the inode size before removing pages, once we have the 2607 * page lock we can determine safely if the page is beyond EOF. If it is not 2608 * beyond EOF, then the page is guaranteed safe against truncation until we 2609 * unlock the page. 2610 * 2611 * Direct callers of this function should protect against filesystem freezing 2612 * using sb_start_pagefault() - sb_end_pagefault() functions. 2613 */ 2614 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 2615 get_block_t get_block) 2616 { 2617 struct folio *folio = page_folio(vmf->page); 2618 struct inode *inode = file_inode(vma->vm_file); 2619 unsigned long end; 2620 loff_t size; 2621 int ret; 2622 2623 folio_lock(folio); 2624 size = i_size_read(inode); 2625 if ((folio->mapping != inode->i_mapping) || 2626 (folio_pos(folio) >= size)) { 2627 /* We overload EFAULT to mean page got truncated */ 2628 ret = -EFAULT; 2629 goto out_unlock; 2630 } 2631 2632 end = folio_size(folio); 2633 /* folio is wholly or partially inside EOF */ 2634 if (folio_pos(folio) + end > size) 2635 end = size - folio_pos(folio); 2636 2637 ret = __block_write_begin_int(folio, 0, end, get_block, NULL); 2638 if (unlikely(ret)) 2639 goto out_unlock; 2640 2641 block_commit_write(folio, 0, end); 2642 2643 folio_mark_dirty(folio); 2644 folio_wait_stable(folio); 2645 return 0; 2646 out_unlock: 2647 folio_unlock(folio); 2648 return ret; 2649 } 2650 EXPORT_SYMBOL(block_page_mkwrite); 2651 2652 int block_truncate_page(struct address_space *mapping, 2653 loff_t from, get_block_t *get_block) 2654 { 2655 pgoff_t index = from >> PAGE_SHIFT; 2656 unsigned blocksize; 2657 sector_t iblock; 2658 size_t offset, length, pos; 2659 struct inode *inode = mapping->host; 2660 struct folio *folio; 2661 struct buffer_head *bh; 2662 int err = 0; 2663 2664 blocksize = i_blocksize(inode); 2665 length = from & (blocksize - 1); 2666 2667 /* Block boundary? 
Nothing to do */ 2668 if (!length) 2669 return 0; 2670 2671 length = blocksize - length; 2672 iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits; 2673 2674 folio = filemap_grab_folio(mapping, index); 2675 if (IS_ERR(folio)) 2676 return PTR_ERR(folio); 2677 2678 bh = folio_buffers(folio); 2679 if (!bh) 2680 bh = create_empty_buffers(folio, blocksize, 0); 2681 2682 /* Find the buffer that contains "offset" */ 2683 offset = offset_in_folio(folio, from); 2684 pos = blocksize; 2685 while (offset >= pos) { 2686 bh = bh->b_this_page; 2687 iblock++; 2688 pos += blocksize; 2689 } 2690 2691 if (!buffer_mapped(bh)) { 2692 WARN_ON(bh->b_size != blocksize); 2693 err = get_block(inode, iblock, bh, 0); 2694 if (err) 2695 goto unlock; 2696 /* unmapped? It's a hole - nothing to do */ 2697 if (!buffer_mapped(bh)) 2698 goto unlock; 2699 } 2700 2701 /* Ok, it's mapped. Make sure it's up-to-date */ 2702 if (folio_test_uptodate(folio)) 2703 set_buffer_uptodate(bh); 2704 2705 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { 2706 err = bh_read(bh, 0); 2707 /* Uhhuh. Read error. Complain and punt. */ 2708 if (err < 0) 2709 goto unlock; 2710 } 2711 2712 folio_zero_range(folio, offset, length); 2713 mark_buffer_dirty(bh); 2714 2715 unlock: 2716 folio_unlock(folio); 2717 folio_put(folio); 2718 2719 return err; 2720 } 2721 EXPORT_SYMBOL(block_truncate_page); 2722 2723 /* 2724 * The generic write folio function for buffer-backed address_spaces 2725 */ 2726 int block_write_full_folio(struct folio *folio, struct writeback_control *wbc, 2727 void *get_block) 2728 { 2729 struct inode * const inode = folio->mapping->host; 2730 loff_t i_size = i_size_read(inode); 2731 2732 /* Is the folio fully inside i_size? */ 2733 if (folio_next_pos(folio) <= i_size) 2734 return __block_write_full_folio(inode, folio, get_block, wbc); 2735 2736 /* Is the folio fully outside i_size? (truncate in progress) */ 2737 if (folio_pos(folio) >= i_size) { 2738 folio_unlock(folio); 2739 return 0; /* don't care */ 2740 } 2741 2742 /* 2743 * The folio straddles i_size. It must be zeroed out on each and every 2744 * writeback invocation because it may be mmapped. "A file is mapped 2745 * in multiples of the page size. For a file that is not a multiple of 2746 * the page size, the remaining memory is zeroed when mapped, and 2747 * writes to that region are not written out to the file." 
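	 * (The quoted sentence is from the mmap(2) man page.)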
2748 */ 2749 folio_zero_segment(folio, offset_in_folio(folio, i_size), 2750 folio_size(folio)); 2751 return __block_write_full_folio(inode, folio, get_block, wbc); 2752 } 2753 2754 sector_t generic_block_bmap(struct address_space *mapping, sector_t block, 2755 get_block_t *get_block) 2756 { 2757 struct inode *inode = mapping->host; 2758 struct buffer_head tmp = { 2759 .b_size = i_blocksize(inode), 2760 }; 2761 2762 get_block(inode, block, &tmp, 0); 2763 return tmp.b_blocknr; 2764 } 2765 EXPORT_SYMBOL(generic_block_bmap); 2766 2767 static void end_bio_bh_io_sync(struct bio *bio) 2768 { 2769 struct buffer_head *bh = bio->bi_private; 2770 2771 if (unlikely(bio_flagged(bio, BIO_QUIET))) 2772 set_bit(BH_Quiet, &bh->b_state); 2773 2774 bh->b_end_io(bh, !bio->bi_status); 2775 bio_put(bio); 2776 } 2777 2778 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh, 2779 enum rw_hint write_hint, 2780 struct writeback_control *wbc) 2781 { 2782 const enum req_op op = opf & REQ_OP_MASK; 2783 struct bio *bio; 2784 2785 BUG_ON(!buffer_locked(bh)); 2786 BUG_ON(!buffer_mapped(bh)); 2787 BUG_ON(!bh->b_end_io); 2788 BUG_ON(buffer_delay(bh)); 2789 BUG_ON(buffer_unwritten(bh)); 2790 2791 /* 2792 * Only clear out a write error when rewriting 2793 */ 2794 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) 2795 clear_buffer_write_io_error(bh); 2796 2797 if (buffer_meta(bh)) 2798 opf |= REQ_META; 2799 if (buffer_prio(bh)) 2800 opf |= REQ_PRIO; 2801 2802 bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO); 2803 2804 fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); 2805 2806 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); 2807 bio->bi_write_hint = write_hint; 2808 2809 bio_add_folio_nofail(bio, bh->b_folio, bh->b_size, bh_offset(bh)); 2810 2811 bio->bi_end_io = end_bio_bh_io_sync; 2812 bio->bi_private = bh; 2813 2814 /* Take care of bh's that straddle the end of the device */ 2815 guard_bio_eod(bio); 2816 2817 if (wbc) { 2818 wbc_init_bio(wbc, bio); 2819 wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size); 2820 } 2821 2822 blk_crypto_submit_bio(bio); 2823 } 2824 2825 void submit_bh(blk_opf_t opf, struct buffer_head *bh) 2826 { 2827 submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL); 2828 } 2829 EXPORT_SYMBOL(submit_bh); 2830 2831 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) 2832 { 2833 lock_buffer(bh); 2834 if (!test_clear_buffer_dirty(bh)) { 2835 unlock_buffer(bh); 2836 return; 2837 } 2838 bh->b_end_io = end_buffer_write_sync; 2839 get_bh(bh); 2840 submit_bh(REQ_OP_WRITE | op_flags, bh); 2841 } 2842 EXPORT_SYMBOL(write_dirty_buffer); 2843 2844 /* 2845 * For a data-integrity writeout, we need to wait upon any in-progress I/O 2846 * and then start new I/O and then wait upon it. The caller must have a ref on 2847 * the buffer_head. 2848 */ 2849 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) 2850 { 2851 WARN_ON(atomic_read(&bh->b_count) < 1); 2852 lock_buffer(bh); 2853 if (test_clear_buffer_dirty(bh)) { 2854 /* 2855 * The bh should be mapped, but it might not be if the 2856 * device was hot-removed. Not much we can do but fail the I/O. 
2857 */ 2858 if (!buffer_mapped(bh)) { 2859 unlock_buffer(bh); 2860 return -EIO; 2861 } 2862 2863 get_bh(bh); 2864 bh->b_end_io = end_buffer_write_sync; 2865 submit_bh(REQ_OP_WRITE | op_flags, bh); 2866 wait_on_buffer(bh); 2867 if (!buffer_uptodate(bh)) 2868 return -EIO; 2869 } else { 2870 unlock_buffer(bh); 2871 } 2872 return 0; 2873 } 2874 EXPORT_SYMBOL(__sync_dirty_buffer); 2875 2876 int sync_dirty_buffer(struct buffer_head *bh) 2877 { 2878 return __sync_dirty_buffer(bh, REQ_SYNC); 2879 } 2880 EXPORT_SYMBOL(sync_dirty_buffer); 2881 2882 static inline int buffer_busy(struct buffer_head *bh) 2883 { 2884 return atomic_read(&bh->b_count) | 2885 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); 2886 } 2887 2888 static bool 2889 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free) 2890 { 2891 struct buffer_head *head = folio_buffers(folio); 2892 struct buffer_head *bh; 2893 2894 bh = head; 2895 do { 2896 if (buffer_busy(bh)) 2897 goto failed; 2898 bh = bh->b_this_page; 2899 } while (bh != head); 2900 2901 do { 2902 struct buffer_head *next = bh->b_this_page; 2903 2904 if (bh->b_assoc_map) 2905 __remove_assoc_queue(bh); 2906 bh = next; 2907 } while (bh != head); 2908 *buffers_to_free = head; 2909 folio_detach_private(folio); 2910 return true; 2911 failed: 2912 return false; 2913 } 2914 2915 /** 2916 * try_to_free_buffers - Release buffers attached to this folio. 2917 * @folio: The folio. 2918 * 2919 * If any buffers are in use (dirty, under writeback, elevated refcount), 2920 * no buffers will be freed. 2921 * 2922 * If the folio is dirty but all the buffers are clean then we need to 2923 * be sure to mark the folio clean as well. This is because the folio 2924 * may be against a block device, and a later reattachment of buffers 2925 * to a dirty folio will set *all* buffers dirty. Which would corrupt 2926 * filesystem data on the same device. 2927 * 2928 * The same applies to regular filesystem folios: if all the buffers are 2929 * clean then we set the folio clean and proceed. To do that, we require 2930 * total exclusion from block_dirty_folio(). That is obtained with 2931 * i_private_lock. 2932 * 2933 * Exclusion against try_to_free_buffers may be obtained by either 2934 * locking the folio or by holding its mapping's i_private_lock. 2935 * 2936 * Context: Process context. @folio must be locked. Will not sleep. 2937 * Return: true if all buffers attached to this folio were freed. 2938 */ 2939 bool try_to_free_buffers(struct folio *folio) 2940 { 2941 struct address_space * const mapping = folio->mapping; 2942 struct buffer_head *buffers_to_free = NULL; 2943 bool ret = 0; 2944 2945 BUG_ON(!folio_test_locked(folio)); 2946 if (folio_test_writeback(folio)) 2947 return false; 2948 2949 /* Misconfigured folio check */ 2950 if (WARN_ON_ONCE(!folio_buffers(folio))) 2951 return true; 2952 2953 if (mapping == NULL) { /* can this still happen? */ 2954 ret = drop_buffers(folio, &buffers_to_free); 2955 goto out; 2956 } 2957 2958 spin_lock(&mapping->i_private_lock); 2959 ret = drop_buffers(folio, &buffers_to_free); 2960 2961 /* 2962 * If the filesystem writes its buffers by hand (eg ext3) 2963 * then we can have clean buffers against a dirty folio. We 2964 * clean the folio here; otherwise the VM will never notice 2965 * that the filesystem did any IO at all. 2966 * 2967 * Also, during truncate, discard_buffer will have marked all 2968 * the folio's buffers clean. We discover that here and clean 2969 * the folio also. 
2970 * 2971 * i_private_lock must be held over this entire operation in order 2972 * to synchronise against block_dirty_folio and prevent the 2973 * dirty bit from being lost. 2974 */ 2975 if (ret) 2976 folio_cancel_dirty(folio); 2977 spin_unlock(&mapping->i_private_lock); 2978 out: 2979 if (buffers_to_free) { 2980 struct buffer_head *bh = buffers_to_free; 2981 2982 do { 2983 struct buffer_head *next = bh->b_this_page; 2984 free_buffer_head(bh); 2985 bh = next; 2986 } while (bh != buffers_to_free); 2987 } 2988 return ret; 2989 } 2990 EXPORT_SYMBOL(try_to_free_buffers); 2991 2992 /* 2993 * Buffer-head allocation 2994 */ 2995 static struct kmem_cache *bh_cachep __ro_after_init; 2996 2997 /* 2998 * Once the number of bh's in the machine exceeds this level, we start 2999 * stripping them in writeback. 3000 */ 3001 static unsigned long max_buffer_heads __ro_after_init; 3002 3003 int buffer_heads_over_limit; 3004 3005 struct bh_accounting { 3006 int nr; /* Number of live bh's */ 3007 int ratelimit; /* Limit cacheline bouncing */ 3008 }; 3009 3010 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0}; 3011 3012 static void recalc_bh_state(void) 3013 { 3014 int i; 3015 int tot = 0; 3016 3017 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096) 3018 return; 3019 __this_cpu_write(bh_accounting.ratelimit, 0); 3020 for_each_online_cpu(i) 3021 tot += per_cpu(bh_accounting, i).nr; 3022 buffer_heads_over_limit = (tot > max_buffer_heads); 3023 } 3024 3025 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 3026 { 3027 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); 3028 if (ret) { 3029 INIT_LIST_HEAD(&ret->b_assoc_buffers); 3030 spin_lock_init(&ret->b_uptodate_lock); 3031 preempt_disable(); 3032 __this_cpu_inc(bh_accounting.nr); 3033 recalc_bh_state(); 3034 preempt_enable(); 3035 } 3036 return ret; 3037 } 3038 EXPORT_SYMBOL(alloc_buffer_head); 3039 3040 void free_buffer_head(struct buffer_head *bh) 3041 { 3042 BUG_ON(!list_empty(&bh->b_assoc_buffers)); 3043 kmem_cache_free(bh_cachep, bh); 3044 preempt_disable(); 3045 __this_cpu_dec(bh_accounting.nr); 3046 recalc_bh_state(); 3047 preempt_enable(); 3048 } 3049 EXPORT_SYMBOL(free_buffer_head); 3050 3051 static int buffer_exit_cpu_dead(unsigned int cpu) 3052 { 3053 int i; 3054 struct bh_lru *b = &per_cpu(bh_lrus, cpu); 3055 3056 for (i = 0; i < BH_LRU_SIZE; i++) { 3057 brelse(b->bhs[i]); 3058 b->bhs[i] = NULL; 3059 } 3060 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr); 3061 per_cpu(bh_accounting, cpu).nr = 0; 3062 return 0; 3063 } 3064 3065 /** 3066 * bh_uptodate_or_lock - Test whether the buffer is uptodate 3067 * @bh: struct buffer_head 3068 * 3069 * Return true if the buffer is up-to-date and false, 3070 * with the buffer locked, if not. 3071 */ 3072 int bh_uptodate_or_lock(struct buffer_head *bh) 3073 { 3074 if (!buffer_uptodate(bh)) { 3075 lock_buffer(bh); 3076 if (!buffer_uptodate(bh)) 3077 return 0; 3078 unlock_buffer(bh); 3079 } 3080 return 1; 3081 } 3082 EXPORT_SYMBOL(bh_uptodate_or_lock); 3083 3084 /** 3085 * __bh_read - Submit read for a locked buffer 3086 * @bh: struct buffer_head 3087 * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ 3088 * @wait: wait until reading finish 3089 * 3090 * Returns zero on success or don't wait, and -EIO on error. 
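 *
 * Illustrative sketch of the usual calling pattern (this is essentially
 * what the bh_read() wrapper in buffer_head.h does; most callers should
 * simply use that):
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		int err = __bh_read(bh, 0, true);
 *		if (err)
 *			return err;
 *	}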
3091  */
3092 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3093 {
3094 	int ret = 0;
3095 
3096 	BUG_ON(!buffer_locked(bh));
3097 
3098 	get_bh(bh);
3099 	bh->b_end_io = end_buffer_read_sync;
3100 	submit_bh(REQ_OP_READ | op_flags, bh);
3101 	if (wait) {
3102 		wait_on_buffer(bh);
3103 		if (!buffer_uptodate(bh))
3104 			ret = -EIO;
3105 	}
3106 	return ret;
3107 }
3108 EXPORT_SYMBOL(__bh_read);
3109 
3110 /**
3111  * __bh_read_batch - Submit read for a batch of unlocked buffers
3112  * @nr: number of entries in the buffer batch
3113  * @bhs: a batch of struct buffer_head
3114  * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
3115  * @force_lock: if set, wait for the buffer lock; otherwise skip any buffer
3116  *              that cannot be locked without blocking.
3117  *
3118  * The reads complete asynchronously; callers wait on each buffer as needed.
3119  */
3120 void __bh_read_batch(int nr, struct buffer_head *bhs[],
3121 		     blk_opf_t op_flags, bool force_lock)
3122 {
3123 	int i;
3124 
3125 	for (i = 0; i < nr; i++) {
3126 		struct buffer_head *bh = bhs[i];
3127 
3128 		if (buffer_uptodate(bh))
3129 			continue;
3130 
3131 		if (force_lock)
3132 			lock_buffer(bh);
3133 		else
3134 			if (!trylock_buffer(bh))
3135 				continue;
3136 
3137 		if (buffer_uptodate(bh)) {
3138 			unlock_buffer(bh);
3139 			continue;
3140 		}
3141 
3142 		bh->b_end_io = end_buffer_read_sync;
3143 		get_bh(bh);
3144 		submit_bh(REQ_OP_READ | op_flags, bh);
3145 	}
3146 }
3147 EXPORT_SYMBOL(__bh_read_batch);
3148 
3149 void __init buffer_init(void)
3150 {
3151 	unsigned long nrpages;
3152 	int ret;
3153 
3154 	bh_cachep = KMEM_CACHE(buffer_head,
3155 				SLAB_RECLAIM_ACCOUNT|SLAB_PANIC);
3156 	/*
3157 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3158 	 */
3159 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3160 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3161 	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3162 					NULL, buffer_exit_cpu_dead);
3163 	WARN_ON(ret < 0);
3164 }
3165 