// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>

#include "internal.h"

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
			  struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns whether the folio has dirty or writeback buffers.  If all the
 * buffers are unlocked and clean then the folio_test_dirty information is
 * stale.  If any of the buffers are locked, it is assumed they are locked
 * for IO.
 */
void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;
	*dirty = false;
	*writeback = false;

	BUG_ON(!folio_test_locked(folio));

	head = folio_buffers(folio);
	if (!head)
		return;

	if (folio_test_writeback(folio))
		*writeback = true;

	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}
EXPORT_SYMBOL(buffer_check_dirty_writeback);

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head *bh)
{
	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
	if (!test_bit(BH_Quiet, &bh->b_state))
		printk_ratelimited(KERN_ERR
			"Buffer I/O error on dev %pg, logical block %llu%s\n",
			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}
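/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the canonical pairing of the locking primitives above.  A caller takes
 * BH_Lock before touching the buffer's contents; unlock_buffer() then wakes
 * anyone sleeping in __wait_on_buffer() or __lock_buffer().  The function
 * name example_modify_buffer is hypothetical.
 */
static void __maybe_unused example_modify_buffer(struct buffer_head *bh)
{
	lock_buffer(bh);		/* may sleep until BH_Lock is free */
	/* ... read or update the buffer contents here ... */
	mark_buffer_dirty(bh);
	unlock_buffer(bh);		/* clears BH_Lock and wakes waiters */
}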
/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed read-ahead attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost sync page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, private_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock.
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;
	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);

	index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
	if (all_mapped && __ratelimit(&last_warned)) {
		printk("__find_get_block_slow() failed. block=%llu, "
		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
		       "device %pg blocksize: %d\n",
		       (unsigned long long)block,
		       (unsigned long long)bh->b_blocknr,
		       bh->b_state, bh->b_size, bdev,
		       1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	put_page(page);
out:
	return ret;
}

static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;
	int folio_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		buffer_io_error(bh, ", async page read");
		folio_set_error(folio);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			folio_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);

	/*
	 * If all of the buffers are uptodate then we can set the page
	 * uptodate.
	 */
	if (folio_uptodate)
		folio_mark_uptodate(folio);
	folio_unlock(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}

struct postprocess_bh_ctx {
	struct work_struct work;
	struct buffer_head *bh;
};

static void verify_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	bool valid;

	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
	end_buffer_async_read(bh, valid);
	kfree(ctx);
}

static bool need_fsverity(struct buffer_head *bh)
{
	struct folio *folio = bh->b_folio;
	struct inode *inode = folio->mapping->host;

	return fsverity_active(inode) &&
		/* needed by ext4 */
		folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

static void decrypt_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	int err;

	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
					       bh_offset(bh));
	if (err == 0 && need_fsverity(bh)) {
		/*
		 * We use different work queues for decryption and for verity
		 * because verity may require reading metadata pages that need
		 * decryption, and we shouldn't recurse to the same workqueue.
		 */
		INIT_WORK(&ctx->work, verify_bh);
		fsverity_enqueue_verify_work(&ctx->work);
		return;
	}
	end_buffer_async_read(bh, err == 0);
	kfree(ctx);
}

/*
 * I/O completion handler for block_read_full_folio() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
{
	struct inode *inode = bh->b_folio->mapping->host;
	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
	bool verify = need_fsverity(bh);

	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
	if (uptodate && (decrypt || verify)) {
		struct postprocess_bh_ctx *ctx =
			kmalloc(sizeof(*ctx), GFP_ATOMIC);

		if (ctx) {
			ctx->bh = bh;
			if (decrypt) {
				INIT_WORK(&ctx->work, decrypt_bh);
				fscrypt_enqueue_decrypt_work(&ctx->work);
			} else {
				INIT_WORK(&ctx->work, verify_bh);
				fsverity_enqueue_verify_work(&ctx->work);
			}
			return;
		}
		uptodate = 0;
	}
	end_buffer_async_read(bh, uptodate);
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;

	BUG_ON(!buffer_async_write(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost async page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		folio_set_error(folio);
	}

	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	folio_end_writeback(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}
EXPORT_SYMBOL(end_buffer_async_write);

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read_io;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);
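/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the two-phase pattern a buffer-based writepage implementation follows
 * with mark_buffer_async_write().  All dirty buffers are marked first, the
 * folio is put under writeback, and only then is the I/O submitted, so
 * end_buffer_async_write() sees a consistent set of BH_Async_Write bits.
 * example_write_folio_buffers() is hypothetical and omits the locking and
 * error handling a real writepage must perform.
 */
static void __maybe_unused example_write_folio_buffers(struct folio *folio)
{
	struct buffer_head *head = folio_buffers(folio);
	struct buffer_head *bh = head;

	/* Phase 1: mark every dirty buffer for async write-out. */
	do {
		if (buffer_dirty(bh)) {
			lock_buffer(bh);
			clear_buffer_dirty(bh);
			mark_buffer_async_write(bh);
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* Phase 2: set writeback, drop the folio lock, submit the buffers. */
	folio_start_writeback(folio);
	folio_unlock(folio);
	do {
		struct buffer_head *next = bh->b_this_page;

		if (buffer_async_write(bh))
			submit_bh(REQ_OP_WRITE, bh);
		bh = next;
	} while (bh != head);
}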
/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */
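/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a filesystem uses the association machinery described above.  A
 * dependent metadata buffer is queued on the inode's ->private_list with
 * mark_buffer_dirty_inode(), and fsync later writes out and waits upon the
 * whole list via sync_mapping_buffers().  example_fsync_metadata() is
 * hypothetical.
 */
static int __maybe_unused example_fsync_metadata(struct inode *inode,
						 struct buffer_head *bh)
{
	mark_buffer_dirty_inode(bh, inode);	/* queue on ->private_list */
	return sync_mapping_buffers(inode->i_mapping);
}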
/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 * as you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

void emergency_thaw_bdev(struct super_block *sb)
{
	while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
		printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->private_data;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/**
 * generic_buffers_fsync_noflush - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.
 */
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
				  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;
	int ret;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY_ALL))
		goto out;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (ret == 0)
		ret = err;

out:
	/* check and advance again to catch errors after syncing out buffers */
	err = file_check_and_advance_wb_err(file);
	if (ret == 0)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync_noflush);

/**
 * generic_buffers_fsync - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.  This also makes sure that
 * a device cache flush operation is called at the end.
 */
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
	if (!ret)
		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync);
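/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a simple filesystem can implement its ->fsync file operation as a thin
 * wrapper around the generic helper above.  example_fsync() is hypothetical.
 */
static int __maybe_unused example_fsync(struct file *file, loff_t start,
					loff_t end, int datasync)
{
	return generic_buffers_fsync(file, start, end, datasync);
}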
/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			write_dirty_buffer(bh, 0);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_folio->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->private_data) {
		mapping->private_data = buffer_mapping;
	} else {
		BUG_ON(mapping->private_data != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct buffer_head *head;
	bool newly_dirty;

	spin_lock(&mapping->private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	/*
	 * Lock out page's memcg migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	folio_memcg_lock(folio);
	newly_dirty = !folio_test_set_dirty(folio);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__folio_mark_dirty(folio, mapping, 1);

	folio_memcg_unlock(folio);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return newly_dirty;
}
EXPORT_SYMBOL(block_dirty_folio);
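/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * buffer-backed filesystems typically publish block_dirty_folio() directly
 * in their address_space_operations.  example_aops is hypothetical and
 * abbreviated.
 */
static const struct address_space_operations example_aops __maybe_unused = {
	.dirty_folio	  = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
};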
/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;
	struct blk_plug plug;

	INIT_LIST_HEAD(&tmp);
	blk_start_plug(&plug);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, REQ_SYNC);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	spin_unlock(lock);
	blk_finish_plug(&plug);
	spin_lock(lock);

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->private_data;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->private_data;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a folio for the data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping),
 * which may not fail, from ordinary buffer allocations.
 */
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					bool retry)
{
	struct buffer_head *bh, *head;
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
	long offset;
	struct mem_cgroup *memcg, *old_memcg;

	if (retry)
		gfp |= __GFP_NOFAIL;

	/* The folio lock pins the memcg */
	memcg = folio_memcg(folio);
	old_memcg = set_active_memcg(memcg);

	head = NULL;
	offset = folio_size(folio);
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(gfp);
		if (!bh)
			goto no_grow;

		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_size = size;

		/* Link the buffer to its folio */
		folio_set_bh(bh, folio, offset);
	}
out:
	set_active_memcg(old_memcg);
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	goto out;
}
EXPORT_SYMBOL_GPL(folio_alloc_buffers);

struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
				       bool retry)
{
	return folio_alloc_buffers(page_folio(page), size, retry);
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_private(page, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = bdev_nr_bytes(bdev);

	if (sz) {
		unsigned int sizebits = blksize_bits(size);
		retval = (sz >> sizebits);
	}
	return retval;
}
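/*
 * Worked example (editor's addition): for a 1 GiB device (sz == 1 << 30)
 * and 4 KiB buffers, blksize_bits(4096) == 12, so blkdev_max_block()
 * returns 1 << 18 == 262144 - the first block number that lies beyond the
 * end of the device.  init_page_buffers() below only maps buffers whose
 * block number is strictly below this limit.
 */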
/*
 * Initialise the state of a blockdev page's buffers.
 */
static sector_t
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);
	sector_t end_block = blkdev_max_block(bdev, size);

	do {
		if (!buffer_mapped(bh)) {
			bh->b_end_io = NULL;
			bh->b_private = NULL;
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			if (block < end_block)
				set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * Caller needs to validate requested block against end of device.
	 */
	return end_block;
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static int
grow_dev_page(struct block_device *bdev, sector_t block,
	      pgoff_t index, int size, int sizebits, gfp_t gfp)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;
	sector_t end_block;
	int ret = 0;
	gfp_t gfp_mask;

	gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;

	/*
	 * XXX: __getblk_slow() can not really deal with failure and
	 * will endlessly loop on improvised global reclaim.  Prefer
	 * looping in the allocator rather than here, at least that
	 * code knows what it's doing.
	 */
	gfp_mask |= __GFP_NOFAIL;

	page = find_or_create_page(inode->i_mapping, index, gfp_mask);

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			end_block = init_page_buffers(page, bdev,
						(sector_t)index << sizebits,
						size);
			goto done;
		}
		if (!try_to_free_buffers(page_folio(page)))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, true);

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
			size);
	spin_unlock(&inode->i_mapping->private_lock);
done:
	ret = (block < end_block) ? 1 : -ENXIO;
failed:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
{
	pgoff_t index;
	int sizebits;

	sizebits = PAGE_SHIFT - __ffs(size);
	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %pg\n",
			__func__, (unsigned long long)block,
			bdev);
		return -EIO;
	}

*/ 11303b5e6454SGioh Kim return grow_dev_page(bdev, block, index, size, sizebits, gfp); 11311da177e4SLinus Torvalds } 11321da177e4SLinus Torvalds 11330026ba40SEric Biggers static struct buffer_head * 11343b5e6454SGioh Kim __getblk_slow(struct block_device *bdev, sector_t block, 11353b5e6454SGioh Kim unsigned size, gfp_t gfp) 11361da177e4SLinus Torvalds { 11371da177e4SLinus Torvalds /* Size must be multiple of hard sectorsize */ 1138e1defc4fSMartin K. Petersen if (unlikely(size & (bdev_logical_block_size(bdev)-1) || 11391da177e4SLinus Torvalds (size < 512 || size > PAGE_SIZE))) { 11401da177e4SLinus Torvalds printk(KERN_ERR "getblk(): invalid block size %d requested\n", 11411da177e4SLinus Torvalds size); 1142e1defc4fSMartin K. Petersen printk(KERN_ERR "logical block size: %d\n", 1143e1defc4fSMartin K. Petersen bdev_logical_block_size(bdev)); 11441da177e4SLinus Torvalds 11451da177e4SLinus Torvalds dump_stack(); 11461da177e4SLinus Torvalds return NULL; 11471da177e4SLinus Torvalds } 11481da177e4SLinus Torvalds 1149676ce6d5SHugh Dickins for (;;) { 1150676ce6d5SHugh Dickins struct buffer_head *bh; 1151676ce6d5SHugh Dickins int ret; 1152676ce6d5SHugh Dickins 11531da177e4SLinus Torvalds bh = __find_get_block(bdev, block, size); 11541da177e4SLinus Torvalds if (bh) 11551da177e4SLinus Torvalds return bh; 11561da177e4SLinus Torvalds 11573b5e6454SGioh Kim ret = grow_buffers(bdev, block, size, gfp); 1158676ce6d5SHugh Dickins if (ret < 0) 115991f68c89SJeff Moyer return NULL; 1160676ce6d5SHugh Dickins } 11611da177e4SLinus Torvalds } 11621da177e4SLinus Torvalds 11631da177e4SLinus Torvalds /* 11641da177e4SLinus Torvalds * The relationship between dirty buffers and dirty pages: 11651da177e4SLinus Torvalds * 11661da177e4SLinus Torvalds * Whenever a page has any dirty buffers, the page's dirty bit is set, and 1167ec82e1c1SMatthew Wilcox * the page is tagged dirty in the page cache. 11681da177e4SLinus Torvalds * 11691da177e4SLinus Torvalds * At all times, the dirtiness of the buffers represents the dirtiness of 11701da177e4SLinus Torvalds * subsections of the page. If the page has buffers, the page dirty bit is 11711da177e4SLinus Torvalds * merely a hint about the true dirty state. 11721da177e4SLinus Torvalds * 11731da177e4SLinus Torvalds * When a page is set dirty in its entirety, all its buffers are marked dirty 11741da177e4SLinus Torvalds * (if the page has buffers). 11751da177e4SLinus Torvalds * 11761da177e4SLinus Torvalds * When a buffer is marked dirty, its page is dirtied, but the page's other 11771da177e4SLinus Torvalds * buffers are not. 11781da177e4SLinus Torvalds * 11791da177e4SLinus Torvalds * Also. When blockdev buffers are explicitly read with bread(), they 11801da177e4SLinus Torvalds * individually become uptodate. But their backing page remains not 11811da177e4SLinus Torvalds * uptodate - even if all of its buffers are uptodate. A subsequent 11822c69e205SMatthew Wilcox (Oracle) * block_read_full_folio() against that folio will discover all the uptodate 11832c69e205SMatthew Wilcox (Oracle) * buffers, will set the folio uptodate and will perform no I/O. 
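 *
 * A hedged illustration (editor's addition, assumed caller): dirtying a
 * single 512-byte block of an eight-block page dirties just that buffer;
 * marking it dirty also dirties the page, but the other seven buffers
 * stay clean and are skipped at writeback time:
 *
 *	bh = page_buffers(page);		first buffer of the page
 *	memcpy(bh->b_data, src, bh->b_size);	"src" is hypothetical
 *	mark_buffer_dirty(bh);			the page is now dirty too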
11841da177e4SLinus Torvalds */ 11851da177e4SLinus Torvalds 11861da177e4SLinus Torvalds /** 11871da177e4SLinus Torvalds * mark_buffer_dirty - mark a buffer_head as needing writeout 118867be2dd1SMartin Waitz * @bh: the buffer_head to mark dirty 11891da177e4SLinus Torvalds * 1190ec82e1c1SMatthew Wilcox * mark_buffer_dirty() will set the dirty bit against the buffer, then set 1191ec82e1c1SMatthew Wilcox * its backing page dirty, then tag the page as dirty in the page cache 1192ec82e1c1SMatthew Wilcox * and then attach the address_space's inode to its superblock's dirty 11931da177e4SLinus Torvalds * inode list. 11941da177e4SLinus Torvalds * 1195abc8a8a2SMatthew Wilcox (Oracle) * mark_buffer_dirty() is atomic. It takes bh->b_folio->mapping->private_lock, 1196b93b0163SMatthew Wilcox * i_pages lock and mapping->host->i_lock. 11971da177e4SLinus Torvalds */ 1198fc9b52cdSHarvey Harrison void mark_buffer_dirty(struct buffer_head *bh) 11991da177e4SLinus Torvalds { 1200787d2214SNick Piggin WARN_ON_ONCE(!buffer_uptodate(bh)); 12011be62dc1SLinus Torvalds 12025305cb83STejun Heo trace_block_dirty_buffer(bh); 12035305cb83STejun Heo 12041be62dc1SLinus Torvalds /* 12051be62dc1SLinus Torvalds * Very *carefully* optimize the it-is-already-dirty case. 12061be62dc1SLinus Torvalds * 12071be62dc1SLinus Torvalds * Don't let the final "is it dirty" escape to before we 12081be62dc1SLinus Torvalds * perhaps modified the buffer. 12091be62dc1SLinus Torvalds */ 12101be62dc1SLinus Torvalds if (buffer_dirty(bh)) { 12111be62dc1SLinus Torvalds smp_mb(); 12121be62dc1SLinus Torvalds if (buffer_dirty(bh)) 12131be62dc1SLinus Torvalds return; 12141be62dc1SLinus Torvalds } 12151be62dc1SLinus Torvalds 1216a8e7d49aSLinus Torvalds if (!test_set_buffer_dirty(bh)) { 1217cf1d3417SMatthew Wilcox (Oracle) struct folio *folio = bh->b_folio; 1218c4843a75SGreg Thelen struct address_space *mapping = NULL; 1219c4843a75SGreg Thelen 1220cf1d3417SMatthew Wilcox (Oracle) folio_memcg_lock(folio); 1221cf1d3417SMatthew Wilcox (Oracle) if (!folio_test_set_dirty(folio)) { 1222cf1d3417SMatthew Wilcox (Oracle) mapping = folio->mapping; 12238e9d78edSLinus Torvalds if (mapping) 1224cf1d3417SMatthew Wilcox (Oracle) __folio_mark_dirty(folio, mapping, 0); 12258e9d78edSLinus Torvalds } 1226cf1d3417SMatthew Wilcox (Oracle) folio_memcg_unlock(folio); 1227c4843a75SGreg Thelen if (mapping) 1228c4843a75SGreg Thelen __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 1229a8e7d49aSLinus Torvalds } 12301da177e4SLinus Torvalds } 12311fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(mark_buffer_dirty); 12321da177e4SLinus Torvalds 123387354e5dSJeff Layton void mark_buffer_write_io_error(struct buffer_head *bh) 123487354e5dSJeff Layton { 1235485e9605SJeff Layton struct super_block *sb; 1236485e9605SJeff Layton 123787354e5dSJeff Layton set_buffer_write_io_error(bh); 123887354e5dSJeff Layton /* FIXME: do we need to set this in both places? 
*/ 1239abc8a8a2SMatthew Wilcox (Oracle) if (bh->b_folio && bh->b_folio->mapping) 1240abc8a8a2SMatthew Wilcox (Oracle) mapping_set_error(bh->b_folio->mapping, -EIO); 124187354e5dSJeff Layton if (bh->b_assoc_map) 124287354e5dSJeff Layton mapping_set_error(bh->b_assoc_map, -EIO); 1243485e9605SJeff Layton rcu_read_lock(); 1244485e9605SJeff Layton sb = READ_ONCE(bh->b_bdev->bd_super); 1245485e9605SJeff Layton if (sb) 1246485e9605SJeff Layton errseq_set(&sb->s_wb_err, -EIO); 1247485e9605SJeff Layton rcu_read_unlock(); 124887354e5dSJeff Layton } 124987354e5dSJeff Layton EXPORT_SYMBOL(mark_buffer_write_io_error); 125087354e5dSJeff Layton 12511da177e4SLinus Torvalds /* 12521da177e4SLinus Torvalds * Decrement a buffer_head's reference count. If all buffers against a page 12531da177e4SLinus Torvalds * have zero reference count, are clean and unlocked, and if the page is clean 12541da177e4SLinus Torvalds * and unlocked then try_to_free_buffers() may strip the buffers from the page 12551da177e4SLinus Torvalds * in preparation for freeing it (sometimes, rarely, buffers are removed from 12561da177e4SLinus Torvalds * a page but it ends up not being freed, and buffers may later be reattached). 12571da177e4SLinus Torvalds */ 12581da177e4SLinus Torvalds void __brelse(struct buffer_head * buf) 12591da177e4SLinus Torvalds { 12601da177e4SLinus Torvalds if (atomic_read(&buf->b_count)) { 12611da177e4SLinus Torvalds put_bh(buf); 12621da177e4SLinus Torvalds return; 12631da177e4SLinus Torvalds } 12645c752ad9SArjan van de Ven WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n"); 12651da177e4SLinus Torvalds } 12661fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__brelse); 12671da177e4SLinus Torvalds 12681da177e4SLinus Torvalds /* 12691da177e4SLinus Torvalds * bforget() is like brelse(), except it discards any 12701da177e4SLinus Torvalds * potentially dirty data. 
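 *
 * Editor's sketch (assumed caller, not part of this file): a filesystem
 * backing out of a modification to a block it is about to free has no
 * reason to let the dirty data reach the disk:
 *
 *	bh = sb_getblk(sb, blocknr);	"blocknr" is hypothetical
 *	...modify bh->b_data, then decide the block is being freed...
 *	bforget(bh);			drops the dirty bit and the reference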
12711da177e4SLinus Torvalds */ 12721da177e4SLinus Torvalds void __bforget(struct buffer_head *bh) 12731da177e4SLinus Torvalds { 12741da177e4SLinus Torvalds clear_buffer_dirty(bh); 1275535ee2fbSJan Kara if (bh->b_assoc_map) { 1276abc8a8a2SMatthew Wilcox (Oracle) struct address_space *buffer_mapping = bh->b_folio->mapping; 12771da177e4SLinus Torvalds 12781da177e4SLinus Torvalds spin_lock(&buffer_mapping->private_lock); 12791da177e4SLinus Torvalds list_del_init(&bh->b_assoc_buffers); 128058ff407bSJan Kara bh->b_assoc_map = NULL; 12811da177e4SLinus Torvalds spin_unlock(&buffer_mapping->private_lock); 12821da177e4SLinus Torvalds } 12831da177e4SLinus Torvalds __brelse(bh); 12841da177e4SLinus Torvalds } 12851fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__bforget); 12861da177e4SLinus Torvalds 12871da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh) 12881da177e4SLinus Torvalds { 12891da177e4SLinus Torvalds lock_buffer(bh); 12901da177e4SLinus Torvalds if (buffer_uptodate(bh)) { 12911da177e4SLinus Torvalds unlock_buffer(bh); 12921da177e4SLinus Torvalds return bh; 12931da177e4SLinus Torvalds } else { 12941da177e4SLinus Torvalds get_bh(bh); 12951da177e4SLinus Torvalds bh->b_end_io = end_buffer_read_sync; 12961420c4a5SBart Van Assche submit_bh(REQ_OP_READ, bh); 12971da177e4SLinus Torvalds wait_on_buffer(bh); 12981da177e4SLinus Torvalds if (buffer_uptodate(bh)) 12991da177e4SLinus Torvalds return bh; 13001da177e4SLinus Torvalds } 13011da177e4SLinus Torvalds brelse(bh); 13021da177e4SLinus Torvalds return NULL; 13031da177e4SLinus Torvalds } 13041da177e4SLinus Torvalds 13051da177e4SLinus Torvalds /* 13061da177e4SLinus Torvalds * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block(). 13071da177e4SLinus Torvalds * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their 13081da177e4SLinus Torvalds * refcount elevated by one when they're in an LRU. A buffer can only appear 13091da177e4SLinus Torvalds * once in a particular CPU's LRU. A single buffer can be present in multiple 13101da177e4SLinus Torvalds * CPU's LRUs at the same time. 13111da177e4SLinus Torvalds * 13121da177e4SLinus Torvalds * This is a transparent caching front-end to sb_bread(), sb_getblk() and 13131da177e4SLinus Torvalds * sb_find_get_block(). 13141da177e4SLinus Torvalds * 13151da177e4SLinus Torvalds * The LRUs themselves only need locking against invalidate_bh_lrus. We use 13161da177e4SLinus Torvalds * a local interrupt disable for that. 
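 *
 * Editor's sketch of the intended effect (illustrative block number):
 * the second lookup of the same block is satisfied from this CPU's LRU
 * without another pagecache lookup:
 *
 *	bh = __bread(bdev, 42, 4096);	slow path, bh installed in the LRU
 *	brelse(bh);
 *	bh = __bread(bdev, 42, 4096);	hit in lookup_bh_lru()
 *	brelse(bh);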
13171da177e4SLinus Torvalds */ 13181da177e4SLinus Torvalds 131986cf78d7SSebastien Buisson #define BH_LRU_SIZE 16 13201da177e4SLinus Torvalds 13211da177e4SLinus Torvalds struct bh_lru { 13221da177e4SLinus Torvalds struct buffer_head *bhs[BH_LRU_SIZE]; 13231da177e4SLinus Torvalds }; 13241da177e4SLinus Torvalds 13251da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }}; 13261da177e4SLinus Torvalds 13271da177e4SLinus Torvalds #ifdef CONFIG_SMP 13281da177e4SLinus Torvalds #define bh_lru_lock() local_irq_disable() 13291da177e4SLinus Torvalds #define bh_lru_unlock() local_irq_enable() 13301da177e4SLinus Torvalds #else 13311da177e4SLinus Torvalds #define bh_lru_lock() preempt_disable() 13321da177e4SLinus Torvalds #define bh_lru_unlock() preempt_enable() 13331da177e4SLinus Torvalds #endif 13341da177e4SLinus Torvalds 13351da177e4SLinus Torvalds static inline void check_irqs_on(void) 13361da177e4SLinus Torvalds { 13371da177e4SLinus Torvalds #ifdef irqs_disabled 13381da177e4SLinus Torvalds BUG_ON(irqs_disabled()); 13391da177e4SLinus Torvalds #endif 13401da177e4SLinus Torvalds } 13411da177e4SLinus Torvalds 13421da177e4SLinus Torvalds /* 1343241f01fbSEric Biggers * Install a buffer_head into this cpu's LRU. If not already in the LRU, it is 1344241f01fbSEric Biggers * inserted at the front, and the buffer_head at the back, if any, is evicted. 1345241f01fbSEric Biggers * Or, if already in the LRU, it is moved to the front. 13461da177e4SLinus Torvalds */ 13471da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh) 13481da177e4SLinus Torvalds { 1349241f01fbSEric Biggers struct buffer_head *evictee = bh; 1350241f01fbSEric Biggers struct bh_lru *b; 1351241f01fbSEric Biggers int i; 13521da177e4SLinus Torvalds 13531da177e4SLinus Torvalds check_irqs_on(); 1354c0226eb8SMinchan Kim bh_lru_lock(); 1355c0226eb8SMinchan Kim 13568cc621d2SMinchan Kim /* 13578cc621d2SMinchan Kim * The refcount of a buffer_head in the bh_lru prevents dropping the 13588cc621d2SMinchan Kim * attached page (i.e., try_to_free_buffers), which could otherwise 13598cc621d2SMinchan Kim * make page migration fail. 13608cc621d2SMinchan Kim * Skip putting the upcoming bh into the bh_lru until migration is done. 13618cc621d2SMinchan Kim */ 1362c0226eb8SMinchan Kim if (lru_cache_disabled()) { 1363c0226eb8SMinchan Kim bh_lru_unlock(); 13648cc621d2SMinchan Kim return; 1365c0226eb8SMinchan Kim } 1366241f01fbSEric Biggers 1367241f01fbSEric Biggers b = this_cpu_ptr(&bh_lrus); 1368241f01fbSEric Biggers for (i = 0; i < BH_LRU_SIZE; i++) { 1369241f01fbSEric Biggers swap(evictee, b->bhs[i]); 1370241f01fbSEric Biggers if (evictee == bh) { 1371241f01fbSEric Biggers bh_lru_unlock(); 1372241f01fbSEric Biggers return; 1373241f01fbSEric Biggers } 1374241f01fbSEric Biggers } 13751da177e4SLinus Torvalds 13761da177e4SLinus Torvalds get_bh(bh); 13771da177e4SLinus Torvalds bh_lru_unlock(); 1378241f01fbSEric Biggers brelse(evictee); 13791da177e4SLinus Torvalds } 13801da177e4SLinus Torvalds 13811da177e4SLinus Torvalds /* 13821da177e4SLinus Torvalds * Look up the bh in this cpu's LRU. If it's there, move it to the head. 
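 *
 * Editor's illustration of the move-to-front (hypothetical contents):
 *
 *	bhs[] before a lookup of C:  [ A, B, C, D, ... ]
 *	bhs[] after  a lookup of C:  [ C, A, B, D, ... ]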
13831da177e4SLinus Torvalds */ 1384858119e1SArjan van de Ven static struct buffer_head * 13853991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size) 13861da177e4SLinus Torvalds { 13871da177e4SLinus Torvalds struct buffer_head *ret = NULL; 13883991d3bdSTomasz Kvarsin unsigned int i; 13891da177e4SLinus Torvalds 13901da177e4SLinus Torvalds check_irqs_on(); 13911da177e4SLinus Torvalds bh_lru_lock(); 13921da177e4SLinus Torvalds for (i = 0; i < BH_LRU_SIZE; i++) { 1393c7b92516SChristoph Lameter struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); 13941da177e4SLinus Torvalds 13959470dd5dSZach Brown if (bh && bh->b_blocknr == block && bh->b_bdev == bdev && 13969470dd5dSZach Brown bh->b_size == size) { 13971da177e4SLinus Torvalds if (i) { 13981da177e4SLinus Torvalds while (i) { 1399c7b92516SChristoph Lameter __this_cpu_write(bh_lrus.bhs[i], 1400c7b92516SChristoph Lameter __this_cpu_read(bh_lrus.bhs[i - 1])); 14011da177e4SLinus Torvalds i--; 14021da177e4SLinus Torvalds } 1403c7b92516SChristoph Lameter __this_cpu_write(bh_lrus.bhs[0], bh); 14041da177e4SLinus Torvalds } 14051da177e4SLinus Torvalds get_bh(bh); 14061da177e4SLinus Torvalds ret = bh; 14071da177e4SLinus Torvalds break; 14081da177e4SLinus Torvalds } 14091da177e4SLinus Torvalds } 14101da177e4SLinus Torvalds bh_lru_unlock(); 14111da177e4SLinus Torvalds return ret; 14121da177e4SLinus Torvalds } 14131da177e4SLinus Torvalds 14141da177e4SLinus Torvalds /* 14151da177e4SLinus Torvalds * Perform a pagecache lookup for the matching buffer. If it's there, refresh 14161da177e4SLinus Torvalds * it in the LRU and mark it as accessed. If it is not present then return 14171da177e4SLinus Torvalds * NULL 14181da177e4SLinus Torvalds */ 14191da177e4SLinus Torvalds struct buffer_head * 14203991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size) 14211da177e4SLinus Torvalds { 14221da177e4SLinus Torvalds struct buffer_head *bh = lookup_bh_lru(bdev, block, size); 14231da177e4SLinus Torvalds 14241da177e4SLinus Torvalds if (bh == NULL) { 14252457aec6SMel Gorman /* __find_get_block_slow will mark the page accessed */ 1426385fd4c5SCoywolf Qi Hunt bh = __find_get_block_slow(bdev, block); 14271da177e4SLinus Torvalds if (bh) 14281da177e4SLinus Torvalds bh_lru_install(bh); 14292457aec6SMel Gorman } else 14301da177e4SLinus Torvalds touch_buffer(bh); 14312457aec6SMel Gorman 14321da177e4SLinus Torvalds return bh; 14331da177e4SLinus Torvalds } 14341da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block); 14351da177e4SLinus Torvalds 14361da177e4SLinus Torvalds /* 14373b5e6454SGioh Kim * __getblk_gfp() will locate (and, if necessary, create) the buffer_head 14381da177e4SLinus Torvalds * which corresponds to the passed block_device, block and size. The 14391da177e4SLinus Torvalds * returned buffer has its reference count incremented. 14401da177e4SLinus Torvalds * 14413b5e6454SGioh Kim * __getblk_gfp() will lock up the machine if grow_dev_page's 14423b5e6454SGioh Kim * try_to_free_buffers() attempt is failing. FIXME, perhaps? 
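 *
 * A hedged usage sketch (editor's addition): a caller overwriting an
 * entire block needs no prior read, so __getblk() is sufficient, and
 * the reference obtained here must be dropped with brelse():
 *
 *	bh = __getblk(bdev, blocknr, 4096);	"blocknr" is hypothetical
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);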
14431da177e4SLinus Torvalds */ 14441da177e4SLinus Torvalds struct buffer_head * 14453b5e6454SGioh Kim __getblk_gfp(struct block_device *bdev, sector_t block, 14463b5e6454SGioh Kim unsigned size, gfp_t gfp) 14471da177e4SLinus Torvalds { 14481da177e4SLinus Torvalds struct buffer_head *bh = __find_get_block(bdev, block, size); 14491da177e4SLinus Torvalds 14501da177e4SLinus Torvalds might_sleep(); 14511da177e4SLinus Torvalds if (bh == NULL) 14523b5e6454SGioh Kim bh = __getblk_slow(bdev, block, size, gfp); 14531da177e4SLinus Torvalds return bh; 14541da177e4SLinus Torvalds } 14553b5e6454SGioh Kim EXPORT_SYMBOL(__getblk_gfp); 14561da177e4SLinus Torvalds 14571da177e4SLinus Torvalds /* 14581da177e4SLinus Torvalds * Do async read-ahead on a buffer. 14591da177e4SLinus Torvalds */ 14603991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size) 14611da177e4SLinus Torvalds { 14621da177e4SLinus Torvalds struct buffer_head *bh = __getblk(bdev, block, size); 1463a3e713b5SAndrew Morton if (likely(bh)) { 1464e7ea1129SZhang Yi bh_readahead(bh, REQ_RAHEAD); 14651da177e4SLinus Torvalds brelse(bh); 14661da177e4SLinus Torvalds } 1467a3e713b5SAndrew Morton } 14681da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead); 14691da177e4SLinus Torvalds 14701da177e4SLinus Torvalds /** 14713b5e6454SGioh Kim * __bread_gfp() - reads a specified block and returns the bh 147267be2dd1SMartin Waitz * @bdev: the block_device to read from 14731da177e4SLinus Torvalds * @block: number of block 14741da177e4SLinus Torvalds * @size: size (in bytes) to read 14753b5e6454SGioh Kim * @gfp: page allocation flag 14761da177e4SLinus Torvalds * 14771da177e4SLinus Torvalds * Reads a specified block, and returns the buffer head that contains it. 14783b5e6454SGioh Kim * If @gfp is zero, the page cache is allocated from the non-movable area, 14793b5e6454SGioh Kim * so that pinned buffer pages do not get in the way of page migration. 14801da177e4SLinus Torvalds * It returns NULL if the block was unreadable. 14811da177e4SLinus Torvalds */ 14821da177e4SLinus Torvalds struct buffer_head * 14833b5e6454SGioh Kim __bread_gfp(struct block_device *bdev, sector_t block, 14843b5e6454SGioh Kim unsigned size, gfp_t gfp) 14851da177e4SLinus Torvalds { 14863b5e6454SGioh Kim struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); 14871da177e4SLinus Torvalds 1488a3e713b5SAndrew Morton if (likely(bh) && !buffer_uptodate(bh)) 14891da177e4SLinus Torvalds bh = __bread_slow(bh); 14901da177e4SLinus Torvalds return bh; 14911da177e4SLinus Torvalds } 14923b5e6454SGioh Kim EXPORT_SYMBOL(__bread_gfp); 14931da177e4SLinus Torvalds 14948cc621d2SMinchan Kim static void __invalidate_bh_lrus(struct bh_lru *b) 14958cc621d2SMinchan Kim { 14968cc621d2SMinchan Kim int i; 14978cc621d2SMinchan Kim 14988cc621d2SMinchan Kim for (i = 0; i < BH_LRU_SIZE; i++) { 14998cc621d2SMinchan Kim brelse(b->bhs[i]); 15008cc621d2SMinchan Kim b->bhs[i] = NULL; 15018cc621d2SMinchan Kim } 15028cc621d2SMinchan Kim } 15031da177e4SLinus Torvalds /* 15041da177e4SLinus Torvalds * invalidate_bh_lrus() is called rarely - but not only at unmount. 15051da177e4SLinus Torvalds * This doesn't race because it runs on each CPU either in irq 15061da177e4SLinus Torvalds * or with preempt disabled. 
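 *
 * Editor's note (hedged): besides unmount, invalidate_bdev() takes this
 * path so that buffer_heads pinned in the per-CPU LRUs cannot be found
 * again once the block device's page cache has been invalidated.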
15071da177e4SLinus Torvalds */ 15081da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg) 15091da177e4SLinus Torvalds { 15101da177e4SLinus Torvalds struct bh_lru *b = &get_cpu_var(bh_lrus); 15111da177e4SLinus Torvalds 15128cc621d2SMinchan Kim __invalidate_bh_lrus(b); 15131da177e4SLinus Torvalds put_cpu_var(bh_lrus); 15141da177e4SLinus Torvalds } 15151da177e4SLinus Torvalds 15168cc621d2SMinchan Kim bool has_bh_in_lru(int cpu, void *dummy) 151742be35d0SGilad Ben-Yossef { 151842be35d0SGilad Ben-Yossef struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu); 151942be35d0SGilad Ben-Yossef int i; 152042be35d0SGilad Ben-Yossef 152142be35d0SGilad Ben-Yossef for (i = 0; i < BH_LRU_SIZE; i++) { 152242be35d0SGilad Ben-Yossef if (b->bhs[i]) 15231d706679SSaurav Girepunje return true; 152442be35d0SGilad Ben-Yossef } 152542be35d0SGilad Ben-Yossef 15261d706679SSaurav Girepunje return false; 152742be35d0SGilad Ben-Yossef } 152842be35d0SGilad Ben-Yossef 1529f9a14399SPeter Zijlstra void invalidate_bh_lrus(void) 15301da177e4SLinus Torvalds { 1531cb923159SSebastian Andrzej Siewior on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1); 15321da177e4SLinus Torvalds } 15339db5579bSNick Piggin EXPORT_SYMBOL_GPL(invalidate_bh_lrus); 15341da177e4SLinus Torvalds 1535243418e3SMinchan Kim /* 1536243418e3SMinchan Kim * It's called from workqueue context so we need a bh_lru_lock to close 1537243418e3SMinchan Kim * the race with preemption/irq. 1538243418e3SMinchan Kim */ 1539243418e3SMinchan Kim void invalidate_bh_lrus_cpu(void) 15408cc621d2SMinchan Kim { 15418cc621d2SMinchan Kim struct bh_lru *b; 15428cc621d2SMinchan Kim 15438cc621d2SMinchan Kim bh_lru_lock(); 1544243418e3SMinchan Kim b = this_cpu_ptr(&bh_lrus); 15458cc621d2SMinchan Kim __invalidate_bh_lrus(b); 15468cc621d2SMinchan Kim bh_lru_unlock(); 15478cc621d2SMinchan Kim } 15488cc621d2SMinchan Kim 15491da177e4SLinus Torvalds void set_bh_page(struct buffer_head *bh, 15501da177e4SLinus Torvalds struct page *page, unsigned long offset) 15511da177e4SLinus Torvalds { 15521da177e4SLinus Torvalds bh->b_page = page; 1553e827f923SEric Sesterhenn BUG_ON(offset >= PAGE_SIZE); 15541da177e4SLinus Torvalds if (PageHighMem(page)) 15551da177e4SLinus Torvalds /* 15561da177e4SLinus Torvalds * This catches illegal uses and preserves the offset: 15571da177e4SLinus Torvalds */ 15581da177e4SLinus Torvalds bh->b_data = (char *)(0 + offset); 15591da177e4SLinus Torvalds else 15601da177e4SLinus Torvalds bh->b_data = page_address(page) + offset; 15611da177e4SLinus Torvalds } 15621da177e4SLinus Torvalds EXPORT_SYMBOL(set_bh_page); 15631da177e4SLinus Torvalds 1564465e5e6aSPankaj Raghav void folio_set_bh(struct buffer_head *bh, struct folio *folio, 1565465e5e6aSPankaj Raghav unsigned long offset) 1566465e5e6aSPankaj Raghav { 1567465e5e6aSPankaj Raghav bh->b_folio = folio; 1568465e5e6aSPankaj Raghav BUG_ON(offset >= folio_size(folio)); 1569465e5e6aSPankaj Raghav if (folio_test_highmem(folio)) 1570465e5e6aSPankaj Raghav /* 1571465e5e6aSPankaj Raghav * This catches illegal uses and preserves the offset: 1572465e5e6aSPankaj Raghav */ 1573465e5e6aSPankaj Raghav bh->b_data = (char *)(0 + offset); 1574465e5e6aSPankaj Raghav else 1575465e5e6aSPankaj Raghav bh->b_data = folio_address(folio) + offset; 1576465e5e6aSPankaj Raghav } 1577465e5e6aSPankaj Raghav EXPORT_SYMBOL(folio_set_bh); 1578465e5e6aSPankaj Raghav 15791da177e4SLinus Torvalds /* 15801da177e4SLinus Torvalds * Called when truncating a buffer on a page completely. 
15811da177e4SLinus Torvalds */ 1582e7470ee8SMel Gorman 1583e7470ee8SMel Gorman /* Bits that are cleared during an invalidate */ 1584e7470ee8SMel Gorman #define BUFFER_FLAGS_DISCARD \ 1585e7470ee8SMel Gorman (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \ 1586e7470ee8SMel Gorman 1 << BH_Delay | 1 << BH_Unwritten) 1587e7470ee8SMel Gorman 1588858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh) 15891da177e4SLinus Torvalds { 1590b0192296SUros Bizjak unsigned long b_state; 1591e7470ee8SMel Gorman 15921da177e4SLinus Torvalds lock_buffer(bh); 15931da177e4SLinus Torvalds clear_buffer_dirty(bh); 15941da177e4SLinus Torvalds bh->b_bdev = NULL; 1595b0192296SUros Bizjak b_state = READ_ONCE(bh->b_state); 1596b0192296SUros Bizjak do { 1597b0192296SUros Bizjak } while (!try_cmpxchg(&bh->b_state, &b_state, 1598b0192296SUros Bizjak b_state & ~BUFFER_FLAGS_DISCARD)); 15991da177e4SLinus Torvalds unlock_buffer(bh); 16001da177e4SLinus Torvalds } 16011da177e4SLinus Torvalds 16021da177e4SLinus Torvalds /** 16037ba13abbSMatthew Wilcox (Oracle) * block_invalidate_folio - Invalidate part or all of a buffer-backed folio. 16047ba13abbSMatthew Wilcox (Oracle) * @folio: The folio which is affected. 1605d47992f8SLukas Czerner * @offset: start of the range to invalidate 1606d47992f8SLukas Czerner * @length: length of the range to invalidate 16071da177e4SLinus Torvalds * 16087ba13abbSMatthew Wilcox (Oracle) * block_invalidate_folio() is called when all or part of the folio has been 16091da177e4SLinus Torvalds * invalidated by a truncate operation. 16101da177e4SLinus Torvalds * 16117ba13abbSMatthew Wilcox (Oracle) * block_invalidate_folio() does not have to release all buffers, but it must 16121da177e4SLinus Torvalds * ensure that no dirty buffer is left outside @offset and that no I/O 16131da177e4SLinus Torvalds * is underway against any of the blocks which are outside the truncation 16141da177e4SLinus Torvalds * point. Because the caller is about to free (and possibly reuse) those 16151da177e4SLinus Torvalds * blocks on-disk. 16161da177e4SLinus Torvalds */ 16177ba13abbSMatthew Wilcox (Oracle) void block_invalidate_folio(struct folio *folio, size_t offset, size_t length) 16181da177e4SLinus Torvalds { 16191da177e4SLinus Torvalds struct buffer_head *head, *bh, *next; 16207ba13abbSMatthew Wilcox (Oracle) size_t curr_off = 0; 16217ba13abbSMatthew Wilcox (Oracle) size_t stop = length + offset; 16221da177e4SLinus Torvalds 16237ba13abbSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio)); 16241da177e4SLinus Torvalds 1625d47992f8SLukas Czerner /* 1626d47992f8SLukas Czerner * Check for overflow 1627d47992f8SLukas Czerner */ 16287ba13abbSMatthew Wilcox (Oracle) BUG_ON(stop > folio_size(folio) || stop < length); 1629d47992f8SLukas Czerner 16307ba13abbSMatthew Wilcox (Oracle) head = folio_buffers(folio); 16317ba13abbSMatthew Wilcox (Oracle) if (!head) 16327ba13abbSMatthew Wilcox (Oracle) return; 16337ba13abbSMatthew Wilcox (Oracle) 16341da177e4SLinus Torvalds bh = head; 16351da177e4SLinus Torvalds do { 16367ba13abbSMatthew Wilcox (Oracle) size_t next_off = curr_off + bh->b_size; 16371da177e4SLinus Torvalds next = bh->b_this_page; 16381da177e4SLinus Torvalds 16391da177e4SLinus Torvalds /* 1640d47992f8SLukas Czerner * Are we still fully in range ? 1641d47992f8SLukas Czerner */ 1642d47992f8SLukas Czerner if (next_off > stop) 1643d47992f8SLukas Czerner goto out; 1644d47992f8SLukas Czerner 1645d47992f8SLukas Czerner /* 16461da177e4SLinus Torvalds * is this block fully invalidated? 
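 *
 * Editor's worked example (illustrative): invalidating from offset 1024
 * to the end of a 4K folio carrying 1K blocks, the buffer at curr_off 0
 * is kept, while the buffers at 1024, 2048 and 3072 satisfy
 * "offset <= curr_off" below and are discarded.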
16471da177e4SLinus Torvalds */ 16481da177e4SLinus Torvalds if (offset <= curr_off) 16491da177e4SLinus Torvalds discard_buffer(bh); 16501da177e4SLinus Torvalds curr_off = next_off; 16511da177e4SLinus Torvalds bh = next; 16521da177e4SLinus Torvalds } while (bh != head); 16531da177e4SLinus Torvalds 16541da177e4SLinus Torvalds /* 16557ba13abbSMatthew Wilcox (Oracle) * We release buffers only if the entire folio is being invalidated. 16561da177e4SLinus Torvalds * The get_block cached value has been unconditionally invalidated, 16571da177e4SLinus Torvalds * so real IO is not possible anymore. 16581da177e4SLinus Torvalds */ 16597ba13abbSMatthew Wilcox (Oracle) if (length == folio_size(folio)) 16607ba13abbSMatthew Wilcox (Oracle) filemap_release_folio(folio, 0); 16611da177e4SLinus Torvalds out: 16622ff28e22SNeilBrown return; 16631da177e4SLinus Torvalds } 16647ba13abbSMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_invalidate_folio); 16651da177e4SLinus Torvalds 16661da177e4SLinus Torvalds /* 16671da177e4SLinus Torvalds * We attach and possibly dirty the buffers atomically wrt 1668e621900aSMatthew Wilcox (Oracle) * block_dirty_folio() via private_lock. try_to_free_buffers 16698e2e1756SPankaj Raghav * is already excluded via the folio lock. 16701da177e4SLinus Torvalds */ 16718e2e1756SPankaj Raghav void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize, 16728e2e1756SPankaj Raghav unsigned long b_state) 16731da177e4SLinus Torvalds { 16741da177e4SLinus Torvalds struct buffer_head *bh, *head, *tail; 16751da177e4SLinus Torvalds 16768e2e1756SPankaj Raghav head = folio_alloc_buffers(folio, blocksize, true); 16771da177e4SLinus Torvalds bh = head; 16781da177e4SLinus Torvalds do { 16791da177e4SLinus Torvalds bh->b_state |= b_state; 16801da177e4SLinus Torvalds tail = bh; 16811da177e4SLinus Torvalds bh = bh->b_this_page; 16821da177e4SLinus Torvalds } while (bh); 16831da177e4SLinus Torvalds tail->b_this_page = head; 16841da177e4SLinus Torvalds 16858e2e1756SPankaj Raghav spin_lock(&folio->mapping->private_lock); 16868e2e1756SPankaj Raghav if (folio_test_uptodate(folio) || folio_test_dirty(folio)) { 16871da177e4SLinus Torvalds bh = head; 16881da177e4SLinus Torvalds do { 16898e2e1756SPankaj Raghav if (folio_test_dirty(folio)) 16901da177e4SLinus Torvalds set_buffer_dirty(bh); 16918e2e1756SPankaj Raghav if (folio_test_uptodate(folio)) 16921da177e4SLinus Torvalds set_buffer_uptodate(bh); 16931da177e4SLinus Torvalds bh = bh->b_this_page; 16941da177e4SLinus Torvalds } while (bh != head); 16951da177e4SLinus Torvalds } 16968e2e1756SPankaj Raghav folio_attach_private(folio, head); 16978e2e1756SPankaj Raghav spin_unlock(&folio->mapping->private_lock); 16988e2e1756SPankaj Raghav } 16998e2e1756SPankaj Raghav EXPORT_SYMBOL(folio_create_empty_buffers); 17008e2e1756SPankaj Raghav 17018e2e1756SPankaj Raghav void create_empty_buffers(struct page *page, 17028e2e1756SPankaj Raghav unsigned long blocksize, unsigned long b_state) 17038e2e1756SPankaj Raghav { 17048e2e1756SPankaj Raghav folio_create_empty_buffers(page_folio(page), blocksize, b_state); 17051da177e4SLinus Torvalds } 17061da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers); 17071da177e4SLinus Torvalds 170829f3ad7dSJan Kara /** 170929f3ad7dSJan Kara * clean_bdev_aliases: clean a range of buffers in block device 171029f3ad7dSJan Kara * @bdev: Block device to clean buffers in 171129f3ad7dSJan Kara * @block: Start of a range of blocks to clean 171229f3ad7dSJan Kara * @len: Number of blocks to clean 17131da177e4SLinus Torvalds * 171429f3ad7dSJan Kara * 
We are taking a range of blocks for data and we don't want writeback of any 171529f3ad7dSJan Kara * buffer-cache aliases from the moment this function returns until the 171629f3ad7dSJan Kara * moment when something explicitly marks the buffer dirty (hopefully that 171729f3ad7dSJan Kara * will not happen until we free that block ;-) We don't even need to mark 171829f3ad7dSJan Kara * it not-uptodate - nobody can expect anything from a newly allocated buffer 171929f3ad7dSJan Kara * anyway. We used to use unmap_buffer() for such invalidation, but that was 172029f3ad7dSJan Kara * wrong. We definitely don't want to mark the alias unmapped, for example - it 172129f3ad7dSJan Kara * would confuse anyone who might pick it with bread() afterwards... 172229f3ad7dSJan Kara * 172329f3ad7dSJan Kara * Also note that bforget() doesn't lock the buffer. So there can be 172429f3ad7dSJan Kara * writeout I/O going on against recently-freed buffers. We don't wait on that 172529f3ad7dSJan Kara * I/O in bforget() - it's more efficient to wait on the I/O only if we really 172629f3ad7dSJan Kara * need to. That happens here. 17271da177e4SLinus Torvalds */ 172829f3ad7dSJan Kara void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len) 17291da177e4SLinus Torvalds { 173029f3ad7dSJan Kara struct inode *bd_inode = bdev->bd_inode; 173129f3ad7dSJan Kara struct address_space *bd_mapping = bd_inode->i_mapping; 17329e0b6f31SMatthew Wilcox (Oracle) struct folio_batch fbatch; 173329f3ad7dSJan Kara pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits); 173429f3ad7dSJan Kara pgoff_t end; 1735c10f778dSJan Kara int i, count; 173629f3ad7dSJan Kara struct buffer_head *bh; 173729f3ad7dSJan Kara struct buffer_head *head; 17381da177e4SLinus Torvalds 173929f3ad7dSJan Kara end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits); 17409e0b6f31SMatthew Wilcox (Oracle) folio_batch_init(&fbatch); 17419e0b6f31SMatthew Wilcox (Oracle) while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) { 17429e0b6f31SMatthew Wilcox (Oracle) count = folio_batch_count(&fbatch); 1743c10f778dSJan Kara for (i = 0; i < count; i++) { 17449e0b6f31SMatthew Wilcox (Oracle) struct folio *folio = fbatch.folios[i]; 17451da177e4SLinus Torvalds 17469e0b6f31SMatthew Wilcox (Oracle) if (!folio_buffers(folio)) 174729f3ad7dSJan Kara continue; 174829f3ad7dSJan Kara /* 17499e0b6f31SMatthew Wilcox (Oracle) * We use the folio lock instead of bd_mapping->private_lock 175029f3ad7dSJan Kara * to pin buffers here since we can afford to sleep and 175129f3ad7dSJan Kara * it scales better than a global spinlock. 
175229f3ad7dSJan Kara */ 17539e0b6f31SMatthew Wilcox (Oracle) folio_lock(folio); 17549e0b6f31SMatthew Wilcox (Oracle) /* Recheck when the folio is locked which pins bhs */ 17559e0b6f31SMatthew Wilcox (Oracle) head = folio_buffers(folio); 17569e0b6f31SMatthew Wilcox (Oracle) if (!head) 175729f3ad7dSJan Kara goto unlock_page; 175829f3ad7dSJan Kara bh = head; 175929f3ad7dSJan Kara do { 17606c006a9dSChandan Rajendra if (!buffer_mapped(bh) || (bh->b_blocknr < block)) 176129f3ad7dSJan Kara goto next; 176229f3ad7dSJan Kara if (bh->b_blocknr >= block + len) 176329f3ad7dSJan Kara break; 176429f3ad7dSJan Kara clear_buffer_dirty(bh); 176529f3ad7dSJan Kara wait_on_buffer(bh); 176629f3ad7dSJan Kara clear_buffer_req(bh); 176729f3ad7dSJan Kara next: 176829f3ad7dSJan Kara bh = bh->b_this_page; 176929f3ad7dSJan Kara } while (bh != head); 177029f3ad7dSJan Kara unlock_page: 17719e0b6f31SMatthew Wilcox (Oracle) folio_unlock(folio); 177229f3ad7dSJan Kara } 17739e0b6f31SMatthew Wilcox (Oracle) folio_batch_release(&fbatch); 177429f3ad7dSJan Kara cond_resched(); 1775c10f778dSJan Kara /* End of range already reached? */ 1776c10f778dSJan Kara if (index > end || !index) 1777c10f778dSJan Kara break; 17781da177e4SLinus Torvalds } 17791da177e4SLinus Torvalds } 178029f3ad7dSJan Kara EXPORT_SYMBOL(clean_bdev_aliases); 17811da177e4SLinus Torvalds 17821da177e4SLinus Torvalds /* 178345bce8f3SLinus Torvalds * Size is a power-of-two in the range 512..PAGE_SIZE, 178445bce8f3SLinus Torvalds * and the case we care about most is PAGE_SIZE. 178545bce8f3SLinus Torvalds * 178645bce8f3SLinus Torvalds * So this *could* possibly be written with those 178745bce8f3SLinus Torvalds * constraints in mind (relevant mostly if some 178845bce8f3SLinus Torvalds * architecture has a slow bit-scan instruction) 178945bce8f3SLinus Torvalds */ 179045bce8f3SLinus Torvalds static inline int block_size_bits(unsigned int blocksize) 179145bce8f3SLinus Torvalds { 179245bce8f3SLinus Torvalds return ilog2(blocksize); 179345bce8f3SLinus Torvalds } 179445bce8f3SLinus Torvalds 1795c6c8c3e7SPankaj Raghav static struct buffer_head *folio_create_buffers(struct folio *folio, 1796c6c8c3e7SPankaj Raghav struct inode *inode, 1797c6c8c3e7SPankaj Raghav unsigned int b_state) 179845bce8f3SLinus Torvalds { 1799c6c8c3e7SPankaj Raghav BUG_ON(!folio_test_locked(folio)); 180045bce8f3SLinus Torvalds 1801c6c8c3e7SPankaj Raghav if (!folio_buffers(folio)) 1802c6c8c3e7SPankaj Raghav folio_create_empty_buffers(folio, 1803c6c8c3e7SPankaj Raghav 1 << READ_ONCE(inode->i_blkbits), 18046aa7de05SMark Rutland b_state); 1805c6c8c3e7SPankaj Raghav return folio_buffers(folio); 180645bce8f3SLinus Torvalds } 180745bce8f3SLinus Torvalds 180845bce8f3SLinus Torvalds /* 18091da177e4SLinus Torvalds * NOTE! All mapped/uptodate combinations are valid: 18101da177e4SLinus Torvalds * 18111da177e4SLinus Torvalds * Mapped Uptodate Meaning 18121da177e4SLinus Torvalds * 18131da177e4SLinus Torvalds * No No "unknown" - must do get_block() 18141da177e4SLinus Torvalds * No Yes "hole" - zero-filled 18151da177e4SLinus Torvalds * Yes No "allocated" - allocated on disk, not read in 18161da177e4SLinus Torvalds * Yes Yes "valid" - allocated and up-to-date in memory. 18171da177e4SLinus Torvalds * 18181da177e4SLinus Torvalds * "Dirty" is valid only with the last case (mapped+uptodate). 
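 *
 * Editor's hedged examples of how these states arise (not exhaustive):
 * a hole found by get_block() stays unmapped but may be zero-filled and
 * marked uptodate, while a freshly created blockdev buffer is mapped
 * but not uptodate until it is read in or fully overwritten:
 *
 *	if (!buffer_mapped(bh) && !buffer_uptodate(bh))
 *		err = get_block(inode, block, bh, 0);	"unknown"
 *	if (buffer_mapped(bh) && !buffer_uptodate(bh))
 *		err = bh_read(bh, 0);			"allocated" -> "valid"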
18191da177e4SLinus Torvalds */ 18201da177e4SLinus Torvalds 18211da177e4SLinus Torvalds /* 18221da177e4SLinus Torvalds * While block_write_full_page is writing back the dirty buffers under 18231da177e4SLinus Torvalds * the page lock, whoever dirtied the buffers may decide to clean them 18241da177e4SLinus Torvalds * again at any time. We handle that by only looking at the buffer 18251da177e4SLinus Torvalds * state inside lock_buffer(). 18261da177e4SLinus Torvalds * 18271da177e4SLinus Torvalds * If block_write_full_page() is called for regular writeback 18281da177e4SLinus Torvalds * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a 18291da177e4SLinus Torvalds * locked buffer. This only can happen if someone has written the buffer 18301da177e4SLinus Torvalds * directly, with submit_bh(). At the address_space level PageWriteback 18311da177e4SLinus Torvalds * prevents this contention from occurring. 18326e34eeddSTheodore Ts'o * 18336e34eeddSTheodore Ts'o * If block_write_full_page() is called with wbc->sync_mode == 183470fd7614SChristoph Hellwig * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this 1835721a9602SJens Axboe * causes the writes to be flagged as synchronous writes. 18361da177e4SLinus Torvalds */ 1837b4bba389SBenjamin Marzinski int __block_write_full_page(struct inode *inode, struct page *page, 183835c80d5fSChris Mason get_block_t *get_block, struct writeback_control *wbc, 183935c80d5fSChris Mason bh_end_io_t *handler) 18401da177e4SLinus Torvalds { 18411da177e4SLinus Torvalds int err; 18421da177e4SLinus Torvalds sector_t block; 18431da177e4SLinus Torvalds sector_t last_block; 1844f0fbd5fcSAndrew Morton struct buffer_head *bh, *head; 184545bce8f3SLinus Torvalds unsigned int blocksize, bbits; 18461da177e4SLinus Torvalds int nr_underway = 0; 18473ae72869SBart Van Assche blk_opf_t write_flags = wbc_to_write_flags(wbc); 18481da177e4SLinus Torvalds 1849c6c8c3e7SPankaj Raghav head = folio_create_buffers(page_folio(page), inode, 18501da177e4SLinus Torvalds (1 << BH_Dirty) | (1 << BH_Uptodate)); 18511da177e4SLinus Torvalds 18521da177e4SLinus Torvalds /* 1853e621900aSMatthew Wilcox (Oracle) * Be very careful. We have no exclusion from block_dirty_folio 18541da177e4SLinus Torvalds * here, and the (potentially unmapped) buffers may become dirty at 18551da177e4SLinus Torvalds * any time. If a buffer becomes dirty here after we've inspected it 18561da177e4SLinus Torvalds * then we just miss that fact, and the page stays dirty. 18571da177e4SLinus Torvalds * 1858e621900aSMatthew Wilcox (Oracle) * Buffers outside i_size may be dirtied by block_dirty_folio; 18591da177e4SLinus Torvalds * handle that here by just cleaning them. 18601da177e4SLinus Torvalds */ 18611da177e4SLinus Torvalds 18621da177e4SLinus Torvalds bh = head; 186345bce8f3SLinus Torvalds blocksize = bh->b_size; 186445bce8f3SLinus Torvalds bbits = block_size_bits(blocksize); 186545bce8f3SLinus Torvalds 186609cbfeafSKirill A. Shutemov block = (sector_t)page->index << (PAGE_SHIFT - bbits); 186745bce8f3SLinus Torvalds last_block = (i_size_read(inode) - 1) >> bbits; 18681da177e4SLinus Torvalds 18691da177e4SLinus Torvalds /* 18701da177e4SLinus Torvalds * Get all the dirty buffers mapped to disk addresses and 18711da177e4SLinus Torvalds * handle any aliases from the underlying blockdev's mapping. 
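 *
 * Editor's worked example (illustrative): with 1K blocks, a 4K page at
 * index 0 and i_size == 2048, last_block is 1; blocks 2 and 3 take the
 * first branch below and are simply cleaned, while dirty, still-unmapped
 * blocks 0 and 1 are mapped through get_block().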
18721da177e4SLinus Torvalds */ 18731da177e4SLinus Torvalds do { 18741da177e4SLinus Torvalds if (block > last_block) { 18751da177e4SLinus Torvalds /* 18761da177e4SLinus Torvalds * mapped buffers outside i_size will occur, because 18771da177e4SLinus Torvalds * this page can be outside i_size when there is a 18781da177e4SLinus Torvalds * truncate in progress. 18791da177e4SLinus Torvalds */ 18801da177e4SLinus Torvalds /* 18811da177e4SLinus Torvalds * The buffer was zeroed by block_write_full_page() 18821da177e4SLinus Torvalds */ 18831da177e4SLinus Torvalds clear_buffer_dirty(bh); 18841da177e4SLinus Torvalds set_buffer_uptodate(bh); 188529a814d2SAlex Tomas } else if ((!buffer_mapped(bh) || buffer_delay(bh)) && 188629a814d2SAlex Tomas buffer_dirty(bh)) { 1887b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 18881da177e4SLinus Torvalds err = get_block(inode, block, bh, 1); 18891da177e4SLinus Torvalds if (err) 18901da177e4SLinus Torvalds goto recover; 189129a814d2SAlex Tomas clear_buffer_delay(bh); 18921da177e4SLinus Torvalds if (buffer_new(bh)) { 18931da177e4SLinus Torvalds /* blockdev mappings never come here */ 18941da177e4SLinus Torvalds clear_buffer_new(bh); 1895e64855c6SJan Kara clean_bdev_bh_alias(bh); 18961da177e4SLinus Torvalds } 18971da177e4SLinus Torvalds } 18981da177e4SLinus Torvalds bh = bh->b_this_page; 18991da177e4SLinus Torvalds block++; 19001da177e4SLinus Torvalds } while (bh != head); 19011da177e4SLinus Torvalds 19021da177e4SLinus Torvalds do { 19031da177e4SLinus Torvalds if (!buffer_mapped(bh)) 19041da177e4SLinus Torvalds continue; 19051da177e4SLinus Torvalds /* 19061da177e4SLinus Torvalds * If it's a fully non-blocking write attempt and we cannot 19071da177e4SLinus Torvalds * lock the buffer then redirty the page. Note that this can 19085b0830cbSJens Axboe * potentially cause a busy-wait loop from writeback threads 19095b0830cbSJens Axboe * and kswapd activity, but those code paths have their own 19105b0830cbSJens Axboe * higher-level throttling. 19111da177e4SLinus Torvalds */ 19121b430beeSWu Fengguang if (wbc->sync_mode != WB_SYNC_NONE) { 19131da177e4SLinus Torvalds lock_buffer(bh); 1914ca5de404SNick Piggin } else if (!trylock_buffer(bh)) { 19151da177e4SLinus Torvalds redirty_page_for_writepage(wbc, page); 19161da177e4SLinus Torvalds continue; 19171da177e4SLinus Torvalds } 19181da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) { 191935c80d5fSChris Mason mark_buffer_async_write_endio(bh, handler); 19201da177e4SLinus Torvalds } else { 19211da177e4SLinus Torvalds unlock_buffer(bh); 19221da177e4SLinus Torvalds } 19231da177e4SLinus Torvalds } while ((bh = bh->b_this_page) != head); 19241da177e4SLinus Torvalds 19251da177e4SLinus Torvalds /* 19261da177e4SLinus Torvalds * The page and its buffers are protected by PageWriteback(), so we can 19271da177e4SLinus Torvalds * drop the bh refcounts early. 
19281da177e4SLinus Torvalds */ 19291da177e4SLinus Torvalds BUG_ON(PageWriteback(page)); 19301da177e4SLinus Torvalds set_page_writeback(page); 19311da177e4SLinus Torvalds 19321da177e4SLinus Torvalds do { 19331da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 19341da177e4SLinus Torvalds if (buffer_async_write(bh)) { 19351420c4a5SBart Van Assche submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc); 19361da177e4SLinus Torvalds nr_underway++; 1937ad576e63SNick Piggin } 19381da177e4SLinus Torvalds bh = next; 19391da177e4SLinus Torvalds } while (bh != head); 194005937baaSAndrew Morton unlock_page(page); 19411da177e4SLinus Torvalds 19421da177e4SLinus Torvalds err = 0; 19431da177e4SLinus Torvalds done: 19441da177e4SLinus Torvalds if (nr_underway == 0) { 19451da177e4SLinus Torvalds /* 19461da177e4SLinus Torvalds * The page was marked dirty, but the buffers were 19471da177e4SLinus Torvalds * clean. Someone wrote them back by hand with 194879f59784SZhang Yi * write_dirty_buffer/submit_bh. A rare case. 19491da177e4SLinus Torvalds */ 19501da177e4SLinus Torvalds end_page_writeback(page); 19513d67f2d7SNick Piggin 19521da177e4SLinus Torvalds /* 19531da177e4SLinus Torvalds * The page and buffer_heads can be released at any time from 19541da177e4SLinus Torvalds * here on. 19551da177e4SLinus Torvalds */ 19561da177e4SLinus Torvalds } 19571da177e4SLinus Torvalds return err; 19581da177e4SLinus Torvalds 19591da177e4SLinus Torvalds recover: 19601da177e4SLinus Torvalds /* 19611da177e4SLinus Torvalds * ENOSPC, or some other error. We may already have added some 19621da177e4SLinus Torvalds * blocks to the file, so we need to write these out to avoid 19631da177e4SLinus Torvalds * exposing stale data. 19641da177e4SLinus Torvalds * The page is currently locked and not marked for writeback 19651da177e4SLinus Torvalds */ 19661da177e4SLinus Torvalds bh = head; 19671da177e4SLinus Torvalds /* Recovery: lock and submit the mapped buffers */ 19681da177e4SLinus Torvalds do { 196929a814d2SAlex Tomas if (buffer_mapped(bh) && buffer_dirty(bh) && 197029a814d2SAlex Tomas !buffer_delay(bh)) { 19711da177e4SLinus Torvalds lock_buffer(bh); 197235c80d5fSChris Mason mark_buffer_async_write_endio(bh, handler); 19731da177e4SLinus Torvalds } else { 19741da177e4SLinus Torvalds /* 19751da177e4SLinus Torvalds * The buffer may have been set dirty during 19761da177e4SLinus Torvalds * attachment to a dirty page. 
19771da177e4SLinus Torvalds */ 19781da177e4SLinus Torvalds clear_buffer_dirty(bh); 19791da177e4SLinus Torvalds } 19801da177e4SLinus Torvalds } while ((bh = bh->b_this_page) != head); 19811da177e4SLinus Torvalds SetPageError(page); 19821da177e4SLinus Torvalds BUG_ON(PageWriteback(page)); 19837e4c3690SAndrew Morton mapping_set_error(page->mapping, err); 19841da177e4SLinus Torvalds set_page_writeback(page); 19851da177e4SLinus Torvalds do { 19861da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 19871da177e4SLinus Torvalds if (buffer_async_write(bh)) { 19881da177e4SLinus Torvalds clear_buffer_dirty(bh); 19891420c4a5SBart Van Assche submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc); 19901da177e4SLinus Torvalds nr_underway++; 1991ad576e63SNick Piggin } 19921da177e4SLinus Torvalds bh = next; 19931da177e4SLinus Torvalds } while (bh != head); 1994ffda9d30SNick Piggin unlock_page(page); 19951da177e4SLinus Torvalds goto done; 19961da177e4SLinus Torvalds } 1997b4bba389SBenjamin Marzinski EXPORT_SYMBOL(__block_write_full_page); 19981da177e4SLinus Torvalds 1999afddba49SNick Piggin /* 2000afddba49SNick Piggin * If a page has any new buffers, zero them out here, and mark them uptodate 2001afddba49SNick Piggin * and dirty so they'll be written out (in order to prevent uninitialised 2002afddba49SNick Piggin * block data from leaking). And clear the new bit. 2003afddba49SNick Piggin */ 2004afddba49SNick Piggin void page_zero_new_buffers(struct page *page, unsigned from, unsigned to) 2005afddba49SNick Piggin { 2006afddba49SNick Piggin unsigned int block_start, block_end; 2007afddba49SNick Piggin struct buffer_head *head, *bh; 2008afddba49SNick Piggin 2009afddba49SNick Piggin BUG_ON(!PageLocked(page)); 2010afddba49SNick Piggin if (!page_has_buffers(page)) 2011afddba49SNick Piggin return; 2012afddba49SNick Piggin 2013afddba49SNick Piggin bh = head = page_buffers(page); 2014afddba49SNick Piggin block_start = 0; 2015afddba49SNick Piggin do { 2016afddba49SNick Piggin block_end = block_start + bh->b_size; 2017afddba49SNick Piggin 2018afddba49SNick Piggin if (buffer_new(bh)) { 2019afddba49SNick Piggin if (block_end > from && block_start < to) { 2020afddba49SNick Piggin if (!PageUptodate(page)) { 2021afddba49SNick Piggin unsigned start, size; 2022afddba49SNick Piggin 2023afddba49SNick Piggin start = max(from, block_start); 2024afddba49SNick Piggin size = min(to, block_end) - start; 2025afddba49SNick Piggin 2026eebd2aa3SChristoph Lameter zero_user(page, start, size); 2027afddba49SNick Piggin set_buffer_uptodate(bh); 2028afddba49SNick Piggin } 2029afddba49SNick Piggin 2030afddba49SNick Piggin clear_buffer_new(bh); 2031afddba49SNick Piggin mark_buffer_dirty(bh); 2032afddba49SNick Piggin } 2033afddba49SNick Piggin } 2034afddba49SNick Piggin 2035afddba49SNick Piggin block_start = block_end; 2036afddba49SNick Piggin bh = bh->b_this_page; 2037afddba49SNick Piggin } while (bh != head); 2038afddba49SNick Piggin } 2039afddba49SNick Piggin EXPORT_SYMBOL(page_zero_new_buffers); 2040afddba49SNick Piggin 2041ae259a9cSChristoph Hellwig static void 2042ae259a9cSChristoph Hellwig iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, 20436d49cc85SChristoph Hellwig const struct iomap *iomap) 2044ae259a9cSChristoph Hellwig { 2045ae259a9cSChristoph Hellwig loff_t offset = block << inode->i_blkbits; 2046ae259a9cSChristoph Hellwig 2047ae259a9cSChristoph Hellwig bh->b_bdev = iomap->bdev; 2048ae259a9cSChristoph Hellwig 2049ae259a9cSChristoph Hellwig /* 2050ae259a9cSChristoph Hellwig * Block points to 
offset in file we need to map, iomap contains 2051ae259a9cSChristoph Hellwig * the offset at which the map starts. If the map ends before the 2052ae259a9cSChristoph Hellwig * current block, then do not map the buffer and let the caller 2053ae259a9cSChristoph Hellwig * handle it. 2054ae259a9cSChristoph Hellwig */ 2055ae259a9cSChristoph Hellwig BUG_ON(offset >= iomap->offset + iomap->length); 2056ae259a9cSChristoph Hellwig 2057ae259a9cSChristoph Hellwig switch (iomap->type) { 2058ae259a9cSChristoph Hellwig case IOMAP_HOLE: 2059ae259a9cSChristoph Hellwig /* 2060ae259a9cSChristoph Hellwig * If the buffer is not up to date or beyond the current EOF, 2061ae259a9cSChristoph Hellwig * we need to mark it as new to ensure sub-block zeroing is 2062ae259a9cSChristoph Hellwig * executed if necessary. 2063ae259a9cSChristoph Hellwig */ 2064ae259a9cSChristoph Hellwig if (!buffer_uptodate(bh) || 2065ae259a9cSChristoph Hellwig (offset >= i_size_read(inode))) 2066ae259a9cSChristoph Hellwig set_buffer_new(bh); 2067ae259a9cSChristoph Hellwig break; 2068ae259a9cSChristoph Hellwig case IOMAP_DELALLOC: 2069ae259a9cSChristoph Hellwig if (!buffer_uptodate(bh) || 2070ae259a9cSChristoph Hellwig (offset >= i_size_read(inode))) 2071ae259a9cSChristoph Hellwig set_buffer_new(bh); 2072ae259a9cSChristoph Hellwig set_buffer_uptodate(bh); 2073ae259a9cSChristoph Hellwig set_buffer_mapped(bh); 2074ae259a9cSChristoph Hellwig set_buffer_delay(bh); 2075ae259a9cSChristoph Hellwig break; 2076ae259a9cSChristoph Hellwig case IOMAP_UNWRITTEN: 2077ae259a9cSChristoph Hellwig /* 20783d7b6b21SAndreas Gruenbacher * For unwritten regions, we always need to ensure that regions 20793d7b6b21SAndreas Gruenbacher * in the block we are not writing to are zeroed. Mark the 20803d7b6b21SAndreas Gruenbacher * buffer as new to ensure this. 2081ae259a9cSChristoph Hellwig */ 2082ae259a9cSChristoph Hellwig set_buffer_new(bh); 2083ae259a9cSChristoph Hellwig set_buffer_unwritten(bh); 2084df561f66SGustavo A. R. Silva fallthrough; 2085ae259a9cSChristoph Hellwig case IOMAP_MAPPED: 20863d7b6b21SAndreas Gruenbacher if ((iomap->flags & IOMAP_F_NEW) || 20873d7b6b21SAndreas Gruenbacher offset >= i_size_read(inode)) 2088ae259a9cSChristoph Hellwig set_buffer_new(bh); 208919fe5f64SAndreas Gruenbacher bh->b_blocknr = (iomap->addr + offset - iomap->offset) >> 209019fe5f64SAndreas Gruenbacher inode->i_blkbits; 2091ae259a9cSChristoph Hellwig set_buffer_mapped(bh); 2092ae259a9cSChristoph Hellwig break; 2093ae259a9cSChristoph Hellwig } 2094ae259a9cSChristoph Hellwig } 2095ae259a9cSChristoph Hellwig 2096d1bd0b4eSMatthew Wilcox (Oracle) int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len, 20976d49cc85SChristoph Hellwig get_block_t *get_block, const struct iomap *iomap) 20981da177e4SLinus Torvalds { 209909cbfeafSKirill A. Shutemov unsigned from = pos & (PAGE_SIZE - 1); 2100ebdec241SChristoph Hellwig unsigned to = from + len; 2101d1bd0b4eSMatthew Wilcox (Oracle) struct inode *inode = folio->mapping->host; 21021da177e4SLinus Torvalds unsigned block_start, block_end; 21031da177e4SLinus Torvalds sector_t block; 21041da177e4SLinus Torvalds int err = 0; 21051da177e4SLinus Torvalds unsigned blocksize, bbits; 21061da177e4SLinus Torvalds struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; 21071da177e4SLinus Torvalds 2108d1bd0b4eSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio)); 210909cbfeafSKirill A. Shutemov BUG_ON(from > PAGE_SIZE); 211009cbfeafSKirill A. 
Shutemov BUG_ON(to > PAGE_SIZE); 21111da177e4SLinus Torvalds BUG_ON(from > to); 21121da177e4SLinus Torvalds 2113c6c8c3e7SPankaj Raghav head = folio_create_buffers(folio, inode, 0); 211445bce8f3SLinus Torvalds blocksize = head->b_size; 211545bce8f3SLinus Torvalds bbits = block_size_bits(blocksize); 21161da177e4SLinus Torvalds 2117d1bd0b4eSMatthew Wilcox (Oracle) block = (sector_t)folio->index << (PAGE_SHIFT - bbits); 21181da177e4SLinus Torvalds 21191da177e4SLinus Torvalds for(bh = head, block_start = 0; bh != head || !block_start; 21201da177e4SLinus Torvalds block++, block_start=block_end, bh = bh->b_this_page) { 21211da177e4SLinus Torvalds block_end = block_start + blocksize; 21221da177e4SLinus Torvalds if (block_end <= from || block_start >= to) { 2123d1bd0b4eSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) { 21241da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 21251da177e4SLinus Torvalds set_buffer_uptodate(bh); 21261da177e4SLinus Torvalds } 21271da177e4SLinus Torvalds continue; 21281da177e4SLinus Torvalds } 21291da177e4SLinus Torvalds if (buffer_new(bh)) 21301da177e4SLinus Torvalds clear_buffer_new(bh); 21311da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 2132b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 2133ae259a9cSChristoph Hellwig if (get_block) { 21341da177e4SLinus Torvalds err = get_block(inode, block, bh, 1); 21351da177e4SLinus Torvalds if (err) 2136f3ddbdc6SNick Piggin break; 2137ae259a9cSChristoph Hellwig } else { 2138ae259a9cSChristoph Hellwig iomap_to_bh(inode, block, bh, iomap); 2139ae259a9cSChristoph Hellwig } 2140ae259a9cSChristoph Hellwig 21411da177e4SLinus Torvalds if (buffer_new(bh)) { 2142e64855c6SJan Kara clean_bdev_bh_alias(bh); 2143d1bd0b4eSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) { 2144637aff46SNick Piggin clear_buffer_new(bh); 21451da177e4SLinus Torvalds set_buffer_uptodate(bh); 2146637aff46SNick Piggin mark_buffer_dirty(bh); 21471da177e4SLinus Torvalds continue; 21481da177e4SLinus Torvalds } 2149eebd2aa3SChristoph Lameter if (block_end > to || block_start < from) 2150d1bd0b4eSMatthew Wilcox (Oracle) folio_zero_segments(folio, 2151eebd2aa3SChristoph Lameter to, block_end, 2152eebd2aa3SChristoph Lameter block_start, from); 21531da177e4SLinus Torvalds continue; 21541da177e4SLinus Torvalds } 21551da177e4SLinus Torvalds } 2156d1bd0b4eSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) { 21571da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 21581da177e4SLinus Torvalds set_buffer_uptodate(bh); 21591da177e4SLinus Torvalds continue; 21601da177e4SLinus Torvalds } 21611da177e4SLinus Torvalds if (!buffer_uptodate(bh) && !buffer_delay(bh) && 216233a266ddSDavid Chinner !buffer_unwritten(bh) && 21631da177e4SLinus Torvalds (block_start < from || block_end > to)) { 2164e7ea1129SZhang Yi bh_read_nowait(bh, 0); 21651da177e4SLinus Torvalds *wait_bh++=bh; 21661da177e4SLinus Torvalds } 21671da177e4SLinus Torvalds } 21681da177e4SLinus Torvalds /* 21691da177e4SLinus Torvalds * If we issued read requests - let them complete. 
21701da177e4SLinus Torvalds */ 21711da177e4SLinus Torvalds while(wait_bh > wait) { 21721da177e4SLinus Torvalds wait_on_buffer(*--wait_bh); 21731da177e4SLinus Torvalds if (!buffer_uptodate(*wait_bh)) 2174f3ddbdc6SNick Piggin err = -EIO; 21751da177e4SLinus Torvalds } 2176f9f07b6cSJan Kara if (unlikely(err)) 2177d1bd0b4eSMatthew Wilcox (Oracle) page_zero_new_buffers(&folio->page, from, to); 21781da177e4SLinus Torvalds return err; 21791da177e4SLinus Torvalds } 2180ae259a9cSChristoph Hellwig 2181ae259a9cSChristoph Hellwig int __block_write_begin(struct page *page, loff_t pos, unsigned len, 2182ae259a9cSChristoph Hellwig get_block_t *get_block) 2183ae259a9cSChristoph Hellwig { 2184d1bd0b4eSMatthew Wilcox (Oracle) return __block_write_begin_int(page_folio(page), pos, len, get_block, 2185d1bd0b4eSMatthew Wilcox (Oracle) NULL); 2186ae259a9cSChristoph Hellwig } 2187ebdec241SChristoph Hellwig EXPORT_SYMBOL(__block_write_begin); 21881da177e4SLinus Torvalds 21891da177e4SLinus Torvalds static int __block_commit_write(struct inode *inode, struct page *page, 21901da177e4SLinus Torvalds unsigned from, unsigned to) 21911da177e4SLinus Torvalds { 21921da177e4SLinus Torvalds unsigned block_start, block_end; 21931da177e4SLinus Torvalds int partial = 0; 21941da177e4SLinus Torvalds unsigned blocksize; 21951da177e4SLinus Torvalds struct buffer_head *bh, *head; 21961da177e4SLinus Torvalds 219745bce8f3SLinus Torvalds bh = head = page_buffers(page); 219845bce8f3SLinus Torvalds blocksize = bh->b_size; 21991da177e4SLinus Torvalds 220045bce8f3SLinus Torvalds block_start = 0; 220145bce8f3SLinus Torvalds do { 22021da177e4SLinus Torvalds block_end = block_start + blocksize; 22031da177e4SLinus Torvalds if (block_end <= from || block_start >= to) { 22041da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 22051da177e4SLinus Torvalds partial = 1; 22061da177e4SLinus Torvalds } else { 22071da177e4SLinus Torvalds set_buffer_uptodate(bh); 22081da177e4SLinus Torvalds mark_buffer_dirty(bh); 22091da177e4SLinus Torvalds } 22104ebd3aecSYang Guo if (buffer_new(bh)) 2211afddba49SNick Piggin clear_buffer_new(bh); 221245bce8f3SLinus Torvalds 221345bce8f3SLinus Torvalds block_start = block_end; 221445bce8f3SLinus Torvalds bh = bh->b_this_page; 221545bce8f3SLinus Torvalds } while (bh != head); 22161da177e4SLinus Torvalds 22171da177e4SLinus Torvalds /* 22181da177e4SLinus Torvalds * If this is a partial write which happened to make all buffers 22192c69e205SMatthew Wilcox (Oracle) * uptodate then we can optimize away a bogus read_folio() for 22201da177e4SLinus Torvalds * the next read(). Here we 'discover' whether the page went 22211da177e4SLinus Torvalds * uptodate as a result of this (potentially partial) write. 22221da177e4SLinus Torvalds */ 22231da177e4SLinus Torvalds if (!partial) 22241da177e4SLinus Torvalds SetPageUptodate(page); 22251da177e4SLinus Torvalds return 0; 22261da177e4SLinus Torvalds } 22271da177e4SLinus Torvalds 22281da177e4SLinus Torvalds /* 2229155130a4SChristoph Hellwig * block_write_begin takes care of the basic task of block allocation and 2230155130a4SChristoph Hellwig * bringing partial write blocks uptodate first. 2231155130a4SChristoph Hellwig * 22327bb46a67Snpiggin@suse.de * The filesystem needs to handle block truncation upon failure. 2233afddba49SNick Piggin */ 2234155130a4SChristoph Hellwig int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, 2235b3992d1eSMatthew Wilcox (Oracle) struct page **pagep, get_block_t *get_block) 2236afddba49SNick Piggin { 223709cbfeafSKirill A. 
Shutemov pgoff_t index = pos >> PAGE_SHIFT; 2238afddba49SNick Piggin struct page *page; 22396e1db88dSChristoph Hellwig int status; 2240afddba49SNick Piggin 2241b7446e7cSMatthew Wilcox (Oracle) page = grab_cache_page_write_begin(mapping, index); 22426e1db88dSChristoph Hellwig if (!page) 22436e1db88dSChristoph Hellwig return -ENOMEM; 2244afddba49SNick Piggin 22456e1db88dSChristoph Hellwig status = __block_write_begin(page, pos, len, get_block); 2246afddba49SNick Piggin if (unlikely(status)) { 2247afddba49SNick Piggin unlock_page(page); 224809cbfeafSKirill A. Shutemov put_page(page); 22496e1db88dSChristoph Hellwig page = NULL; 2250afddba49SNick Piggin } 2251afddba49SNick Piggin 22526e1db88dSChristoph Hellwig *pagep = page; 2253afddba49SNick Piggin return status; 2254afddba49SNick Piggin } 2255afddba49SNick Piggin EXPORT_SYMBOL(block_write_begin); 2256afddba49SNick Piggin 2257afddba49SNick Piggin int block_write_end(struct file *file, struct address_space *mapping, 2258afddba49SNick Piggin loff_t pos, unsigned len, unsigned copied, 2259afddba49SNick Piggin struct page *page, void *fsdata) 2260afddba49SNick Piggin { 2261afddba49SNick Piggin struct inode *inode = mapping->host; 2262afddba49SNick Piggin unsigned start; 2263afddba49SNick Piggin 226409cbfeafSKirill A. Shutemov start = pos & (PAGE_SIZE - 1); 2265afddba49SNick Piggin 2266afddba49SNick Piggin if (unlikely(copied < len)) { 2267afddba49SNick Piggin /* 22682c69e205SMatthew Wilcox (Oracle) * The buffers that were written will now be uptodate, so 22692c69e205SMatthew Wilcox (Oracle) * we don't have to worry about a read_folio reading them 22702c69e205SMatthew Wilcox (Oracle) * and overwriting a partial write. However if we have 22712c69e205SMatthew Wilcox (Oracle) * encountered a short write and only partially written 22722c69e205SMatthew Wilcox (Oracle) * into a buffer, it will not be marked uptodate, so a 22732c69e205SMatthew Wilcox (Oracle) * read_folio might come in and destroy our partial write. 2274afddba49SNick Piggin * 2275afddba49SNick Piggin * Do the simplest thing, and just treat any short write to a 2276afddba49SNick Piggin * non uptodate page as a zero-length write, and force the 2277afddba49SNick Piggin * caller to redo the whole thing. 
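 *
 * Illustrative example: a write of one full block into a freshly
 * allocated (buffer_new) block that faults after copying only a few
 * bytes leaves the buffer !uptodate, so we report zero bytes copied
 * and generic_perform_write() faults the source back in and retries.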
2278afddba49SNick Piggin */ 2279afddba49SNick Piggin if (!PageUptodate(page)) 2280afddba49SNick Piggin copied = 0; 2281afddba49SNick Piggin 2282afddba49SNick Piggin page_zero_new_buffers(page, start+copied, start+len); 2283afddba49SNick Piggin } 2284afddba49SNick Piggin flush_dcache_page(page); 2285afddba49SNick Piggin 2286afddba49SNick Piggin /* This could be a short (even 0-length) commit */ 2287afddba49SNick Piggin __block_commit_write(inode, page, start, start+copied); 2288afddba49SNick Piggin 2289afddba49SNick Piggin return copied; 2290afddba49SNick Piggin } 2291afddba49SNick Piggin EXPORT_SYMBOL(block_write_end); 2292afddba49SNick Piggin 2293afddba49SNick Piggin int generic_write_end(struct file *file, struct address_space *mapping, 2294afddba49SNick Piggin loff_t pos, unsigned len, unsigned copied, 2295afddba49SNick Piggin struct page *page, void *fsdata) 2296afddba49SNick Piggin { 22978af54f29SChristoph Hellwig struct inode *inode = mapping->host; 22988af54f29SChristoph Hellwig loff_t old_size = inode->i_size; 22998af54f29SChristoph Hellwig bool i_size_changed = false; 23008af54f29SChristoph Hellwig 2301afddba49SNick Piggin copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); 23028af54f29SChristoph Hellwig 23038af54f29SChristoph Hellwig /* 23048af54f29SChristoph Hellwig * No need to use i_size_read() here, the i_size cannot change under us 23058af54f29SChristoph Hellwig * because we hold i_rwsem. 23068af54f29SChristoph Hellwig * 23078af54f29SChristoph Hellwig * But it's important to update i_size while still holding page lock: 23088af54f29SChristoph Hellwig * page writeout could otherwise come in and zero beyond i_size. 23098af54f29SChristoph Hellwig */ 23108af54f29SChristoph Hellwig if (pos + copied > inode->i_size) { 23118af54f29SChristoph Hellwig i_size_write(inode, pos + copied); 23128af54f29SChristoph Hellwig i_size_changed = true; 23138af54f29SChristoph Hellwig } 23148af54f29SChristoph Hellwig 23158af54f29SChristoph Hellwig unlock_page(page); 23167a77dad7SAndreas Gruenbacher put_page(page); 23178af54f29SChristoph Hellwig 23188af54f29SChristoph Hellwig if (old_size < pos) 23198af54f29SChristoph Hellwig pagecache_isize_extended(inode, old_size, pos); 23208af54f29SChristoph Hellwig /* 23218af54f29SChristoph Hellwig * Don't mark the inode dirty under page lock. First, it unnecessarily 23228af54f29SChristoph Hellwig * makes the holding time of page lock longer. Second, it forces lock 23238af54f29SChristoph Hellwig * ordering of page lock and transaction start for journaling 23248af54f29SChristoph Hellwig * filesystems. 23258af54f29SChristoph Hellwig */ 23268af54f29SChristoph Hellwig if (i_size_changed) 23278af54f29SChristoph Hellwig mark_inode_dirty(inode); 232826ddb1f4SAndreas Gruenbacher return copied; 2329afddba49SNick Piggin } 2330afddba49SNick Piggin EXPORT_SYMBOL(generic_write_end); 2331afddba49SNick Piggin 2332afddba49SNick Piggin /* 23332e7e80f7SMatthew Wilcox (Oracle) * block_is_partially_uptodate checks whether buffers within a folio are 23348ab22b9aSHisashi Hifumi * uptodate or not. 23358ab22b9aSHisashi Hifumi * 23362e7e80f7SMatthew Wilcox (Oracle) * Returns true if all buffers which correspond to the specified part 23372e7e80f7SMatthew Wilcox (Oracle) * of the folio are uptodate. 
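 *
 * Buffer-head based filesystems normally wire this helper directly
 * into their address_space_operations, e.g. (illustrative):
 *
 *	.is_partially_uptodate	= block_is_partially_uptodate,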
23388ab22b9aSHisashi Hifumi */ 23392e7e80f7SMatthew Wilcox (Oracle) bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count) 23408ab22b9aSHisashi Hifumi { 23418ab22b9aSHisashi Hifumi unsigned block_start, block_end, blocksize; 23428ab22b9aSHisashi Hifumi unsigned to; 23438ab22b9aSHisashi Hifumi struct buffer_head *bh, *head; 23442e7e80f7SMatthew Wilcox (Oracle) bool ret = true; 23458ab22b9aSHisashi Hifumi 23462e7e80f7SMatthew Wilcox (Oracle) head = folio_buffers(folio); 23472e7e80f7SMatthew Wilcox (Oracle) if (!head) 23482e7e80f7SMatthew Wilcox (Oracle) return false; 234945bce8f3SLinus Torvalds blocksize = head->b_size; 23502e7e80f7SMatthew Wilcox (Oracle) to = min_t(unsigned, folio_size(folio) - from, count); 23518ab22b9aSHisashi Hifumi to = from + to; 23522e7e80f7SMatthew Wilcox (Oracle) if (from < blocksize && to > folio_size(folio) - blocksize) 23532e7e80f7SMatthew Wilcox (Oracle) return false; 23548ab22b9aSHisashi Hifumi 23558ab22b9aSHisashi Hifumi bh = head; 23568ab22b9aSHisashi Hifumi block_start = 0; 23578ab22b9aSHisashi Hifumi do { 23588ab22b9aSHisashi Hifumi block_end = block_start + blocksize; 23598ab22b9aSHisashi Hifumi if (block_end > from && block_start < to) { 23608ab22b9aSHisashi Hifumi if (!buffer_uptodate(bh)) { 23612e7e80f7SMatthew Wilcox (Oracle) ret = false; 23628ab22b9aSHisashi Hifumi break; 23638ab22b9aSHisashi Hifumi } 23648ab22b9aSHisashi Hifumi if (block_end >= to) 23658ab22b9aSHisashi Hifumi break; 23668ab22b9aSHisashi Hifumi } 23678ab22b9aSHisashi Hifumi block_start = block_end; 23688ab22b9aSHisashi Hifumi bh = bh->b_this_page; 23698ab22b9aSHisashi Hifumi } while (bh != head); 23708ab22b9aSHisashi Hifumi 23718ab22b9aSHisashi Hifumi return ret; 23728ab22b9aSHisashi Hifumi } 23738ab22b9aSHisashi Hifumi EXPORT_SYMBOL(block_is_partially_uptodate); 23748ab22b9aSHisashi Hifumi 23758ab22b9aSHisashi Hifumi /* 23762c69e205SMatthew Wilcox (Oracle) * Generic "read_folio" function for block devices that have the normal 23771da177e4SLinus Torvalds * get_block functionality. This is most of the block device filesystems. 23782c69e205SMatthew Wilcox (Oracle) * Reads the folio asynchronously --- the unlock_buffer() and 23791da177e4SLinus Torvalds * set/clear_buffer_uptodate() functions propagate buffer state into the 23802c69e205SMatthew Wilcox (Oracle) * folio once IO has completed. 23811da177e4SLinus Torvalds */ 23822c69e205SMatthew Wilcox (Oracle) int block_read_full_folio(struct folio *folio, get_block_t *get_block) 23831da177e4SLinus Torvalds { 23842c69e205SMatthew Wilcox (Oracle) struct inode *inode = folio->mapping->host; 23851da177e4SLinus Torvalds sector_t iblock, lblock; 23861da177e4SLinus Torvalds struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; 238745bce8f3SLinus Torvalds unsigned int blocksize, bbits; 23881da177e4SLinus Torvalds int nr, i; 23891da177e4SLinus Torvalds int fully_mapped = 1; 2390b7a6eb22SMatthew Wilcox (Oracle) bool page_error = false; 23914fa512ceSEric Biggers loff_t limit = i_size_read(inode); 23924fa512ceSEric Biggers 23934fa512ceSEric Biggers /* This is needed for ext4. 
*/ 23944fa512ceSEric Biggers if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode)) 23954fa512ceSEric Biggers limit = inode->i_sb->s_maxbytes; 23961da177e4SLinus Torvalds 23972c69e205SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_large(folio), folio); 23982c69e205SMatthew Wilcox (Oracle) 2399c6c8c3e7SPankaj Raghav head = folio_create_buffers(folio, inode, 0); 240045bce8f3SLinus Torvalds blocksize = head->b_size; 240145bce8f3SLinus Torvalds bbits = block_size_bits(blocksize); 24021da177e4SLinus Torvalds 24032c69e205SMatthew Wilcox (Oracle) iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits); 24044fa512ceSEric Biggers lblock = (limit+blocksize-1) >> bbits; 24051da177e4SLinus Torvalds bh = head; 24061da177e4SLinus Torvalds nr = 0; 24071da177e4SLinus Torvalds i = 0; 24081da177e4SLinus Torvalds 24091da177e4SLinus Torvalds do { 24101da177e4SLinus Torvalds if (buffer_uptodate(bh)) 24111da177e4SLinus Torvalds continue; 24121da177e4SLinus Torvalds 24131da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 2414c64610baSAndrew Morton int err = 0; 2415c64610baSAndrew Morton 24161da177e4SLinus Torvalds fully_mapped = 0; 24171da177e4SLinus Torvalds if (iblock < lblock) { 2418b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 2419c64610baSAndrew Morton err = get_block(inode, iblock, bh, 0); 2420b7a6eb22SMatthew Wilcox (Oracle) if (err) { 24212c69e205SMatthew Wilcox (Oracle) folio_set_error(folio); 2422b7a6eb22SMatthew Wilcox (Oracle) page_error = true; 2423b7a6eb22SMatthew Wilcox (Oracle) } 24241da177e4SLinus Torvalds } 24251da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 24262c69e205SMatthew Wilcox (Oracle) folio_zero_range(folio, i * blocksize, 24272c69e205SMatthew Wilcox (Oracle) blocksize); 2428c64610baSAndrew Morton if (!err) 24291da177e4SLinus Torvalds set_buffer_uptodate(bh); 24301da177e4SLinus Torvalds continue; 24311da177e4SLinus Torvalds } 24321da177e4SLinus Torvalds /* 24331da177e4SLinus Torvalds * get_block() might have updated the buffer 24341da177e4SLinus Torvalds * synchronously 24351da177e4SLinus Torvalds */ 24361da177e4SLinus Torvalds if (buffer_uptodate(bh)) 24371da177e4SLinus Torvalds continue; 24381da177e4SLinus Torvalds } 24391da177e4SLinus Torvalds arr[nr++] = bh; 24401da177e4SLinus Torvalds } while (i++, iblock++, (bh = bh->b_this_page) != head); 24411da177e4SLinus Torvalds 24421da177e4SLinus Torvalds if (fully_mapped) 24432c69e205SMatthew Wilcox (Oracle) folio_set_mappedtodisk(folio); 24441da177e4SLinus Torvalds 24451da177e4SLinus Torvalds if (!nr) { 24461da177e4SLinus Torvalds /* 24472c69e205SMatthew Wilcox (Oracle) * All buffers are uptodate - we can set the folio uptodate 24481da177e4SLinus Torvalds * as well. But not if get_block() returned an error. 24491da177e4SLinus Torvalds */ 2450b7a6eb22SMatthew Wilcox (Oracle) if (!page_error) 24512c69e205SMatthew Wilcox (Oracle) folio_mark_uptodate(folio); 24522c69e205SMatthew Wilcox (Oracle) folio_unlock(folio); 24531da177e4SLinus Torvalds return 0; 24541da177e4SLinus Torvalds } 24551da177e4SLinus Torvalds 24561da177e4SLinus Torvalds /* Stage two: lock the buffers */ 24571da177e4SLinus Torvalds for (i = 0; i < nr; i++) { 24581da177e4SLinus Torvalds bh = arr[i]; 24591da177e4SLinus Torvalds lock_buffer(bh); 24601da177e4SLinus Torvalds mark_buffer_async_read(bh); 24611da177e4SLinus Torvalds } 24621da177e4SLinus Torvalds 24631da177e4SLinus Torvalds /* 24641da177e4SLinus Torvalds * Stage 3: start the IO. 
Check for uptodateness 24651da177e4SLinus Torvalds * inside the buffer lock in case another process reading 24661da177e4SLinus Torvalds * the underlying blockdev brought it uptodate (the sct fix). 24671da177e4SLinus Torvalds */ 24681da177e4SLinus Torvalds for (i = 0; i < nr; i++) { 24691da177e4SLinus Torvalds bh = arr[i]; 24701da177e4SLinus Torvalds if (buffer_uptodate(bh)) 24711da177e4SLinus Torvalds end_buffer_async_read(bh, 1); 24721da177e4SLinus Torvalds else 24731420c4a5SBart Van Assche submit_bh(REQ_OP_READ, bh); 24741da177e4SLinus Torvalds } 24751da177e4SLinus Torvalds return 0; 24761da177e4SLinus Torvalds } 24772c69e205SMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_read_full_folio); 24781da177e4SLinus Torvalds 24791da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding 248089e10787SNick Piggin * truncates. Uses filesystem pagecache writes to allow the filesystem to 24811da177e4SLinus Torvalds * deal with the hole. 24821da177e4SLinus Torvalds */ 248389e10787SNick Piggin int generic_cont_expand_simple(struct inode *inode, loff_t size) 24841da177e4SLinus Torvalds { 24851da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 248653b524b8SMatthew Wilcox (Oracle) const struct address_space_operations *aops = mapping->a_ops; 24871da177e4SLinus Torvalds struct page *page; 24881468c6f4SAlexander Potapenko void *fsdata = NULL; 24891da177e4SLinus Torvalds int err; 24901da177e4SLinus Torvalds 2491c08d3b0eSnpiggin@suse.de err = inode_newsize_ok(inode, size); 2492c08d3b0eSnpiggin@suse.de if (err) 24931da177e4SLinus Torvalds goto out; 24941da177e4SLinus Torvalds 249553b524b8SMatthew Wilcox (Oracle) err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata); 249689e10787SNick Piggin if (err) 249705eb0b51SOGAWA Hirofumi goto out; 249805eb0b51SOGAWA Hirofumi 249953b524b8SMatthew Wilcox (Oracle) err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata); 250089e10787SNick Piggin BUG_ON(err > 0); 250105eb0b51SOGAWA Hirofumi 250205eb0b51SOGAWA Hirofumi out: 250305eb0b51SOGAWA Hirofumi return err; 250405eb0b51SOGAWA Hirofumi } 25051fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_cont_expand_simple); 250605eb0b51SOGAWA Hirofumi 2507f1e3af72SAdrian Bunk static int cont_expand_zero(struct file *file, struct address_space *mapping, 250889e10787SNick Piggin loff_t pos, loff_t *bytes) 250905eb0b51SOGAWA Hirofumi { 251089e10787SNick Piggin struct inode *inode = mapping->host; 251153b524b8SMatthew Wilcox (Oracle) const struct address_space_operations *aops = mapping->a_ops; 251293407472SFabian Frederick unsigned int blocksize = i_blocksize(inode); 251389e10787SNick Piggin struct page *page; 25141468c6f4SAlexander Potapenko void *fsdata = NULL; 251589e10787SNick Piggin pgoff_t index, curidx; 251689e10787SNick Piggin loff_t curpos; 251789e10787SNick Piggin unsigned zerofrom, offset, len; 251889e10787SNick Piggin int err = 0; 251905eb0b51SOGAWA Hirofumi 252009cbfeafSKirill A. Shutemov index = pos >> PAGE_SHIFT; 252109cbfeafSKirill A. Shutemov offset = pos & ~PAGE_MASK; 252289e10787SNick Piggin 252309cbfeafSKirill A. Shutemov while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) { 252409cbfeafSKirill A. Shutemov zerofrom = curpos & ~PAGE_MASK; 252589e10787SNick Piggin if (zerofrom & (blocksize-1)) { 252689e10787SNick Piggin *bytes |= (blocksize-1); 252789e10787SNick Piggin (*bytes)++; 252889e10787SNick Piggin } 252909cbfeafSKirill A. 
Shutemov len = PAGE_SIZE - zerofrom; 253089e10787SNick Piggin 253153b524b8SMatthew Wilcox (Oracle) err = aops->write_begin(file, mapping, curpos, len, 253289e10787SNick Piggin &page, &fsdata); 253389e10787SNick Piggin if (err) 253489e10787SNick Piggin goto out; 2535eebd2aa3SChristoph Lameter zero_user(page, zerofrom, len); 253653b524b8SMatthew Wilcox (Oracle) err = aops->write_end(file, mapping, curpos, len, len, 253789e10787SNick Piggin page, fsdata); 253889e10787SNick Piggin if (err < 0) 253989e10787SNick Piggin goto out; 254089e10787SNick Piggin BUG_ON(err != len); 254189e10787SNick Piggin err = 0; 2542061e9746SOGAWA Hirofumi 2543061e9746SOGAWA Hirofumi balance_dirty_pages_ratelimited(mapping); 2544c2ca0fcdSMikulas Patocka 254508d405c8SDavidlohr Bueso if (fatal_signal_pending(current)) { 2546c2ca0fcdSMikulas Patocka err = -EINTR; 2547c2ca0fcdSMikulas Patocka goto out; 2548c2ca0fcdSMikulas Patocka } 254989e10787SNick Piggin } 255089e10787SNick Piggin 255189e10787SNick Piggin /* page covers the boundary, find the boundary offset */ 255289e10787SNick Piggin if (index == curidx) { 255309cbfeafSKirill A. Shutemov zerofrom = curpos & ~PAGE_MASK; 255489e10787SNick Piggin /* if we will expand the thing last block will be filled */ 255589e10787SNick Piggin if (offset <= zerofrom) { 255689e10787SNick Piggin goto out; 255789e10787SNick Piggin } 255889e10787SNick Piggin if (zerofrom & (blocksize-1)) { 255989e10787SNick Piggin *bytes |= (blocksize-1); 256089e10787SNick Piggin (*bytes)++; 256189e10787SNick Piggin } 256289e10787SNick Piggin len = offset - zerofrom; 256389e10787SNick Piggin 256453b524b8SMatthew Wilcox (Oracle) err = aops->write_begin(file, mapping, curpos, len, 256589e10787SNick Piggin &page, &fsdata); 256689e10787SNick Piggin if (err) 256789e10787SNick Piggin goto out; 2568eebd2aa3SChristoph Lameter zero_user(page, zerofrom, len); 256953b524b8SMatthew Wilcox (Oracle) err = aops->write_end(file, mapping, curpos, len, len, 257089e10787SNick Piggin page, fsdata); 257189e10787SNick Piggin if (err < 0) 257289e10787SNick Piggin goto out; 257389e10787SNick Piggin BUG_ON(err != len); 257489e10787SNick Piggin err = 0; 257589e10787SNick Piggin } 257689e10787SNick Piggin out: 257789e10787SNick Piggin return err; 25781da177e4SLinus Torvalds } 25791da177e4SLinus Torvalds 25801da177e4SLinus Torvalds /* 25811da177e4SLinus Torvalds * For moronic filesystems that do not allow holes in file. 25821da177e4SLinus Torvalds * We may have to extend the file. 25831da177e4SLinus Torvalds */ 2584282dc178SChristoph Hellwig int cont_write_begin(struct file *file, struct address_space *mapping, 2585be3bbbc5SMatthew Wilcox (Oracle) loff_t pos, unsigned len, 258689e10787SNick Piggin struct page **pagep, void **fsdata, 258789e10787SNick Piggin get_block_t *get_block, loff_t *bytes) 25881da177e4SLinus Torvalds { 25891da177e4SLinus Torvalds struct inode *inode = mapping->host; 259093407472SFabian Frederick unsigned int blocksize = i_blocksize(inode); 259193407472SFabian Frederick unsigned int zerofrom; 259289e10787SNick Piggin int err; 25931da177e4SLinus Torvalds 259489e10787SNick Piggin err = cont_expand_zero(file, mapping, pos, bytes); 259589e10787SNick Piggin if (err) 2596155130a4SChristoph Hellwig return err; 25971da177e4SLinus Torvalds 259809cbfeafSKirill A. 
Shutemov zerofrom = *bytes & ~PAGE_MASK; 259989e10787SNick Piggin if (pos+len > *bytes && zerofrom & (blocksize-1)) { 26001da177e4SLinus Torvalds *bytes |= (blocksize-1); 26011da177e4SLinus Torvalds (*bytes)++; 26021da177e4SLinus Torvalds } 26031da177e4SLinus Torvalds 2604b3992d1eSMatthew Wilcox (Oracle) return block_write_begin(mapping, pos, len, pagep, get_block); 26051da177e4SLinus Torvalds } 26061fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(cont_write_begin); 26071da177e4SLinus Torvalds 26081da177e4SLinus Torvalds int block_commit_write(struct page *page, unsigned from, unsigned to) 26091da177e4SLinus Torvalds { 26101da177e4SLinus Torvalds struct inode *inode = page->mapping->host; 26111da177e4SLinus Torvalds __block_commit_write(inode,page,from,to); 26121da177e4SLinus Torvalds return 0; 26131da177e4SLinus Torvalds } 26141fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_commit_write); 26151da177e4SLinus Torvalds 261654171690SDavid Chinner /* 261754171690SDavid Chinner * block_page_mkwrite() is not allowed to change the file size as it gets 261854171690SDavid Chinner * called from a page fault handler when a page is first dirtied. Hence we must 261954171690SDavid Chinner * be careful to check for EOF conditions here. We set the page up correctly 262054171690SDavid Chinner * for a written page which means we get ENOSPC checking when writing into 262154171690SDavid Chinner * holes and correct delalloc and unwritten extent mapping on filesystems that 262254171690SDavid Chinner * support these features. 262354171690SDavid Chinner * 262454171690SDavid Chinner * We are not allowed to take the i_mutex here so we have to play games to 262554171690SDavid Chinner * protect against truncate races as the page could now be beyond EOF. Because 26267bb46a67Snpiggin@suse.de * truncate writes the inode size before removing pages, once we have the 262754171690SDavid Chinner * page lock we can determine safely if the page is beyond EOF. If it is not 262854171690SDavid Chinner * beyond EOF, then the page is guaranteed safe against truncation until we 262954171690SDavid Chinner * unlock the page. 2630ea13a864SJan Kara * 263114da9200SJan Kara * Direct callers of this function should protect against filesystem freezing 26325c500029SRoss Zwisler * using sb_start_pagefault() - sb_end_pagefault() functions. 263354171690SDavid Chinner */ 26345c500029SRoss Zwisler int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 263554171690SDavid Chinner get_block_t get_block) 263654171690SDavid Chinner { 2637c2ec175cSNick Piggin struct page *page = vmf->page; 2638496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 263954171690SDavid Chinner unsigned long end; 264054171690SDavid Chinner loff_t size; 264124da4fabSJan Kara int ret; 264254171690SDavid Chinner 264354171690SDavid Chinner lock_page(page); 264454171690SDavid Chinner size = i_size_read(inode); 264554171690SDavid Chinner if ((page->mapping != inode->i_mapping) || 264618336338SNick Piggin (page_offset(page) > size)) { 264724da4fabSJan Kara /* We overload EFAULT to mean page got truncated */ 264824da4fabSJan Kara ret = -EFAULT; 264924da4fabSJan Kara goto out_unlock; 265054171690SDavid Chinner } 265154171690SDavid Chinner 265254171690SDavid Chinner /* page is wholly or partially inside EOF */ 265309cbfeafSKirill A. Shutemov if (((page->index + 1) << PAGE_SHIFT) > size) 265409cbfeafSKirill A. Shutemov end = size & ~PAGE_MASK; 265554171690SDavid Chinner else 265609cbfeafSKirill A. 
Shutemov end = PAGE_SIZE; 265754171690SDavid Chinner 2658ebdec241SChristoph Hellwig ret = __block_write_begin(page, 0, end, get_block); 265954171690SDavid Chinner if (!ret) 266054171690SDavid Chinner ret = block_commit_write(page, 0, end); 266154171690SDavid Chinner 266224da4fabSJan Kara if (unlikely(ret < 0)) 266324da4fabSJan Kara goto out_unlock; 2664ea13a864SJan Kara set_page_dirty(page); 26651d1d1a76SDarrick J. Wong wait_for_stable_page(page); 266624da4fabSJan Kara return 0; 266724da4fabSJan Kara out_unlock: 2668b827e496SNick Piggin unlock_page(page); 266954171690SDavid Chinner return ret; 267054171690SDavid Chinner } 26711fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_page_mkwrite); 26721da177e4SLinus Torvalds 26731da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping, 26741da177e4SLinus Torvalds loff_t from, get_block_t *get_block) 26751da177e4SLinus Torvalds { 267609cbfeafSKirill A. Shutemov pgoff_t index = from >> PAGE_SHIFT; 267709cbfeafSKirill A. Shutemov unsigned offset = from & (PAGE_SIZE-1); 26781da177e4SLinus Torvalds unsigned blocksize; 267954b21a79SAndrew Morton sector_t iblock; 26801da177e4SLinus Torvalds unsigned length, pos; 26811da177e4SLinus Torvalds struct inode *inode = mapping->host; 26821da177e4SLinus Torvalds struct page *page; 26831da177e4SLinus Torvalds struct buffer_head *bh; 2684dc7cb2d2SJiapeng Chong int err = 0; 26851da177e4SLinus Torvalds 268693407472SFabian Frederick blocksize = i_blocksize(inode); 26871da177e4SLinus Torvalds length = offset & (blocksize - 1); 26881da177e4SLinus Torvalds 26891da177e4SLinus Torvalds /* Block boundary? Nothing to do */ 26901da177e4SLinus Torvalds if (!length) 26911da177e4SLinus Torvalds return 0; 26921da177e4SLinus Torvalds 26931da177e4SLinus Torvalds length = blocksize - length; 269409cbfeafSKirill A. Shutemov iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits); 26951da177e4SLinus Torvalds 26961da177e4SLinus Torvalds page = grab_cache_page(mapping, index); 26971da177e4SLinus Torvalds if (!page) 2698dc7cb2d2SJiapeng Chong return -ENOMEM; 26991da177e4SLinus Torvalds 27001da177e4SLinus Torvalds if (!page_has_buffers(page)) 27011da177e4SLinus Torvalds create_empty_buffers(page, blocksize, 0); 27021da177e4SLinus Torvalds 27031da177e4SLinus Torvalds /* Find the buffer that contains "offset" */ 27041da177e4SLinus Torvalds bh = page_buffers(page); 27051da177e4SLinus Torvalds pos = blocksize; 27061da177e4SLinus Torvalds while (offset >= pos) { 27071da177e4SLinus Torvalds bh = bh->b_this_page; 27081da177e4SLinus Torvalds iblock++; 27091da177e4SLinus Torvalds pos += blocksize; 27101da177e4SLinus Torvalds } 27111da177e4SLinus Torvalds 27121da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 2713b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 27141da177e4SLinus Torvalds err = get_block(inode, iblock, bh, 0); 27151da177e4SLinus Torvalds if (err) 27161da177e4SLinus Torvalds goto unlock; 27171da177e4SLinus Torvalds /* unmapped? It's a hole - nothing to do */ 27181da177e4SLinus Torvalds if (!buffer_mapped(bh)) 27191da177e4SLinus Torvalds goto unlock; 27201da177e4SLinus Torvalds } 27211da177e4SLinus Torvalds 27221da177e4SLinus Torvalds /* Ok, it's mapped. Make sure it's up-to-date */ 27231da177e4SLinus Torvalds if (PageUptodate(page)) 27241da177e4SLinus Torvalds set_buffer_uptodate(bh); 27251da177e4SLinus Torvalds 272633a266ddSDavid Chinner if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { 2727e7ea1129SZhang Yi err = bh_read(bh, 0); 27281da177e4SLinus Torvalds /* Uhhuh. 
Read error. Complain and punt. */ 2729e7ea1129SZhang Yi if (err < 0) 27301da177e4SLinus Torvalds goto unlock; 27311da177e4SLinus Torvalds } 27321da177e4SLinus Torvalds 2733eebd2aa3SChristoph Lameter zero_user(page, offset, length); 27341da177e4SLinus Torvalds mark_buffer_dirty(bh); 27351da177e4SLinus Torvalds 27361da177e4SLinus Torvalds unlock: 27371da177e4SLinus Torvalds unlock_page(page); 273809cbfeafSKirill A. Shutemov put_page(page); 2739dc7cb2d2SJiapeng Chong 27401da177e4SLinus Torvalds return err; 27411da177e4SLinus Torvalds } 27421fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_truncate_page); 27431da177e4SLinus Torvalds 27441da177e4SLinus Torvalds /* 27451da177e4SLinus Torvalds * The generic ->writepage function for buffer-backed address_spaces 27461da177e4SLinus Torvalds */ 27471b938c08SMatthew Wilcox int block_write_full_page(struct page *page, get_block_t *get_block, 27481b938c08SMatthew Wilcox struct writeback_control *wbc) 27491da177e4SLinus Torvalds { 27501da177e4SLinus Torvalds struct inode * const inode = page->mapping->host; 27511da177e4SLinus Torvalds loff_t i_size = i_size_read(inode); 275209cbfeafSKirill A. Shutemov const pgoff_t end_index = i_size >> PAGE_SHIFT; 27531da177e4SLinus Torvalds unsigned offset; 27541da177e4SLinus Torvalds 27551da177e4SLinus Torvalds /* Is the page fully inside i_size? */ 27561da177e4SLinus Torvalds if (page->index < end_index) 275735c80d5fSChris Mason return __block_write_full_page(inode, page, get_block, wbc, 27581b938c08SMatthew Wilcox end_buffer_async_write); 27591da177e4SLinus Torvalds 27601da177e4SLinus Torvalds /* Is the page fully outside i_size? (truncate in progress) */ 276109cbfeafSKirill A. Shutemov offset = i_size & (PAGE_SIZE-1); 27621da177e4SLinus Torvalds if (page->index >= end_index+1 || !offset) { 27631da177e4SLinus Torvalds unlock_page(page); 27641da177e4SLinus Torvalds return 0; /* don't care */ 27651da177e4SLinus Torvalds } 27661da177e4SLinus Torvalds 27671da177e4SLinus Torvalds /* 27681da177e4SLinus Torvalds * The page straddles i_size. It must be zeroed out on each and every 27692a61aa40SAdam Buchbinder * writepage invocation because it may be mmapped. "A file is mapped 27701da177e4SLinus Torvalds * in multiples of the page size. For a file that is not a multiple of 27711da177e4SLinus Torvalds * the page size, the remaining memory is zeroed when mapped, and 27721da177e4SLinus Torvalds * writes to that region are not written out to the file." 27731da177e4SLinus Torvalds */ 277409cbfeafSKirill A. 
Shutemov zero_user_segment(page, offset, PAGE_SIZE); 27751b938c08SMatthew Wilcox return __block_write_full_page(inode, page, get_block, wbc, 277635c80d5fSChris Mason end_buffer_async_write); 277735c80d5fSChris Mason } 27781fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_write_full_page); 277935c80d5fSChris Mason 27801da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block, 27811da177e4SLinus Torvalds get_block_t *get_block) 27821da177e4SLinus Torvalds { 27831da177e4SLinus Torvalds struct inode *inode = mapping->host; 27842a527d68SAlexander Potapenko struct buffer_head tmp = { 27852a527d68SAlexander Potapenko .b_size = i_blocksize(inode), 27862a527d68SAlexander Potapenko }; 27872a527d68SAlexander Potapenko 27881da177e4SLinus Torvalds get_block(inode, block, &tmp, 0); 27891da177e4SLinus Torvalds return tmp.b_blocknr; 27901da177e4SLinus Torvalds } 27911fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_block_bmap); 27921da177e4SLinus Torvalds 27934246a0b6SChristoph Hellwig static void end_bio_bh_io_sync(struct bio *bio) 27941da177e4SLinus Torvalds { 27951da177e4SLinus Torvalds struct buffer_head *bh = bio->bi_private; 27961da177e4SLinus Torvalds 2797b7c44ed9SJens Axboe if (unlikely(bio_flagged(bio, BIO_QUIET))) 279808bafc03SKeith Mannthey set_bit(BH_Quiet, &bh->b_state); 279908bafc03SKeith Mannthey 28004e4cbee9SChristoph Hellwig bh->b_end_io(bh, !bio->bi_status); 28011da177e4SLinus Torvalds bio_put(bio); 28021da177e4SLinus Torvalds } 28031da177e4SLinus Torvalds 28045bdf402aSRitesh Harjani (IBM) static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh, 28051420c4a5SBart Van Assche struct writeback_control *wbc) 28061da177e4SLinus Torvalds { 28071420c4a5SBart Van Assche const enum req_op op = opf & REQ_OP_MASK; 28081da177e4SLinus Torvalds struct bio *bio; 28091da177e4SLinus Torvalds 28101da177e4SLinus Torvalds BUG_ON(!buffer_locked(bh)); 28111da177e4SLinus Torvalds BUG_ON(!buffer_mapped(bh)); 28121da177e4SLinus Torvalds BUG_ON(!bh->b_end_io); 28138fb0e342SAneesh Kumar K.V BUG_ON(buffer_delay(bh)); 28148fb0e342SAneesh Kumar K.V BUG_ON(buffer_unwritten(bh)); 28151da177e4SLinus Torvalds 281648fd4f93SJens Axboe /* 281748fd4f93SJens Axboe * Only clear out a write error when rewriting 28181da177e4SLinus Torvalds */ 28192a222ca9SMike Christie if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) 28201da177e4SLinus Torvalds clear_buffer_write_io_error(bh); 28211da177e4SLinus Torvalds 282207888c66SChristoph Hellwig if (buffer_meta(bh)) 28231420c4a5SBart Van Assche opf |= REQ_META; 282407888c66SChristoph Hellwig if (buffer_prio(bh)) 28251420c4a5SBart Van Assche opf |= REQ_PRIO; 282607888c66SChristoph Hellwig 28271420c4a5SBart Van Assche bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO); 28281da177e4SLinus Torvalds 28294f74d15fSEric Biggers fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); 28304f74d15fSEric Biggers 28314f024f37SKent Overstreet bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); 28321da177e4SLinus Torvalds 28336cf66b4cSKent Overstreet bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); 28346cf66b4cSKent Overstreet BUG_ON(bio->bi_iter.bi_size != bh->b_size); 28351da177e4SLinus Torvalds 28361da177e4SLinus Torvalds bio->bi_end_io = end_bio_bh_io_sync; 28371da177e4SLinus Torvalds bio->bi_private = bh; 28381da177e4SLinus Torvalds 283983c9c547SMing Lei /* Take care of bh's that straddle the end of the device */ 284083c9c547SMing Lei guard_bio_eod(bio); 284183c9c547SMing Lei 2842fd42df30SDennis Zhou if (wbc) { 2843fd42df30SDennis Zhou 
wbc_init_bio(wbc, bio); 284434e51a5eSTejun Heo wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size); 2845fd42df30SDennis Zhou } 2846fd42df30SDennis Zhou 28474e49ea4aSMike Christie submit_bio(bio); 28481da177e4SLinus Torvalds } 2849bafc0dbaSTejun Heo 28505bdf402aSRitesh Harjani (IBM) void submit_bh(blk_opf_t opf, struct buffer_head *bh) 285171368511SDarrick J. Wong { 28525bdf402aSRitesh Harjani (IBM) submit_bh_wbc(opf, bh, NULL); 285371368511SDarrick J. Wong } 28541fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(submit_bh); 28551da177e4SLinus Torvalds 28563ae72869SBart Van Assche void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) 28579cb569d6SChristoph Hellwig { 28589cb569d6SChristoph Hellwig lock_buffer(bh); 28599cb569d6SChristoph Hellwig if (!test_clear_buffer_dirty(bh)) { 28609cb569d6SChristoph Hellwig unlock_buffer(bh); 28619cb569d6SChristoph Hellwig return; 28629cb569d6SChristoph Hellwig } 28639cb569d6SChristoph Hellwig bh->b_end_io = end_buffer_write_sync; 28649cb569d6SChristoph Hellwig get_bh(bh); 28651420c4a5SBart Van Assche submit_bh(REQ_OP_WRITE | op_flags, bh); 28669cb569d6SChristoph Hellwig } 28679cb569d6SChristoph Hellwig EXPORT_SYMBOL(write_dirty_buffer); 28689cb569d6SChristoph Hellwig 28691da177e4SLinus Torvalds /* 28701da177e4SLinus Torvalds * For a data-integrity writeout, we need to wait upon any in-progress I/O 28711da177e4SLinus Torvalds * and then start new I/O and then wait upon it. The caller must have a ref on 28721da177e4SLinus Torvalds * the buffer_head. 28731da177e4SLinus Torvalds */ 28743ae72869SBart Van Assche int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) 28751da177e4SLinus Torvalds { 28761da177e4SLinus Torvalds WARN_ON(atomic_read(&bh->b_count) < 1); 28771da177e4SLinus Torvalds lock_buffer(bh); 28781da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) { 2879377254b2SXianting Tian /* 2880377254b2SXianting Tian * The bh should be mapped, but it might not be if the 2881377254b2SXianting Tian * device was hot-removed. Not much we can do but fail the I/O. 2882377254b2SXianting Tian */ 2883377254b2SXianting Tian if (!buffer_mapped(bh)) { 2884377254b2SXianting Tian unlock_buffer(bh); 2885377254b2SXianting Tian return -EIO; 2886377254b2SXianting Tian } 2887377254b2SXianting Tian 28881da177e4SLinus Torvalds get_bh(bh); 28891da177e4SLinus Torvalds bh->b_end_io = end_buffer_write_sync; 2890ab620620SRitesh Harjani (IBM) submit_bh(REQ_OP_WRITE | op_flags, bh); 28911da177e4SLinus Torvalds wait_on_buffer(bh); 2892ab620620SRitesh Harjani (IBM) if (!buffer_uptodate(bh)) 2893ab620620SRitesh Harjani (IBM) return -EIO; 28941da177e4SLinus Torvalds } else { 28951da177e4SLinus Torvalds unlock_buffer(bh); 28961da177e4SLinus Torvalds } 2897ab620620SRitesh Harjani (IBM) return 0; 28981da177e4SLinus Torvalds } 289987e99511SChristoph Hellwig EXPORT_SYMBOL(__sync_dirty_buffer); 290087e99511SChristoph Hellwig 290187e99511SChristoph Hellwig int sync_dirty_buffer(struct buffer_head *bh) 290287e99511SChristoph Hellwig { 290370fd7614SChristoph Hellwig return __sync_dirty_buffer(bh, REQ_SYNC); 290487e99511SChristoph Hellwig } 29051fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(sync_dirty_buffer); 29061da177e4SLinus Torvalds 29071da177e4SLinus Torvalds /* 290868189fefSMatthew Wilcox (Oracle) * try_to_free_buffers() checks if all the buffers on this particular folio 29091da177e4SLinus Torvalds * are unused, and releases them if so. 
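 * It is reached from memory reclaim and truncation, e.g. via
 * filemap_release_folio(), which falls back to it when a mapping has
 * no ->release_folio(); buffer-head filesystems also commonly call it
 * from their own ->release_folio() implementations.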
29101da177e4SLinus Torvalds * 29111da177e4SLinus Torvalds * Exclusion against try_to_free_buffers may be obtained by either 291268189fefSMatthew Wilcox (Oracle) * locking the folio or by holding its mapping's private_lock. 29131da177e4SLinus Torvalds * 291468189fefSMatthew Wilcox (Oracle) * If the folio is dirty but all the buffers are clean then we need to 291568189fefSMatthew Wilcox (Oracle) * be sure to mark the folio clean as well. This is because the folio 29161da177e4SLinus Torvalds * may be against a block device, and a later reattachment of buffers 291768189fefSMatthew Wilcox (Oracle) * to a dirty folio will set *all* buffers dirty. Which would corrupt 29181da177e4SLinus Torvalds * filesystem data on the same device. 29191da177e4SLinus Torvalds * 292068189fefSMatthew Wilcox (Oracle) * The same applies to regular filesystem folios: if all the buffers are 292168189fefSMatthew Wilcox (Oracle) * clean then we set the folio clean and proceed. To do that, we require 2922e621900aSMatthew Wilcox (Oracle) * total exclusion from block_dirty_folio(). That is obtained with 29231da177e4SLinus Torvalds * private_lock. 29241da177e4SLinus Torvalds * 29251da177e4SLinus Torvalds * try_to_free_buffers() is non-blocking. 29261da177e4SLinus Torvalds */ 29271da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh) 29281da177e4SLinus Torvalds { 29291da177e4SLinus Torvalds return atomic_read(&bh->b_count) | 29301da177e4SLinus Torvalds (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); 29311da177e4SLinus Torvalds } 29321da177e4SLinus Torvalds 293364394763SMatthew Wilcox (Oracle) static bool 293464394763SMatthew Wilcox (Oracle) drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free) 29351da177e4SLinus Torvalds { 293664394763SMatthew Wilcox (Oracle) struct buffer_head *head = folio_buffers(folio); 29371da177e4SLinus Torvalds struct buffer_head *bh; 29381da177e4SLinus Torvalds 29391da177e4SLinus Torvalds bh = head; 29401da177e4SLinus Torvalds do { 29411da177e4SLinus Torvalds if (buffer_busy(bh)) 29421da177e4SLinus Torvalds goto failed; 29431da177e4SLinus Torvalds bh = bh->b_this_page; 29441da177e4SLinus Torvalds } while (bh != head); 29451da177e4SLinus Torvalds 29461da177e4SLinus Torvalds do { 29471da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 29481da177e4SLinus Torvalds 2949535ee2fbSJan Kara if (bh->b_assoc_map) 29501da177e4SLinus Torvalds __remove_assoc_queue(bh); 29511da177e4SLinus Torvalds bh = next; 29521da177e4SLinus Torvalds } while (bh != head); 29531da177e4SLinus Torvalds *buffers_to_free = head; 295464394763SMatthew Wilcox (Oracle) folio_detach_private(folio); 295564394763SMatthew Wilcox (Oracle) return true; 29561da177e4SLinus Torvalds failed: 295764394763SMatthew Wilcox (Oracle) return false; 29581da177e4SLinus Torvalds } 29591da177e4SLinus Torvalds 296068189fefSMatthew Wilcox (Oracle) bool try_to_free_buffers(struct folio *folio) 29611da177e4SLinus Torvalds { 296268189fefSMatthew Wilcox (Oracle) struct address_space * const mapping = folio->mapping; 29631da177e4SLinus Torvalds struct buffer_head *buffers_to_free = NULL; 296468189fefSMatthew Wilcox (Oracle) bool ret = 0; 29651da177e4SLinus Torvalds 296668189fefSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio)); 296768189fefSMatthew Wilcox (Oracle) if (folio_test_writeback(folio)) 296868189fefSMatthew Wilcox (Oracle) return false; 29691da177e4SLinus Torvalds 29701da177e4SLinus Torvalds if (mapping == NULL) { /* can this still happen? 
*/ 297164394763SMatthew Wilcox (Oracle) ret = drop_buffers(folio, &buffers_to_free); 29721da177e4SLinus Torvalds goto out; 29731da177e4SLinus Torvalds } 29741da177e4SLinus Torvalds 29751da177e4SLinus Torvalds spin_lock(&mapping->private_lock); 297664394763SMatthew Wilcox (Oracle) ret = drop_buffers(folio, &buffers_to_free); 2977ecdfc978SLinus Torvalds 2978ecdfc978SLinus Torvalds /* 2979ecdfc978SLinus Torvalds * If the filesystem writes its buffers by hand (eg ext3) 298068189fefSMatthew Wilcox (Oracle) * then we can have clean buffers against a dirty folio. We 298168189fefSMatthew Wilcox (Oracle) * clean the folio here; otherwise the VM will never notice 2982ecdfc978SLinus Torvalds * that the filesystem did any IO at all. 2983ecdfc978SLinus Torvalds * 2984ecdfc978SLinus Torvalds * Also, during truncate, discard_buffer will have marked all 298568189fefSMatthew Wilcox (Oracle) * the folio's buffers clean. We discover that here and clean 298668189fefSMatthew Wilcox (Oracle) * the folio also. 298787df7241SNick Piggin * 298887df7241SNick Piggin * private_lock must be held over this entire operation in order 2989e621900aSMatthew Wilcox (Oracle) * to synchronise against block_dirty_folio and prevent the 299087df7241SNick Piggin * dirty bit from being lost. 2991ecdfc978SLinus Torvalds */ 299211f81becSTejun Heo if (ret) 299368189fefSMatthew Wilcox (Oracle) folio_cancel_dirty(folio); 299487df7241SNick Piggin spin_unlock(&mapping->private_lock); 29951da177e4SLinus Torvalds out: 29961da177e4SLinus Torvalds if (buffers_to_free) { 29971da177e4SLinus Torvalds struct buffer_head *bh = buffers_to_free; 29981da177e4SLinus Torvalds 29991da177e4SLinus Torvalds do { 30001da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 30011da177e4SLinus Torvalds free_buffer_head(bh); 30021da177e4SLinus Torvalds bh = next; 30031da177e4SLinus Torvalds } while (bh != buffers_to_free); 30041da177e4SLinus Torvalds } 30051da177e4SLinus Torvalds return ret; 30061da177e4SLinus Torvalds } 30071da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers); 30081da177e4SLinus Torvalds 30091da177e4SLinus Torvalds /* 30101da177e4SLinus Torvalds * Buffer-head allocation 30111da177e4SLinus Torvalds */ 3012a0a9b043SShai Fultheim static struct kmem_cache *bh_cachep __read_mostly; 30131da177e4SLinus Torvalds 30141da177e4SLinus Torvalds /* 30151da177e4SLinus Torvalds * Once the number of bh's in the machine exceeds this level, we start 30161da177e4SLinus Torvalds * stripping them in writeback. 
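 * (buffer_init() below sizes this limit so that buffer heads may pin
 * at most roughly 10% of ZONE_NORMAL memory.)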
30171da177e4SLinus Torvalds */ 301843be594aSZhang Yanfei static unsigned long max_buffer_heads; 30191da177e4SLinus Torvalds 30201da177e4SLinus Torvalds int buffer_heads_over_limit; 30211da177e4SLinus Torvalds 30221da177e4SLinus Torvalds struct bh_accounting { 30231da177e4SLinus Torvalds int nr; /* Number of live bh's */ 30241da177e4SLinus Torvalds int ratelimit; /* Limit cacheline bouncing */ 30251da177e4SLinus Torvalds }; 30261da177e4SLinus Torvalds 30271da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0}; 30281da177e4SLinus Torvalds 30291da177e4SLinus Torvalds static void recalc_bh_state(void) 30301da177e4SLinus Torvalds { 30311da177e4SLinus Torvalds int i; 30321da177e4SLinus Torvalds int tot = 0; 30331da177e4SLinus Torvalds 3034ee1be862SChristoph Lameter if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096) 30351da177e4SLinus Torvalds return; 3036c7b92516SChristoph Lameter __this_cpu_write(bh_accounting.ratelimit, 0); 30378a143426SEric Dumazet for_each_online_cpu(i) 30381da177e4SLinus Torvalds tot += per_cpu(bh_accounting, i).nr; 30391da177e4SLinus Torvalds buffer_heads_over_limit = (tot > max_buffer_heads); 30401da177e4SLinus Torvalds } 30411da177e4SLinus Torvalds 3042dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 30431da177e4SLinus Torvalds { 3044019b4d12SRichard Kennedy struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); 30451da177e4SLinus Torvalds if (ret) { 3046a35afb83SChristoph Lameter INIT_LIST_HEAD(&ret->b_assoc_buffers); 3047f1e67e35SThomas Gleixner spin_lock_init(&ret->b_uptodate_lock); 3048c7b92516SChristoph Lameter preempt_disable(); 3049c7b92516SChristoph Lameter __this_cpu_inc(bh_accounting.nr); 30501da177e4SLinus Torvalds recalc_bh_state(); 3051c7b92516SChristoph Lameter preempt_enable(); 30521da177e4SLinus Torvalds } 30531da177e4SLinus Torvalds return ret; 30541da177e4SLinus Torvalds } 30551da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head); 30561da177e4SLinus Torvalds 30571da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh) 30581da177e4SLinus Torvalds { 30591da177e4SLinus Torvalds BUG_ON(!list_empty(&bh->b_assoc_buffers)); 30601da177e4SLinus Torvalds kmem_cache_free(bh_cachep, bh); 3061c7b92516SChristoph Lameter preempt_disable(); 3062c7b92516SChristoph Lameter __this_cpu_dec(bh_accounting.nr); 30631da177e4SLinus Torvalds recalc_bh_state(); 3064c7b92516SChristoph Lameter preempt_enable(); 30651da177e4SLinus Torvalds } 30661da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head); 30671da177e4SLinus Torvalds 3068fc4d24c9SSebastian Andrzej Siewior static int buffer_exit_cpu_dead(unsigned int cpu) 30691da177e4SLinus Torvalds { 30701da177e4SLinus Torvalds int i; 30711da177e4SLinus Torvalds struct bh_lru *b = &per_cpu(bh_lrus, cpu); 30721da177e4SLinus Torvalds 30731da177e4SLinus Torvalds for (i = 0; i < BH_LRU_SIZE; i++) { 30741da177e4SLinus Torvalds brelse(b->bhs[i]); 30751da177e4SLinus Torvalds b->bhs[i] = NULL; 30761da177e4SLinus Torvalds } 3077c7b92516SChristoph Lameter this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr); 30788a143426SEric Dumazet per_cpu(bh_accounting, cpu).nr = 0; 3079fc4d24c9SSebastian Andrzej Siewior return 0; 30801da177e4SLinus Torvalds } 30811da177e4SLinus Torvalds 3082389d1b08SAneesh Kumar K.V /** 3083a6b91919SRandy Dunlap * bh_uptodate_or_lock - Test whether the buffer is uptodate 3084389d1b08SAneesh Kumar K.V * @bh: struct buffer_head 3085389d1b08SAneesh Kumar K.V * 3086389d1b08SAneesh Kumar K.V * Return true if the buffer is 
up-to-date and false,
3087389d1b08SAneesh Kumar K.V  * with the buffer locked, if it is not.
3088389d1b08SAneesh Kumar K.V  */
3089389d1b08SAneesh Kumar K.V int bh_uptodate_or_lock(struct buffer_head *bh)
3090389d1b08SAneesh Kumar K.V {
3091389d1b08SAneesh Kumar K.V 	if (!buffer_uptodate(bh)) {
3092389d1b08SAneesh Kumar K.V 		lock_buffer(bh);
3093389d1b08SAneesh Kumar K.V 		if (!buffer_uptodate(bh))
3094389d1b08SAneesh Kumar K.V 			return 0;
3095389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3096389d1b08SAneesh Kumar K.V 	}
3097389d1b08SAneesh Kumar K.V 	return 1;
3098389d1b08SAneesh Kumar K.V }
3099389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_uptodate_or_lock);
3100389d1b08SAneesh Kumar K.V 
3101389d1b08SAneesh Kumar K.V /**
3102fdee117eSZhang Yi  * __bh_read - Submit read for a locked buffer
3103389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head
3104fdee117eSZhang Yi  * @op_flags: additional REQ_* flags to apply besides REQ_OP_READ
3105fdee117eSZhang Yi  * @wait: wait until the read finishes
3106389d1b08SAneesh Kumar K.V  *
3107fdee117eSZhang Yi  * Returns zero on success or when not waiting, and -EIO on a read error.
3108389d1b08SAneesh Kumar K.V  */
3109fdee117eSZhang Yi int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3110389d1b08SAneesh Kumar K.V {
3111fdee117eSZhang Yi 	int ret = 0;
3112389d1b08SAneesh Kumar K.V 
3113fdee117eSZhang Yi 	BUG_ON(!buffer_locked(bh));
3114389d1b08SAneesh Kumar K.V 
3115389d1b08SAneesh Kumar K.V 	get_bh(bh);
3116389d1b08SAneesh Kumar K.V 	bh->b_end_io = end_buffer_read_sync;
3117fdee117eSZhang Yi 	submit_bh(REQ_OP_READ | op_flags, bh);
3118fdee117eSZhang Yi 	if (wait) {
3119389d1b08SAneesh Kumar K.V 		wait_on_buffer(bh);
3120fdee117eSZhang Yi 		if (!buffer_uptodate(bh))
3121fdee117eSZhang Yi 			ret = -EIO;
3122389d1b08SAneesh Kumar K.V 	}
3123fdee117eSZhang Yi 	return ret;
3124fdee117eSZhang Yi }
3125fdee117eSZhang Yi EXPORT_SYMBOL(__bh_read);
3126fdee117eSZhang Yi 
3127fdee117eSZhang Yi /**
3128fdee117eSZhang Yi  * __bh_read_batch - Submit read for a batch of unlocked buffers
3129fdee117eSZhang Yi  * @nr: number of entries in the buffer batch
3130fdee117eSZhang Yi  * @bhs: a batch of struct buffer_head
3131fdee117eSZhang Yi  * @op_flags: additional REQ_* flags to apply besides REQ_OP_READ
3132fdee117eSZhang Yi  * @force_lock: if set, always lock each buffer; otherwise skip any
3133fdee117eSZhang Yi  *              buffer whose lock cannot be taken immediately.
3134fdee117eSZhang Yi  *
3135fdee117eSZhang Yi  * The reads are submitted asynchronously; completion for each buffer is signalled through its end_io handler (end_buffer_read_sync()).
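 *
 * For example (illustrative), readahead into a batch of metadata
 * buffers, skipping any buffer that is already locked:
 *
 *	__bh_read_batch(nr, bhs, REQ_RAHEAD, false);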
3136fdee117eSZhang Yi */ 3137fdee117eSZhang Yi void __bh_read_batch(int nr, struct buffer_head *bhs[], 3138fdee117eSZhang Yi blk_opf_t op_flags, bool force_lock) 3139fdee117eSZhang Yi { 3140fdee117eSZhang Yi int i; 3141fdee117eSZhang Yi 3142fdee117eSZhang Yi for (i = 0; i < nr; i++) { 3143fdee117eSZhang Yi struct buffer_head *bh = bhs[i]; 3144fdee117eSZhang Yi 3145fdee117eSZhang Yi if (buffer_uptodate(bh)) 3146fdee117eSZhang Yi continue; 3147fdee117eSZhang Yi 3148fdee117eSZhang Yi if (force_lock) 3149fdee117eSZhang Yi lock_buffer(bh); 3150fdee117eSZhang Yi else 3151fdee117eSZhang Yi if (!trylock_buffer(bh)) 3152fdee117eSZhang Yi continue; 3153fdee117eSZhang Yi 3154fdee117eSZhang Yi if (buffer_uptodate(bh)) { 3155fdee117eSZhang Yi unlock_buffer(bh); 3156fdee117eSZhang Yi continue; 3157fdee117eSZhang Yi } 3158fdee117eSZhang Yi 3159fdee117eSZhang Yi bh->b_end_io = end_buffer_read_sync; 3160fdee117eSZhang Yi get_bh(bh); 3161fdee117eSZhang Yi submit_bh(REQ_OP_READ | op_flags, bh); 3162fdee117eSZhang Yi } 3163fdee117eSZhang Yi } 3164fdee117eSZhang Yi EXPORT_SYMBOL(__bh_read_batch); 3165389d1b08SAneesh Kumar K.V 31661da177e4SLinus Torvalds void __init buffer_init(void) 31671da177e4SLinus Torvalds { 316843be594aSZhang Yanfei unsigned long nrpages; 3169fc4d24c9SSebastian Andrzej Siewior int ret; 31701da177e4SLinus Torvalds 3171b98938c3SChristoph Lameter bh_cachep = kmem_cache_create("buffer_head", 3172b98938c3SChristoph Lameter sizeof(struct buffer_head), 0, 3173b98938c3SChristoph Lameter (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| 3174b98938c3SChristoph Lameter SLAB_MEM_SPREAD), 3175019b4d12SRichard Kennedy NULL); 31761da177e4SLinus Torvalds 31771da177e4SLinus Torvalds /* 31781da177e4SLinus Torvalds * Limit the bh occupancy to 10% of ZONE_NORMAL 31791da177e4SLinus Torvalds */ 31801da177e4SLinus Torvalds nrpages = (nr_free_buffer_pages() * 10) / 100; 31811da177e4SLinus Torvalds max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head)); 3182fc4d24c9SSebastian Andrzej Siewior ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead", 3183fc4d24c9SSebastian Andrzej Siewior NULL, buffer_exit_cpu_dead); 3184fc4d24c9SSebastian Andrzej Siewior WARN_ON(ret < 0); 31851da177e4SLinus Torvalds } 3186
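
/*
 * Putting it together (illustrative sketch, not part of this file's API):
 * a simple buffer_head-backed filesystem wires the helpers above into its
 * address_space_operations roughly like so, where myfs_get_block() stands
 * in for that filesystem's own block-mapping routine:
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return block_read_full_folio(folio, myfs_get_block);
 *	}
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct page **pagep, void **fsdata)
 *	{
 *		return block_write_begin(mapping, pos, len, pagep,
 *					 myfs_get_block);
 *	}
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.read_folio	= myfs_read_folio,
 *		.write_begin	= myfs_write_begin,
 *		.write_end	= generic_write_end,
 *	};
 */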