// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
#include <linux/sched/isolation.h>

#include "internal.h"

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
			  enum rw_hint hint, struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns whether the folio has dirty or writeback buffers.  If all the
 * buffers are unlocked and clean then the folio_test_dirty information is
 * stale.  If any of the buffers are locked, it is assumed they are locked
 * for IO.
 */
void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;
	*dirty = false;
	*writeback = false;

	BUG_ON(!folio_test_locked(folio));

	head = folio_buffers(folio);
	if (!head)
		return;

	if (folio_test_writeback(folio))
		*writeback = true;

	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head *bh)
{
	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
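
/*
 * Illustrative sketch (the helper name is hypothetical and it is not used
 * elsewhere in this file): the canonical pattern the locking helpers above
 * support.  lock_buffer() serialises modification and I/O against the
 * buffer, and unlock_buffer() wakes anyone sleeping in __wait_on_buffer().
 */
static void __maybe_unused example_modify_buffer(struct buffer_head *bh)
{
	lock_buffer(bh);	/* sleeps in __lock_buffer() if contended */
	/* ... update bh->b_data here ... */
	mark_buffer_dirty(bh);
	unlock_buffer(bh);	/* wakes waiters in __wait_on_buffer() */
}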

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
	if (!test_bit(BH_Quiet, &bh->b_state))
		printk_ratelimited(KERN_ERR
			"Buffer I/O error on dev %pg, logical block %llu%s\n",
			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed read-ahead attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost sync page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);
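
/*
 * Illustrative sketch (hypothetical helper): how a caller drives a
 * synchronous read with end_buffer_read_sync().  The extra reference is
 * needed because end_buffer_read_sync() drops one with put_bh().  This
 * mirrors the pattern used by __bh_read() later in this file.
 */
static int __maybe_unused example_read_buffer_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);			/* dropped by end_buffer_read_sync() */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(REQ_OP_READ, bh);
	wait_on_buffer(bh);		/* the end_io handler unlocks the buffer */
	return buffer_uptodate(bh) ? 0 : -EIO;
}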

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * i_private_lock.
 *
 * Hack idea: for the blockdev mapping, i_private_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take i_private_lock.
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct folio *folio;
	int all_mapped = 1;
	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);

	index = ((loff_t)block << bd_inode->i_blkbits) / PAGE_SIZE;
	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
	if (IS_ERR(folio))
		goto out;

	spin_lock(&bd_mapping->i_private_lock);
	head = folio_buffers(folio);
	if (!head)
		goto out_unlock;
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
	if (all_mapped && __ratelimit(&last_warned)) {
		printk("__find_get_block_slow() failed. block=%llu, "
		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
		       "device %pg blocksize: %d\n",
		       (unsigned long long)block,
		       (unsigned long long)bh->b_blocknr,
		       bh->b_state, bh->b_size, bdev,
		       1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->i_private_lock);
	folio_put(folio);
out:
	return ret;
}
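
/*
 * Illustrative sketch (hypothetical helper): a typical non-blocking lookup
 * through __find_get_block(), which tries the per-CPU bh LRU first and only
 * then falls back to __find_get_block_slow() above.
 */
static void __maybe_unused example_peek_cached_block(struct block_device *bdev,
						     sector_t block,
						     unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	if (bh) {
		/* ... inspect bh->b_data, buffer_uptodate(bh), etc. ... */
		brelse(bh);	/* drop the reference the lookup took */
	}
}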

static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;
	int folio_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		buffer_io_error(bh, ", async page read");
		folio_set_error(folio);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			folio_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);

	folio_end_read(folio, folio_uptodate);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}

struct postprocess_bh_ctx {
	struct work_struct work;
	struct buffer_head *bh;
};

static void verify_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	bool valid;

	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
	end_buffer_async_read(bh, valid);
	kfree(ctx);
}

static bool need_fsverity(struct buffer_head *bh)
{
	struct folio *folio = bh->b_folio;
	struct inode *inode = folio->mapping->host;

	return fsverity_active(inode) &&
		/* needed by ext4 */
		folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

static void decrypt_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	int err;

	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
					       bh_offset(bh));
	if (err == 0 && need_fsverity(bh)) {
		/*
		 * We use different work queues for decryption and for verity
		 * because verity may require reading metadata pages that need
		 * decryption, and we shouldn't recurse to the same workqueue.
		 */
		INIT_WORK(&ctx->work, verify_bh);
		fsverity_enqueue_verify_work(&ctx->work);
		return;
	}
	end_buffer_async_read(bh, err == 0);
	kfree(ctx);
}

/*
 * I/O completion handler for block_read_full_folio() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
{
	struct inode *inode = bh->b_folio->mapping->host;
	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
	bool verify = need_fsverity(bh);

	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
	if (uptodate && (decrypt || verify)) {
		struct postprocess_bh_ctx *ctx =
			kmalloc(sizeof(*ctx), GFP_ATOMIC);

		if (ctx) {
			ctx->bh = bh;
			if (decrypt) {
				INIT_WORK(&ctx->work, decrypt_bh);
				fscrypt_enqueue_decrypt_work(&ctx->work);
			} else {
				INIT_WORK(&ctx->work, verify_bh);
				fsverity_enqueue_verify_work(&ctx->work);
			}
			return;
		}
		uptodate = 0;
	}
	end_buffer_async_read(bh, uptodate);
}

/*
 * Completion handler for block_write_full_folio() - folios which are unlocked
 * during I/O, and which have the writeback flag cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;

	BUG_ON(!buffer_async_write(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost async page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		folio_set_error(folio);
	}

	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	folio_end_writeback(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}

/*
 * If a page's buffers are under async read (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read_io;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);
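
/*
 * Illustrative sketch (hypothetical helper): this is what
 * block_read_full_folio(), later in this file, does for each buffer it
 * needs to read.  The buffer must be locked before it is marked, and the
 * BH_Async_Read flag routes I/O completion to end_buffer_async_read_io().
 */
static void __maybe_unused example_start_async_read(struct buffer_head *bh)
{
	lock_buffer(bh);
	mark_buffer_async_read(bh);
	submit_bh(REQ_OP_READ, bh);
}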

/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->i_private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for i_private_list is via the i_private_lock in the
 * address_space which backs the buffers.  Which is different from the
 * address_space against which the buffers are listed.  So for a particular
 * address_space, mapping->i_private_lock does *not* protect
 * mapping->i_private_list!  In fact, mapping->i_private_list will always be
 * protected by the backing blockdev's ->i_private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->i_private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->i_private_list via these
 * utility functions are free to use i_private_lock and i_private_list for
 * whatever they want.  The only requirement is that list_empty(i_private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's i_private_lock must be held.
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.i_private_list);
}
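
/*
 * Illustrative sketch (hypothetical helpers): the i_private_list pattern
 * described above, as a simple filesystem might use it.  Dirtying queues
 * the metadata buffer on the inode's mapping; fsync later writes and waits
 * on everything queued there.
 */
static void __maybe_unused example_dirty_metadata(struct inode *inode,
						  struct buffer_head *bh)
{
	/* queue bh on inode->i_mapping->i_private_list */
	mark_buffer_dirty_inode(bh, inode);
}

static int __maybe_unused example_sync_metadata(struct inode *inode)
{
	/* write out and wait upon everything queued above */
	return sync_mapping_buffers(inode->i_mapping);
}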

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 * as you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->i_private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->i_private_data;

	if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->i_private_lock,
				  &mapping->i_private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/**
 * generic_buffers_fsync_noflush - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.
 */
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
				  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;
	int ret;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY_ALL))
		goto out;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (ret == 0)
		ret = err;

out:
	/* check and advance again to catch errors after syncing out buffers */
	err = file_check_and_advance_wb_err(file);
	if (ret == 0)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync_noflush);
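
/*
 * Illustrative sketch (hypothetical method): a filesystem that issues its
 * own device cache flush elsewhere can use the _noflush variant above
 * directly as the guts of its ->fsync method; otherwise
 * generic_buffers_fsync() below is the complete implementation.
 */
static int __maybe_unused example_fsync(struct file *file, loff_t start,
					loff_t end, int datasync)
{
	return generic_buffers_fsync_noflush(file, start, end, datasync);
}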

/**
 * generic_buffers_fsync - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.  This also makes sure that
 * a device cache flush operation is called at the end.
 */
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
	if (!ret)
		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			  sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			write_dirty_buffer(bh, 0);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_folio->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->i_private_data) {
		mapping->i_private_data = buffer_mapping;
	} else {
		BUG_ON(mapping->i_private_data != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->i_private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->i_private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->i_private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

/**
 * block_dirty_folio - Mark a folio as dirty.
 * @mapping: The address space containing this folio.
 * @folio: The folio to mark dirty.
 *
 * Filesystems which use buffer_heads can use this function as their
 * ->dirty_folio implementation.
 * Some filesystems need to do a little
 * work before calling this function.  Filesystems which do not use
 * buffer_heads should call filemap_dirty_folio() instead.
 *
 * If the folio has buffers, the uptodate buffers are set dirty, to
 * preserve dirty-state coherency between the folio and the buffers.
 * Buffers added to a dirty folio are created dirty.
 *
 * The buffers are dirtied before the folio is dirtied.  There's a small
 * race window in which writeback may see the folio cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the folio
 * dirty before the buffers, writeback could clear the folio dirty flag,
 * see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * folio on the dirty folio list.
 *
 * We use i_private_lock to lock against try_to_free_buffers() while
 * using the folio's buffer list.  This also prevents clean buffers
 * being added to the folio after it was set dirty.
 *
 * Context: May only be called from process context.  Does not sleep.
 * Caller must ensure that @folio cannot be truncated during this call,
 * typically by holding the folio lock or having a page in the folio
 * mapped and holding the page table lock.
 *
 * Return: True if the folio was dirtied; false if it was already dirtied.
 */
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct buffer_head *head;
	bool newly_dirty;

	spin_lock(&mapping->i_private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	/*
	 * Lock out page's memcg migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	folio_memcg_lock(folio);
	newly_dirty = !folio_test_set_dirty(folio);
	spin_unlock(&mapping->i_private_lock);

	if (newly_dirty)
		__folio_mark_dirty(folio, mapping, 1);

	folio_memcg_unlock(folio);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return newly_dirty;
}
EXPORT_SYMBOL(block_dirty_folio);
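
/*
 * Illustrative sketch (hypothetical aops table): a buffer_head-based
 * filesystem typically wires block_dirty_folio() straight into its
 * address_space_operations, alongside the matching invalidation helper
 * also exported from this file.
 */
static const struct address_space_operations example_aops __maybe_unused = {
	.dirty_folio	  = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
};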

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;
	struct blk_plug plug;

	INIT_LIST_HEAD(&tmp);
	blk_start_plug(&plug);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, REQ_SYNC);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	spin_unlock(lock);
	blk_finish_plug(&plug);
	spin_lock(lock);

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->i_private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's i_private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->i_private_list;
		struct address_space *buffer_mapping = mapping->i_private_data;

		spin_lock(&buffer_mapping->i_private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->i_private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);
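
/*
 * Illustrative sketch (hypothetical method): a simple filesystem's
 * ->evict_inode typically drops any associated buffers right after the
 * pagecache, mirroring the clear_inode() expectations described above.
 */
static void __maybe_unused example_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode);
	clear_inode(inode);
}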

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->i_private_list;
		struct address_space *buffer_mapping = mapping->i_private_data;

		spin_lock(&buffer_mapping->i_private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->i_private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a folio for a data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					gfp_t gfp)
{
	struct buffer_head *bh, *head;
	long offset;
	struct mem_cgroup *memcg, *old_memcg;

	/* The folio lock pins the memcg */
	memcg = folio_memcg(folio);
	old_memcg = set_active_memcg(memcg);

	head = NULL;
	offset = folio_size(folio);
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(gfp);
		if (!bh)
			goto no_grow;

		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_size = size;

		/* Link the buffer to its folio */
		folio_set_bh(bh, folio, offset);
	}
out:
	set_active_memcg(old_memcg);
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	goto out;
}
EXPORT_SYMBOL_GPL(folio_alloc_buffers);

struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
				       bool retry)
{
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;

	if (retry)
		gfp |= __GFP_NOFAIL;

	return folio_alloc_buffers(page_folio(page), size, gfp);
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void link_dev_buffers(struct folio *folio,
		struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	folio_attach_private(folio, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = bdev_nr_bytes(bdev);

	if (sz) {
		unsigned int sizebits = blksize_bits(size);

		retval = (sz >> sizebits);
	}
	return retval;
}
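
/*
 * Illustrative sketch (hypothetical helper): allocating a buffer chain with
 * folio_alloc_buffers() and tearing it down again.  Until link_dev_buffers()
 * runs, the b_this_page chain is NULL-terminated rather than circular.
 */
static void __maybe_unused example_alloc_buffer_chain(struct folio *folio,
						      unsigned long size)
{
	struct buffer_head *bh, *head;

	head = folio_alloc_buffers(folio, size, GFP_NOFS);
	if (!head)
		return;
	/* ... use the chain ... */
	do {
		bh = head;
		head = head->b_this_page;
		free_buffer_head(bh);
	} while (head);
}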

/*
 * Initialise the state of a blockdev folio's buffers.
 */
static sector_t folio_init_buffers(struct folio *folio,
		struct block_device *bdev, unsigned size)
{
	struct buffer_head *head = folio_buffers(folio);
	struct buffer_head *bh = head;
	bool uptodate = folio_test_uptodate(folio);
	sector_t block = div_u64(folio_pos(folio), size);
	sector_t end_block = blkdev_max_block(bdev, size);

	do {
		if (!buffer_mapped(bh)) {
			bh->b_end_io = NULL;
			bh->b_private = NULL;
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			if (block < end_block)
				set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * Caller needs to validate requested block against end of device.
	 */
	return end_block;
}

/*
 * Create the page-cache folio that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 *
 * Returns false if we have a failure which cannot be cured by retrying
 * without sleeping.  Returns true if we succeeded, or the caller should retry.
 */
static bool grow_dev_folio(struct block_device *bdev, sector_t block,
		pgoff_t index, unsigned size, gfp_t gfp)
{
	struct inode *inode = bdev->bd_inode;
	struct folio *folio;
	struct buffer_head *bh;
	sector_t end_block = 0;

	folio = __filemap_get_folio(inode->i_mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
	if (IS_ERR(folio))
		return false;

	bh = folio_buffers(folio);
	if (bh) {
		if (bh->b_size == size) {
			end_block = folio_init_buffers(folio, bdev, size);
			goto unlock;
		}

		/*
		 * Retrying may succeed; for example the folio may finish
		 * writeback, or buffers may be cleaned.
This should not 1064bcd30d4cSMatthew Wilcox (Oracle) * happen very often; maybe we have old buffers attached to 1065bcd30d4cSMatthew Wilcox (Oracle) * this blockdev's page cache and we're trying to change 1066bcd30d4cSMatthew Wilcox (Oracle) * the block size? 1067bcd30d4cSMatthew Wilcox (Oracle) */ 1068bcd30d4cSMatthew Wilcox (Oracle) if (!try_to_free_buffers(folio)) { 10696d840a18SMatthew Wilcox (Oracle) end_block = ~0ULL; 10706d840a18SMatthew Wilcox (Oracle) goto unlock; 10716d840a18SMatthew Wilcox (Oracle) } 1072bcd30d4cSMatthew Wilcox (Oracle) } 10736d840a18SMatthew Wilcox (Oracle) 10743ed65f04SMatthew Wilcox (Oracle) bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT); 10753ed65f04SMatthew Wilcox (Oracle) if (!bh) 10766d840a18SMatthew Wilcox (Oracle) goto unlock; 10771da177e4SLinus Torvalds 10781da177e4SLinus Torvalds /* 10793c98a41cSMatthew Wilcox (Oracle) * Link the folio to the buffers and initialise them. Take the 10801da177e4SLinus Torvalds * lock to be atomic wrt __find_get_block(), which does not 10813c98a41cSMatthew Wilcox (Oracle) * run under the folio lock. 10821da177e4SLinus Torvalds */ 1083600f111eSMatthew Wilcox (Oracle) spin_lock(&inode->i_mapping->i_private_lock); 108408d84addSMatthew Wilcox (Oracle) link_dev_buffers(folio, bh); 1085382497adSMatthew Wilcox (Oracle) end_block = folio_init_buffers(folio, bdev, size); 1086600f111eSMatthew Wilcox (Oracle) spin_unlock(&inode->i_mapping->i_private_lock); 10876d840a18SMatthew Wilcox (Oracle) unlock: 10883c98a41cSMatthew Wilcox (Oracle) folio_unlock(folio); 10893c98a41cSMatthew Wilcox (Oracle) folio_put(folio); 10906d840a18SMatthew Wilcox (Oracle) return block < end_block; 10911da177e4SLinus Torvalds } 10921da177e4SLinus Torvalds 10931da177e4SLinus Torvalds /* 10946d840a18SMatthew Wilcox (Oracle) * Create buffers for the specified block device block's folio. If 10956d840a18SMatthew Wilcox (Oracle) * that folio was dirty, the buffers are set dirty also. Returns false 10966d840a18SMatthew Wilcox (Oracle) * if we've hit a permanent error. 10971da177e4SLinus Torvalds */ 10986d840a18SMatthew Wilcox (Oracle) static bool grow_buffers(struct block_device *bdev, sector_t block, 10996d840a18SMatthew Wilcox (Oracle) unsigned size, gfp_t gfp) 11001da177e4SLinus Torvalds { 11015f3bd90dSMatthew Wilcox (Oracle) loff_t pos; 11021da177e4SLinus Torvalds 1103e5657933SAndrew Morton /* 11045f3bd90dSMatthew Wilcox (Oracle) * Check for a block which lies outside our maximum possible 11055f3bd90dSMatthew Wilcox (Oracle) * pagecache index. 1106e5657933SAndrew Morton */ 11075f3bd90dSMatthew Wilcox (Oracle) if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) { 11085f3bd90dSMatthew Wilcox (Oracle) printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n", 11098e24eea7SHarvey Harrison __func__, (unsigned long long)block, 1110a1c6f057SDmitry Monakhov bdev); 11116d840a18SMatthew Wilcox (Oracle) return false; 1112e5657933SAndrew Morton } 1113676ce6d5SHugh Dickins 11146d840a18SMatthew Wilcox (Oracle) /* Create a folio with the proper size buffers */ 11155f3bd90dSMatthew Wilcox (Oracle) return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp); 11161da177e4SLinus Torvalds } 11171da177e4SLinus Torvalds 11180026ba40SEric Biggers static struct buffer_head * 11193b5e6454SGioh Kim __getblk_slow(struct block_device *bdev, sector_t block, 11203b5e6454SGioh Kim unsigned size, gfp_t gfp) 11211da177e4SLinus Torvalds { 11221da177e4SLinus Torvalds /* Size must be multiple of hard sectorsize */ 1123e1defc4fSMartin K. 
Petersen if (unlikely(size & (bdev_logical_block_size(bdev)-1) || 11241da177e4SLinus Torvalds (size < 512 || size > PAGE_SIZE))) { 11251da177e4SLinus Torvalds printk(KERN_ERR "getblk(): invalid block size %d requested\n", 11261da177e4SLinus Torvalds size); 1127e1defc4fSMartin K. Petersen printk(KERN_ERR "logical block size: %d\n", 1128e1defc4fSMartin K. Petersen bdev_logical_block_size(bdev)); 11291da177e4SLinus Torvalds 11301da177e4SLinus Torvalds dump_stack(); 11311da177e4SLinus Torvalds return NULL; 11321da177e4SLinus Torvalds } 11331da177e4SLinus Torvalds 1134676ce6d5SHugh Dickins for (;;) { 1135676ce6d5SHugh Dickins struct buffer_head *bh; 1136676ce6d5SHugh Dickins 11371da177e4SLinus Torvalds bh = __find_get_block(bdev, block, size); 11381da177e4SLinus Torvalds if (bh) 11391da177e4SLinus Torvalds return bh; 11401da177e4SLinus Torvalds 11416d840a18SMatthew Wilcox (Oracle) if (!grow_buffers(bdev, block, size, gfp)) 114291f68c89SJeff Moyer return NULL; 1143676ce6d5SHugh Dickins } 11441da177e4SLinus Torvalds } 11451da177e4SLinus Torvalds 11461da177e4SLinus Torvalds /* 11471da177e4SLinus Torvalds * The relationship between dirty buffers and dirty pages: 11481da177e4SLinus Torvalds * 11491da177e4SLinus Torvalds * Whenever a page has any dirty buffers, the page's dirty bit is set, and 1150ec82e1c1SMatthew Wilcox * the page is tagged dirty in the page cache. 11511da177e4SLinus Torvalds * 11521da177e4SLinus Torvalds * At all times, the dirtiness of the buffers represents the dirtiness of 11531da177e4SLinus Torvalds * subsections of the page. If the page has buffers, the page dirty bit is 11541da177e4SLinus Torvalds * merely a hint about the true dirty state. 11551da177e4SLinus Torvalds * 11561da177e4SLinus Torvalds * When a page is set dirty in its entirety, all its buffers are marked dirty 11571da177e4SLinus Torvalds * (if the page has buffers). 11581da177e4SLinus Torvalds * 11591da177e4SLinus Torvalds * When a buffer is marked dirty, its page is dirtied, but the page's other 11601da177e4SLinus Torvalds * buffers are not. 11611da177e4SLinus Torvalds * 11621da177e4SLinus Torvalds * Also. When blockdev buffers are explicitly read with bread(), they 11631da177e4SLinus Torvalds * individually become uptodate. But their backing page remains not 11641da177e4SLinus Torvalds * uptodate - even if all of its buffers are uptodate. A subsequent 11652c69e205SMatthew Wilcox (Oracle) * block_read_full_folio() against that folio will discover all the uptodate 11662c69e205SMatthew Wilcox (Oracle) * buffers, will set the folio uptodate and will perform no I/O. 11671da177e4SLinus Torvalds */ 11681da177e4SLinus Torvalds 11691da177e4SLinus Torvalds /** 11701da177e4SLinus Torvalds * mark_buffer_dirty - mark a buffer_head as needing writeout 117167be2dd1SMartin Waitz * @bh: the buffer_head to mark dirty 11721da177e4SLinus Torvalds * 1173ec82e1c1SMatthew Wilcox * mark_buffer_dirty() will set the dirty bit against the buffer, then set 1174ec82e1c1SMatthew Wilcox * its backing page dirty, then tag the page as dirty in the page cache 1175ec82e1c1SMatthew Wilcox * and then attach the address_space's inode to its superblock's dirty 11761da177e4SLinus Torvalds * inode list. 11771da177e4SLinus Torvalds * 1178600f111eSMatthew Wilcox (Oracle) * mark_buffer_dirty() is atomic. It takes bh->b_folio->mapping->i_private_lock, 1179b93b0163SMatthew Wilcox * i_pages lock and mapping->host->i_lock. 
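 *
 * A minimal usage sketch (illustrative; off, src and n are placeholders):
 * after modifying the bytes that @bh maps, mark the buffer dirty and drop
 * the reference; writeback happens later:
 *
 *	memcpy(bh->b_data + off, src, n);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);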
11801da177e4SLinus Torvalds */ 1181fc9b52cdSHarvey Harrison void mark_buffer_dirty(struct buffer_head *bh) 11821da177e4SLinus Torvalds { 1183787d2214SNick Piggin WARN_ON_ONCE(!buffer_uptodate(bh)); 11841be62dc1SLinus Torvalds 11855305cb83STejun Heo trace_block_dirty_buffer(bh); 11865305cb83STejun Heo 11871be62dc1SLinus Torvalds /* 11881be62dc1SLinus Torvalds * Very *carefully* optimize the it-is-already-dirty case. 11891be62dc1SLinus Torvalds * 11901be62dc1SLinus Torvalds * Don't let the final "is it dirty" escape to before we 11911be62dc1SLinus Torvalds * perhaps modified the buffer. 11921be62dc1SLinus Torvalds */ 11931be62dc1SLinus Torvalds if (buffer_dirty(bh)) { 11941be62dc1SLinus Torvalds smp_mb(); 11951be62dc1SLinus Torvalds if (buffer_dirty(bh)) 11961be62dc1SLinus Torvalds return; 11971be62dc1SLinus Torvalds } 11981be62dc1SLinus Torvalds 1199a8e7d49aSLinus Torvalds if (!test_set_buffer_dirty(bh)) { 1200cf1d3417SMatthew Wilcox (Oracle) struct folio *folio = bh->b_folio; 1201c4843a75SGreg Thelen struct address_space *mapping = NULL; 1202c4843a75SGreg Thelen 1203cf1d3417SMatthew Wilcox (Oracle) folio_memcg_lock(folio); 1204cf1d3417SMatthew Wilcox (Oracle) if (!folio_test_set_dirty(folio)) { 1205cf1d3417SMatthew Wilcox (Oracle) mapping = folio->mapping; 12068e9d78edSLinus Torvalds if (mapping) 1207cf1d3417SMatthew Wilcox (Oracle) __folio_mark_dirty(folio, mapping, 0); 12088e9d78edSLinus Torvalds } 1209cf1d3417SMatthew Wilcox (Oracle) folio_memcg_unlock(folio); 1210c4843a75SGreg Thelen if (mapping) 1211c4843a75SGreg Thelen __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 1212a8e7d49aSLinus Torvalds } 12131da177e4SLinus Torvalds } 12141fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(mark_buffer_dirty); 12151da177e4SLinus Torvalds 121687354e5dSJeff Layton void mark_buffer_write_io_error(struct buffer_head *bh) 121787354e5dSJeff Layton { 121887354e5dSJeff Layton set_buffer_write_io_error(bh); 121987354e5dSJeff Layton /* FIXME: do we need to set this in both places? */ 1220abc8a8a2SMatthew Wilcox (Oracle) if (bh->b_folio && bh->b_folio->mapping) 1221abc8a8a2SMatthew Wilcox (Oracle) mapping_set_error(bh->b_folio->mapping, -EIO); 12224b2201daSChristoph Hellwig if (bh->b_assoc_map) { 122387354e5dSJeff Layton mapping_set_error(bh->b_assoc_map, -EIO); 12244b2201daSChristoph Hellwig errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO); 12254b2201daSChristoph Hellwig } 122687354e5dSJeff Layton } 122787354e5dSJeff Layton EXPORT_SYMBOL(mark_buffer_write_io_error); 122887354e5dSJeff Layton 122966924fdaSMatthew Wilcox (Oracle) /** 123066924fdaSMatthew Wilcox (Oracle) * __brelse - Release a buffer. 123166924fdaSMatthew Wilcox (Oracle) * @bh: The buffer to release. 123266924fdaSMatthew Wilcox (Oracle) * 123366924fdaSMatthew Wilcox (Oracle) * This variant of brelse() can be called if @bh is guaranteed to not be NULL. 12341da177e4SLinus Torvalds */ 123566924fdaSMatthew Wilcox (Oracle) void __brelse(struct buffer_head *bh) 12361da177e4SLinus Torvalds { 123766924fdaSMatthew Wilcox (Oracle) if (atomic_read(&bh->b_count)) { 123866924fdaSMatthew Wilcox (Oracle) put_bh(bh); 12391da177e4SLinus Torvalds return; 12401da177e4SLinus Torvalds } 12415c752ad9SArjan van de Ven WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n"); 12421da177e4SLinus Torvalds } 12431fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__brelse); 12441da177e4SLinus Torvalds 1245*b73a936fSMatthew Wilcox (Oracle) /** 1246*b73a936fSMatthew Wilcox (Oracle) * __bforget - Discard any dirty data in a buffer. 
1247*b73a936fSMatthew Wilcox (Oracle) * @bh: The buffer to forget. 1248*b73a936fSMatthew Wilcox (Oracle) * 1249*b73a936fSMatthew Wilcox (Oracle) * This variant of bforget() can be called if @bh is guaranteed to not 1250*b73a936fSMatthew Wilcox (Oracle) * be NULL. 12511da177e4SLinus Torvalds */ 12521da177e4SLinus Torvalds void __bforget(struct buffer_head *bh) 12531da177e4SLinus Torvalds { 12541da177e4SLinus Torvalds clear_buffer_dirty(bh); 1255535ee2fbSJan Kara if (bh->b_assoc_map) { 1256abc8a8a2SMatthew Wilcox (Oracle) struct address_space *buffer_mapping = bh->b_folio->mapping; 12571da177e4SLinus Torvalds 1258600f111eSMatthew Wilcox (Oracle) spin_lock(&buffer_mapping->i_private_lock); 12591da177e4SLinus Torvalds list_del_init(&bh->b_assoc_buffers); 126058ff407bSJan Kara bh->b_assoc_map = NULL; 1261600f111eSMatthew Wilcox (Oracle) spin_unlock(&buffer_mapping->i_private_lock); 12621da177e4SLinus Torvalds } 12631da177e4SLinus Torvalds __brelse(bh); 12641da177e4SLinus Torvalds } 12651fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__bforget); 12661da177e4SLinus Torvalds 12671da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh) 12681da177e4SLinus Torvalds { 12691da177e4SLinus Torvalds lock_buffer(bh); 12701da177e4SLinus Torvalds if (buffer_uptodate(bh)) { 12711da177e4SLinus Torvalds unlock_buffer(bh); 12721da177e4SLinus Torvalds return bh; 12731da177e4SLinus Torvalds } else { 12741da177e4SLinus Torvalds get_bh(bh); 12751da177e4SLinus Torvalds bh->b_end_io = end_buffer_read_sync; 12761420c4a5SBart Van Assche submit_bh(REQ_OP_READ, bh); 12771da177e4SLinus Torvalds wait_on_buffer(bh); 12781da177e4SLinus Torvalds if (buffer_uptodate(bh)) 12791da177e4SLinus Torvalds return bh; 12801da177e4SLinus Torvalds } 12811da177e4SLinus Torvalds brelse(bh); 12821da177e4SLinus Torvalds return NULL; 12831da177e4SLinus Torvalds } 12841da177e4SLinus Torvalds 12851da177e4SLinus Torvalds /* 12861da177e4SLinus Torvalds * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block(). 12871da177e4SLinus Torvalds * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their 12881da177e4SLinus Torvalds * refcount elevated by one when they're in an LRU. A buffer can only appear 12891da177e4SLinus Torvalds * once in a particular CPU's LRU. A single buffer can be present in multiple 12901da177e4SLinus Torvalds * CPU's LRUs at the same time. 12911da177e4SLinus Torvalds * 12921da177e4SLinus Torvalds * This is a transparent caching front-end to sb_bread(), sb_getblk() and 12931da177e4SLinus Torvalds * sb_find_get_block(). 12941da177e4SLinus Torvalds * 12951da177e4SLinus Torvalds * The LRUs themselves only need locking against invalidate_bh_lrus. We use 12961da177e4SLinus Torvalds * a local interrupt disable for that. 
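 *
 * Sketch of the resulting fast path (simplified from __find_get_block()
 * below): scan the small per-cpu array first, and fall back to the page
 * cache walk only on a miss:
 *
 *	bh = lookup_bh_lru(bdev, block, size);
 *	if (bh == NULL)
 *		bh = __find_get_block_slow(bdev, block);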
12971da177e4SLinus Torvalds */ 12981da177e4SLinus Torvalds 129986cf78d7SSebastien Buisson #define BH_LRU_SIZE 16 13001da177e4SLinus Torvalds 13011da177e4SLinus Torvalds struct bh_lru { 13021da177e4SLinus Torvalds struct buffer_head *bhs[BH_LRU_SIZE]; 13031da177e4SLinus Torvalds }; 13041da177e4SLinus Torvalds 13051da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }}; 13061da177e4SLinus Torvalds 13071da177e4SLinus Torvalds #ifdef CONFIG_SMP 13081da177e4SLinus Torvalds #define bh_lru_lock() local_irq_disable() 13091da177e4SLinus Torvalds #define bh_lru_unlock() local_irq_enable() 13101da177e4SLinus Torvalds #else 13111da177e4SLinus Torvalds #define bh_lru_lock() preempt_disable() 13121da177e4SLinus Torvalds #define bh_lru_unlock() preempt_enable() 13131da177e4SLinus Torvalds #endif 13141da177e4SLinus Torvalds 13151da177e4SLinus Torvalds static inline void check_irqs_on(void) 13161da177e4SLinus Torvalds { 13171da177e4SLinus Torvalds #ifdef irqs_disabled 13181da177e4SLinus Torvalds BUG_ON(irqs_disabled()); 13191da177e4SLinus Torvalds #endif 13201da177e4SLinus Torvalds } 13211da177e4SLinus Torvalds 13221da177e4SLinus Torvalds /* 1323241f01fbSEric Biggers * Install a buffer_head into this cpu's LRU. If not already in the LRU, it is 1324241f01fbSEric Biggers * inserted at the front, and the buffer_head at the back, if any, is evicted. 1325241f01fbSEric Biggers * Or, if already in the LRU it is moved to the front. 13261da177e4SLinus Torvalds */ 13271da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh) 13281da177e4SLinus Torvalds { 1329241f01fbSEric Biggers struct buffer_head *evictee = bh; 1330241f01fbSEric Biggers struct bh_lru *b; 1331241f01fbSEric Biggers int i; 13321da177e4SLinus Torvalds 13331da177e4SLinus Torvalds check_irqs_on(); 1334c0226eb8SMinchan Kim bh_lru_lock(); 1335c0226eb8SMinchan Kim 13368cc621d2SMinchan Kim /* 13378cc621d2SMinchan Kim * The elevated refcount of a buffer_head in the bh_lru prevents its 13388cc621d2SMinchan Kim * attached page from being dropped (i.e., try_to_free_buffers fails), 13398cc621d2SMinchan Kim * which in turn can make page migration fail. 13408cc621d2SMinchan Kim * Skip putting the upcoming bh into the bh_lru until migration is done. 13418cc621d2SMinchan Kim */ 13428a237adfSMarcelo Tosatti if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) { 1343c0226eb8SMinchan Kim bh_lru_unlock(); 13448cc621d2SMinchan Kim return; 1345c0226eb8SMinchan Kim } 1346241f01fbSEric Biggers 1347241f01fbSEric Biggers b = this_cpu_ptr(&bh_lrus); 1348241f01fbSEric Biggers for (i = 0; i < BH_LRU_SIZE; i++) { 1349241f01fbSEric Biggers swap(evictee, b->bhs[i]); 1350241f01fbSEric Biggers if (evictee == bh) { 1351241f01fbSEric Biggers bh_lru_unlock(); 1352241f01fbSEric Biggers return; 1353241f01fbSEric Biggers } 1354241f01fbSEric Biggers } 13551da177e4SLinus Torvalds 13561da177e4SLinus Torvalds get_bh(bh); 13571da177e4SLinus Torvalds bh_lru_unlock(); 1358241f01fbSEric Biggers brelse(evictee); 13591da177e4SLinus Torvalds } 13601da177e4SLinus Torvalds 13611da177e4SLinus Torvalds /* 13621da177e4SLinus Torvalds * Look up the bh in this cpu's LRU. If it's there, move it to the head.
13631da177e4SLinus Torvalds */ 1364858119e1SArjan van de Ven static struct buffer_head * 13653991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size) 13661da177e4SLinus Torvalds { 13671da177e4SLinus Torvalds struct buffer_head *ret = NULL; 13683991d3bdSTomasz Kvarsin unsigned int i; 13691da177e4SLinus Torvalds 13701da177e4SLinus Torvalds check_irqs_on(); 13711da177e4SLinus Torvalds bh_lru_lock(); 13728a237adfSMarcelo Tosatti if (cpu_is_isolated(smp_processor_id())) { 13738a237adfSMarcelo Tosatti bh_lru_unlock(); 13748a237adfSMarcelo Tosatti return NULL; 13758a237adfSMarcelo Tosatti } 13761da177e4SLinus Torvalds for (i = 0; i < BH_LRU_SIZE; i++) { 1377c7b92516SChristoph Lameter struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); 13781da177e4SLinus Torvalds 13799470dd5dSZach Brown if (bh && bh->b_blocknr == block && bh->b_bdev == bdev && 13809470dd5dSZach Brown bh->b_size == size) { 13811da177e4SLinus Torvalds if (i) { 13821da177e4SLinus Torvalds while (i) { 1383c7b92516SChristoph Lameter __this_cpu_write(bh_lrus.bhs[i], 1384c7b92516SChristoph Lameter __this_cpu_read(bh_lrus.bhs[i - 1])); 13851da177e4SLinus Torvalds i--; 13861da177e4SLinus Torvalds } 1387c7b92516SChristoph Lameter __this_cpu_write(bh_lrus.bhs[0], bh); 13881da177e4SLinus Torvalds } 13891da177e4SLinus Torvalds get_bh(bh); 13901da177e4SLinus Torvalds ret = bh; 13911da177e4SLinus Torvalds break; 13921da177e4SLinus Torvalds } 13931da177e4SLinus Torvalds } 13941da177e4SLinus Torvalds bh_lru_unlock(); 13951da177e4SLinus Torvalds return ret; 13961da177e4SLinus Torvalds } 13971da177e4SLinus Torvalds 13981da177e4SLinus Torvalds /* 13991da177e4SLinus Torvalds * Perform a pagecache lookup for the matching buffer. If it's there, refresh 14001da177e4SLinus Torvalds * it in the LRU and mark it as accessed. If it is not present then return 14011da177e4SLinus Torvalds * NULL. 14021da177e4SLinus Torvalds */ 14031da177e4SLinus Torvalds struct buffer_head * 14043991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size) 14051da177e4SLinus Torvalds { 14061da177e4SLinus Torvalds struct buffer_head *bh = lookup_bh_lru(bdev, block, size); 14071da177e4SLinus Torvalds 14081da177e4SLinus Torvalds if (bh == NULL) { 14092457aec6SMel Gorman /* __find_get_block_slow will mark the page accessed */ 1410385fd4c5SCoywolf Qi Hunt bh = __find_get_block_slow(bdev, block); 14111da177e4SLinus Torvalds if (bh) 14121da177e4SLinus Torvalds bh_lru_install(bh); 14132457aec6SMel Gorman } else 14141da177e4SLinus Torvalds touch_buffer(bh); 14152457aec6SMel Gorman 14161da177e4SLinus Torvalds return bh; 14171da177e4SLinus Torvalds } 14181da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block); 14191da177e4SLinus Torvalds 14203ed65f04SMatthew Wilcox (Oracle) /** 14213ed65f04SMatthew Wilcox (Oracle) * bdev_getblk - Get a buffer_head in a block device's buffer cache. 14223ed65f04SMatthew Wilcox (Oracle) * @bdev: The block device. 14233ed65f04SMatthew Wilcox (Oracle) * @block: The block number. 14243ed65f04SMatthew Wilcox (Oracle) * @size: The size of buffer_heads for this @bdev. 14253ed65f04SMatthew Wilcox (Oracle) * @gfp: The memory allocation flags to use. 14263ed65f04SMatthew Wilcox (Oracle) * 14273ed65f04SMatthew Wilcox (Oracle) * Return: The buffer head, or NULL if memory could not be allocated.
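 *
 * Usage sketch (illustrative; sb and nr are hypothetical caller state):
 *
 *	bh = bdev_getblk(sb->s_bdev, nr, sb->s_blocksize, GFP_NOFS);
 *	if (!bh)
 *		return -ENOMEM;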
14283ed65f04SMatthew Wilcox (Oracle) */ 14293ed65f04SMatthew Wilcox (Oracle) struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block, 14303ed65f04SMatthew Wilcox (Oracle) unsigned size, gfp_t gfp) 14313ed65f04SMatthew Wilcox (Oracle) { 14323ed65f04SMatthew Wilcox (Oracle) struct buffer_head *bh = __find_get_block(bdev, block, size); 14333ed65f04SMatthew Wilcox (Oracle) 14343ed65f04SMatthew Wilcox (Oracle) might_alloc(gfp); 14353ed65f04SMatthew Wilcox (Oracle) if (bh) 14363ed65f04SMatthew Wilcox (Oracle) return bh; 14373ed65f04SMatthew Wilcox (Oracle) 14383ed65f04SMatthew Wilcox (Oracle) return __getblk_slow(bdev, block, size, gfp); 14393ed65f04SMatthew Wilcox (Oracle) } 14403ed65f04SMatthew Wilcox (Oracle) EXPORT_SYMBOL(bdev_getblk); 14413ed65f04SMatthew Wilcox (Oracle) 14421da177e4SLinus Torvalds /* 14431da177e4SLinus Torvalds * Do async read-ahead on a buffer. 14441da177e4SLinus Torvalds */ 14453991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size) 14461da177e4SLinus Torvalds { 1447775d9b10SMatthew Wilcox (Oracle) struct buffer_head *bh = bdev_getblk(bdev, block, size, 1448775d9b10SMatthew Wilcox (Oracle) GFP_NOWAIT | __GFP_MOVABLE); 1449775d9b10SMatthew Wilcox (Oracle) 1450a3e713b5SAndrew Morton if (likely(bh)) { 1451e7ea1129SZhang Yi bh_readahead(bh, REQ_RAHEAD); 14521da177e4SLinus Torvalds brelse(bh); 14531da177e4SLinus Torvalds } 1454a3e713b5SAndrew Morton } 14551da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead); 14561da177e4SLinus Torvalds 14571da177e4SLinus Torvalds /** 1458324ecaeeSMatthew Wilcox (Oracle) * __bread_gfp() - Read a block. 1459324ecaeeSMatthew Wilcox (Oracle) * @bdev: The block device to read from. 1460324ecaeeSMatthew Wilcox (Oracle) * @block: Block number in units of block size. 1461324ecaeeSMatthew Wilcox (Oracle) * @size: The block size of this device in bytes. 1462324ecaeeSMatthew Wilcox (Oracle) * @gfp: Not page allocation flags; see below. 14631da177e4SLinus Torvalds * 1464324ecaeeSMatthew Wilcox (Oracle) * You are not expected to call this function. You should use one of 1465324ecaeeSMatthew Wilcox (Oracle) * sb_bread(), sb_bread_unmovable() or __bread(). 1466324ecaeeSMatthew Wilcox (Oracle) * 1467324ecaeeSMatthew Wilcox (Oracle) * Read a specified block, and return the buffer head that refers to it. 1468324ecaeeSMatthew Wilcox (Oracle) * If @gfp is 0, the memory will be allocated using the block device's 1469324ecaeeSMatthew Wilcox (Oracle) * default GFP flags. If @gfp is __GFP_MOVABLE, the memory may be 1470324ecaeeSMatthew Wilcox (Oracle) * allocated from a movable area. Do not pass in a complete set of 1471324ecaeeSMatthew Wilcox (Oracle) * GFP flags. 1472324ecaeeSMatthew Wilcox (Oracle) * 1473324ecaeeSMatthew Wilcox (Oracle) * The returned buffer head has its refcount increased. The caller should 1474324ecaeeSMatthew Wilcox (Oracle) * call brelse() when it has finished with the buffer. 1475324ecaeeSMatthew Wilcox (Oracle) * 1476324ecaeeSMatthew Wilcox (Oracle) * Context: May sleep waiting for I/O. 1477324ecaeeSMatthew Wilcox (Oracle) * Return: NULL if the block was unreadable.
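 *
 * Usage sketch (illustrative only; assumes a 4KiB block size, and buf is a
 * caller-provided destination):
 *
 *	bh = __bread_gfp(bdev, block, 4096, __GFP_MOVABLE);
 *	if (!bh)
 *		return -EIO;
 *	memcpy(buf, bh->b_data, 4096);
 *	brelse(bh);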
14781da177e4SLinus Torvalds */ 1479324ecaeeSMatthew Wilcox (Oracle) struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block, 14803b5e6454SGioh Kim unsigned size, gfp_t gfp) 14811da177e4SLinus Torvalds { 148293b13ecaSMatthew Wilcox (Oracle) struct buffer_head *bh; 148393b13ecaSMatthew Wilcox (Oracle) 148493b13ecaSMatthew Wilcox (Oracle) gfp |= mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS); 148593b13ecaSMatthew Wilcox (Oracle) 148693b13ecaSMatthew Wilcox (Oracle) /* 148793b13ecaSMatthew Wilcox (Oracle) * Prefer looping in the allocator rather than here, at least that 148893b13ecaSMatthew Wilcox (Oracle) * code knows what it's doing. 148993b13ecaSMatthew Wilcox (Oracle) */ 149093b13ecaSMatthew Wilcox (Oracle) gfp |= __GFP_NOFAIL; 149193b13ecaSMatthew Wilcox (Oracle) 149293b13ecaSMatthew Wilcox (Oracle) bh = bdev_getblk(bdev, block, size, gfp); 14931da177e4SLinus Torvalds 1494a3e713b5SAndrew Morton if (likely(bh) && !buffer_uptodate(bh)) 14951da177e4SLinus Torvalds bh = __bread_slow(bh); 14961da177e4SLinus Torvalds return bh; 14971da177e4SLinus Torvalds } 14983b5e6454SGioh Kim EXPORT_SYMBOL(__bread_gfp); 14991da177e4SLinus Torvalds 15008cc621d2SMinchan Kim static void __invalidate_bh_lrus(struct bh_lru *b) 15018cc621d2SMinchan Kim { 15028cc621d2SMinchan Kim int i; 15038cc621d2SMinchan Kim 15048cc621d2SMinchan Kim for (i = 0; i < BH_LRU_SIZE; i++) { 15058cc621d2SMinchan Kim brelse(b->bhs[i]); 15068cc621d2SMinchan Kim b->bhs[i] = NULL; 15078cc621d2SMinchan Kim } 15088cc621d2SMinchan Kim } 15091da177e4SLinus Torvalds /* 15101da177e4SLinus Torvalds * invalidate_bh_lrus() is called rarely - but not only at unmount. 15111da177e4SLinus Torvalds * This doesn't race because it runs in each cpu either in irq 15121da177e4SLinus Torvalds * or with preempt disabled. 15131da177e4SLinus Torvalds */ 15141da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg) 15151da177e4SLinus Torvalds { 15161da177e4SLinus Torvalds struct bh_lru *b = &get_cpu_var(bh_lrus); 15171da177e4SLinus Torvalds 15188cc621d2SMinchan Kim __invalidate_bh_lrus(b); 15191da177e4SLinus Torvalds put_cpu_var(bh_lrus); 15201da177e4SLinus Torvalds } 15211da177e4SLinus Torvalds 15228cc621d2SMinchan Kim bool has_bh_in_lru(int cpu, void *dummy) 152342be35d0SGilad Ben-Yossef { 152442be35d0SGilad Ben-Yossef struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu); 152542be35d0SGilad Ben-Yossef int i; 152642be35d0SGilad Ben-Yossef 152742be35d0SGilad Ben-Yossef for (i = 0; i < BH_LRU_SIZE; i++) { 152842be35d0SGilad Ben-Yossef if (b->bhs[i]) 15291d706679SSaurav Girepunje return true; 153042be35d0SGilad Ben-Yossef } 153142be35d0SGilad Ben-Yossef 15321d706679SSaurav Girepunje return false; 153342be35d0SGilad Ben-Yossef } 153442be35d0SGilad Ben-Yossef 1535f9a14399SPeter Zijlstra void invalidate_bh_lrus(void) 15361da177e4SLinus Torvalds { 1537cb923159SSebastian Andrzej Siewior on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1); 15381da177e4SLinus Torvalds } 15399db5579bSNick Piggin EXPORT_SYMBOL_GPL(invalidate_bh_lrus); 15401da177e4SLinus Torvalds 1541243418e3SMinchan Kim /* 1542243418e3SMinchan Kim * It's called from workqueue context so we need a bh_lru_lock to close 1543243418e3SMinchan Kim * the race with preemption/irq. 
1544243418e3SMinchan Kim */ 1545243418e3SMinchan Kim void invalidate_bh_lrus_cpu(void) 15468cc621d2SMinchan Kim { 15478cc621d2SMinchan Kim struct bh_lru *b; 15488cc621d2SMinchan Kim 15498cc621d2SMinchan Kim bh_lru_lock(); 1550243418e3SMinchan Kim b = this_cpu_ptr(&bh_lrus); 15518cc621d2SMinchan Kim __invalidate_bh_lrus(b); 15528cc621d2SMinchan Kim bh_lru_unlock(); 15538cc621d2SMinchan Kim } 15548cc621d2SMinchan Kim 1555465e5e6aSPankaj Raghav void folio_set_bh(struct buffer_head *bh, struct folio *folio, 1556465e5e6aSPankaj Raghav unsigned long offset) 1557465e5e6aSPankaj Raghav { 1558465e5e6aSPankaj Raghav bh->b_folio = folio; 1559465e5e6aSPankaj Raghav BUG_ON(offset >= folio_size(folio)); 1560465e5e6aSPankaj Raghav if (folio_test_highmem(folio)) 1561465e5e6aSPankaj Raghav /* 1562465e5e6aSPankaj Raghav * This catches illegal uses and preserves the offset: 1563465e5e6aSPankaj Raghav */ 1564465e5e6aSPankaj Raghav bh->b_data = (char *)(0 + offset); 1565465e5e6aSPankaj Raghav else 1566465e5e6aSPankaj Raghav bh->b_data = folio_address(folio) + offset; 1567465e5e6aSPankaj Raghav } 1568465e5e6aSPankaj Raghav EXPORT_SYMBOL(folio_set_bh); 1569465e5e6aSPankaj Raghav 15701da177e4SLinus Torvalds /* 15711da177e4SLinus Torvalds * Called when truncating a buffer on a page completely. 15721da177e4SLinus Torvalds */ 1573e7470ee8SMel Gorman 1574e7470ee8SMel Gorman /* Bits that are cleared during an invalidate */ 1575e7470ee8SMel Gorman #define BUFFER_FLAGS_DISCARD \ 1576e7470ee8SMel Gorman (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \ 1577e7470ee8SMel Gorman 1 << BH_Delay | 1 << BH_Unwritten) 1578e7470ee8SMel Gorman 1579858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh) 15801da177e4SLinus Torvalds { 1581b0192296SUros Bizjak unsigned long b_state; 1582e7470ee8SMel Gorman 15831da177e4SLinus Torvalds lock_buffer(bh); 15841da177e4SLinus Torvalds clear_buffer_dirty(bh); 15851da177e4SLinus Torvalds bh->b_bdev = NULL; 1586b0192296SUros Bizjak b_state = READ_ONCE(bh->b_state); 1587b0192296SUros Bizjak do { 1588b0192296SUros Bizjak } while (!try_cmpxchg(&bh->b_state, &b_state, 1589b0192296SUros Bizjak b_state & ~BUFFER_FLAGS_DISCARD)); 15901da177e4SLinus Torvalds unlock_buffer(bh); 15911da177e4SLinus Torvalds } 15921da177e4SLinus Torvalds 15931da177e4SLinus Torvalds /** 15947ba13abbSMatthew Wilcox (Oracle) * block_invalidate_folio - Invalidate part or all of a buffer-backed folio. 15957ba13abbSMatthew Wilcox (Oracle) * @folio: The folio which is affected. 1596d47992f8SLukas Czerner * @offset: start of the range to invalidate 1597d47992f8SLukas Czerner * @length: length of the range to invalidate 15981da177e4SLinus Torvalds * 15997ba13abbSMatthew Wilcox (Oracle) * block_invalidate_folio() is called when all or part of the folio has been 16001da177e4SLinus Torvalds * invalidated by a truncate operation. 16011da177e4SLinus Torvalds * 16027ba13abbSMatthew Wilcox (Oracle) * block_invalidate_folio() does not have to release all buffers, but it must 16031da177e4SLinus Torvalds * ensure that no dirty buffer is left outside @offset and that no I/O 16041da177e4SLinus Torvalds * is underway against any of the blocks which are outside the truncation 16051da177e4SLinus Torvalds * point. Because the caller is about to free (and possibly reuse) those 16061da177e4SLinus Torvalds * blocks on-disk. 
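 *
 * It is normally wired up as an address_space_operations callback; a sketch
 * (myfs_aops is hypothetical, the field name is the mainline one):
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.invalidate_folio = block_invalidate_folio,
 *		...
 *	};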
16071da177e4SLinus Torvalds */ 16087ba13abbSMatthew Wilcox (Oracle) void block_invalidate_folio(struct folio *folio, size_t offset, size_t length) 16091da177e4SLinus Torvalds { 16101da177e4SLinus Torvalds struct buffer_head *head, *bh, *next; 16117ba13abbSMatthew Wilcox (Oracle) size_t curr_off = 0; 16127ba13abbSMatthew Wilcox (Oracle) size_t stop = length + offset; 16131da177e4SLinus Torvalds 16147ba13abbSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio)); 16151da177e4SLinus Torvalds 1616d47992f8SLukas Czerner /* 1617d47992f8SLukas Czerner * Check for overflow 1618d47992f8SLukas Czerner */ 16197ba13abbSMatthew Wilcox (Oracle) BUG_ON(stop > folio_size(folio) || stop < length); 1620d47992f8SLukas Czerner 16217ba13abbSMatthew Wilcox (Oracle) head = folio_buffers(folio); 16227ba13abbSMatthew Wilcox (Oracle) if (!head) 16237ba13abbSMatthew Wilcox (Oracle) return; 16247ba13abbSMatthew Wilcox (Oracle) 16251da177e4SLinus Torvalds bh = head; 16261da177e4SLinus Torvalds do { 16277ba13abbSMatthew Wilcox (Oracle) size_t next_off = curr_off + bh->b_size; 16281da177e4SLinus Torvalds next = bh->b_this_page; 16291da177e4SLinus Torvalds 16301da177e4SLinus Torvalds /* 1631d47992f8SLukas Czerner * Are we still fully in range? 1632d47992f8SLukas Czerner */ 1633d47992f8SLukas Czerner if (next_off > stop) 1634d47992f8SLukas Czerner goto out; 1635d47992f8SLukas Czerner 1636d47992f8SLukas Czerner /* 16371da177e4SLinus Torvalds * Is this block fully invalidated? 16381da177e4SLinus Torvalds */ 16391da177e4SLinus Torvalds if (offset <= curr_off) 16401da177e4SLinus Torvalds discard_buffer(bh); 16411da177e4SLinus Torvalds curr_off = next_off; 16421da177e4SLinus Torvalds bh = next; 16431da177e4SLinus Torvalds } while (bh != head); 16441da177e4SLinus Torvalds 16451da177e4SLinus Torvalds /* 16467ba13abbSMatthew Wilcox (Oracle) * We release buffers only if the entire folio is being invalidated. 16471da177e4SLinus Torvalds * The get_block cached value has been unconditionally invalidated, 16481da177e4SLinus Torvalds * so real IO is not possible anymore. 16491da177e4SLinus Torvalds */ 16507ba13abbSMatthew Wilcox (Oracle) if (length == folio_size(folio)) 16517ba13abbSMatthew Wilcox (Oracle) filemap_release_folio(folio, 0); 16521da177e4SLinus Torvalds out: 16532ff28e22SNeilBrown return; 16541da177e4SLinus Torvalds } 16557ba13abbSMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_invalidate_folio); 16561da177e4SLinus Torvalds 16571da177e4SLinus Torvalds /* 16581da177e4SLinus Torvalds * We attach and possibly dirty the buffers atomically wrt 1659600f111eSMatthew Wilcox (Oracle) * block_dirty_folio() via i_private_lock. try_to_free_buffers 16608e2e1756SPankaj Raghav * is already excluded via the folio lock.
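 *
 * Callers usually create buffers only when the folio has none yet, e.g.
 * (sketch, mirroring folio_create_buffers() below; blocksize is caller
 * state):
 *
 *	head = folio_buffers(folio);
 *	if (!head)
 *		head = create_empty_buffers(folio, blocksize, 0);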
16611da177e4SLinus Torvalds */ 16620a88810dSMatthew Wilcox (Oracle) struct buffer_head *create_empty_buffers(struct folio *folio, 16633decb856SMatthew Wilcox (Oracle) unsigned long blocksize, unsigned long b_state) 16641da177e4SLinus Torvalds { 16651da177e4SLinus Torvalds struct buffer_head *bh, *head, *tail; 16662a418157SMatthew Wilcox (Oracle) gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL; 16671da177e4SLinus Torvalds 16682a418157SMatthew Wilcox (Oracle) head = folio_alloc_buffers(folio, blocksize, gfp); 16691da177e4SLinus Torvalds bh = head; 16701da177e4SLinus Torvalds do { 16711da177e4SLinus Torvalds bh->b_state |= b_state; 16721da177e4SLinus Torvalds tail = bh; 16731da177e4SLinus Torvalds bh = bh->b_this_page; 16741da177e4SLinus Torvalds } while (bh); 16751da177e4SLinus Torvalds tail->b_this_page = head; 16761da177e4SLinus Torvalds 1677600f111eSMatthew Wilcox (Oracle) spin_lock(&folio->mapping->i_private_lock); 16788e2e1756SPankaj Raghav if (folio_test_uptodate(folio) || folio_test_dirty(folio)) { 16791da177e4SLinus Torvalds bh = head; 16801da177e4SLinus Torvalds do { 16818e2e1756SPankaj Raghav if (folio_test_dirty(folio)) 16821da177e4SLinus Torvalds set_buffer_dirty(bh); 16838e2e1756SPankaj Raghav if (folio_test_uptodate(folio)) 16841da177e4SLinus Torvalds set_buffer_uptodate(bh); 16851da177e4SLinus Torvalds bh = bh->b_this_page; 16861da177e4SLinus Torvalds } while (bh != head); 16871da177e4SLinus Torvalds } 16888e2e1756SPankaj Raghav folio_attach_private(folio, head); 1689600f111eSMatthew Wilcox (Oracle) spin_unlock(&folio->mapping->i_private_lock); 16903decb856SMatthew Wilcox (Oracle) 16913decb856SMatthew Wilcox (Oracle) return head; 16928e2e1756SPankaj Raghav } 16931da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers); 16941da177e4SLinus Torvalds 169529f3ad7dSJan Kara /** 169629f3ad7dSJan Kara * clean_bdev_aliases: clean a range of buffers in block device 169729f3ad7dSJan Kara * @bdev: Block device to clean buffers in 169829f3ad7dSJan Kara * @block: Start of a range of blocks to clean 169929f3ad7dSJan Kara * @len: Number of blocks to clean 17001da177e4SLinus Torvalds * 170129f3ad7dSJan Kara * We are taking a range of blocks for data and we don't want writeback of any 170229f3ad7dSJan Kara * buffer-cache aliases from the moment this function returns until the 170329f3ad7dSJan Kara * moment when something explicitly marks the buffer dirty (hopefully that 170429f3ad7dSJan Kara * will not happen until we free that block ;-) We don't even need to mark 170529f3ad7dSJan Kara * it not-uptodate - nobody can expect anything from a newly allocated buffer 170629f3ad7dSJan Kara * anyway. We used to use unmap_buffer() for such invalidation, but that was 170729f3ad7dSJan Kara * wrong. We definitely don't want to mark the alias unmapped, for example - it 170829f3ad7dSJan Kara * would confuse anyone who might pick it with bread() afterwards... 170929f3ad7dSJan Kara * 171029f3ad7dSJan Kara * Also, note that bforget() doesn't lock the buffer. So there can be 171129f3ad7dSJan Kara * writeout I/O going on against recently-freed buffers. We don't wait on that 171229f3ad7dSJan Kara * I/O in bforget() - it's more efficient to wait on the I/O only if we really 171329f3ad7dSJan Kara * need to. That happens here.
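 *
 * Usage sketch (illustrative; first_block and nr_blocks are hypothetical):
 * after allocating nr_blocks new data blocks starting at first_block, a
 * filesystem would call
 *
 *	clean_bdev_aliases(sb->s_bdev, first_block, nr_blocks);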
17141da177e4SLinus Torvalds */ 171529f3ad7dSJan Kara void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len) 17161da177e4SLinus Torvalds { 171729f3ad7dSJan Kara struct inode *bd_inode = bdev->bd_inode; 171829f3ad7dSJan Kara struct address_space *bd_mapping = bd_inode->i_mapping; 17199e0b6f31SMatthew Wilcox (Oracle) struct folio_batch fbatch; 17204b04646cSMatthew Wilcox (Oracle) pgoff_t index = ((loff_t)block << bd_inode->i_blkbits) / PAGE_SIZE; 172129f3ad7dSJan Kara pgoff_t end; 1722c10f778dSJan Kara int i, count; 172329f3ad7dSJan Kara struct buffer_head *bh; 172429f3ad7dSJan Kara struct buffer_head *head; 17251da177e4SLinus Torvalds 17264b04646cSMatthew Wilcox (Oracle) end = ((loff_t)(block + len - 1) << bd_inode->i_blkbits) / PAGE_SIZE; 17279e0b6f31SMatthew Wilcox (Oracle) folio_batch_init(&fbatch); 17289e0b6f31SMatthew Wilcox (Oracle) while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) { 17299e0b6f31SMatthew Wilcox (Oracle) count = folio_batch_count(&fbatch); 1730c10f778dSJan Kara for (i = 0; i < count; i++) { 17319e0b6f31SMatthew Wilcox (Oracle) struct folio *folio = fbatch.folios[i]; 17321da177e4SLinus Torvalds 17339e0b6f31SMatthew Wilcox (Oracle) if (!folio_buffers(folio)) 173429f3ad7dSJan Kara continue; 173529f3ad7dSJan Kara /* 1736600f111eSMatthew Wilcox (Oracle) * We use folio lock instead of bd_mapping->i_private_lock 173729f3ad7dSJan Kara * to pin buffers here since we can afford to sleep and 173829f3ad7dSJan Kara * it scales better than a global spinlock lock. 173929f3ad7dSJan Kara */ 17409e0b6f31SMatthew Wilcox (Oracle) folio_lock(folio); 17419e0b6f31SMatthew Wilcox (Oracle) /* Recheck when the folio is locked which pins bhs */ 17429e0b6f31SMatthew Wilcox (Oracle) head = folio_buffers(folio); 17439e0b6f31SMatthew Wilcox (Oracle) if (!head) 174429f3ad7dSJan Kara goto unlock_page; 174529f3ad7dSJan Kara bh = head; 174629f3ad7dSJan Kara do { 17476c006a9dSChandan Rajendra if (!buffer_mapped(bh) || (bh->b_blocknr < block)) 174829f3ad7dSJan Kara goto next; 174929f3ad7dSJan Kara if (bh->b_blocknr >= block + len) 175029f3ad7dSJan Kara break; 175129f3ad7dSJan Kara clear_buffer_dirty(bh); 175229f3ad7dSJan Kara wait_on_buffer(bh); 175329f3ad7dSJan Kara clear_buffer_req(bh); 175429f3ad7dSJan Kara next: 175529f3ad7dSJan Kara bh = bh->b_this_page; 175629f3ad7dSJan Kara } while (bh != head); 175729f3ad7dSJan Kara unlock_page: 17589e0b6f31SMatthew Wilcox (Oracle) folio_unlock(folio); 175929f3ad7dSJan Kara } 17609e0b6f31SMatthew Wilcox (Oracle) folio_batch_release(&fbatch); 176129f3ad7dSJan Kara cond_resched(); 1762c10f778dSJan Kara /* End of range already reached? 
*/ 1763c10f778dSJan Kara if (index > end || !index) 1764c10f778dSJan Kara break; 17651da177e4SLinus Torvalds } 17661da177e4SLinus Torvalds } 176729f3ad7dSJan Kara EXPORT_SYMBOL(clean_bdev_aliases); 17681da177e4SLinus Torvalds 1769c6c8c3e7SPankaj Raghav static struct buffer_head *folio_create_buffers(struct folio *folio, 1770c6c8c3e7SPankaj Raghav struct inode *inode, 1771c6c8c3e7SPankaj Raghav unsigned int b_state) 177245bce8f3SLinus Torvalds { 17733decb856SMatthew Wilcox (Oracle) struct buffer_head *bh; 17743decb856SMatthew Wilcox (Oracle) 1775c6c8c3e7SPankaj Raghav BUG_ON(!folio_test_locked(folio)); 177645bce8f3SLinus Torvalds 17773decb856SMatthew Wilcox (Oracle) bh = folio_buffers(folio); 17783decb856SMatthew Wilcox (Oracle) if (!bh) 17790a88810dSMatthew Wilcox (Oracle) bh = create_empty_buffers(folio, 17803decb856SMatthew Wilcox (Oracle) 1 << READ_ONCE(inode->i_blkbits), b_state); 17813decb856SMatthew Wilcox (Oracle) return bh; 178245bce8f3SLinus Torvalds } 178345bce8f3SLinus Torvalds 178445bce8f3SLinus Torvalds /* 17851da177e4SLinus Torvalds * NOTE! All mapped/uptodate combinations are valid: 17861da177e4SLinus Torvalds * 17871da177e4SLinus Torvalds * Mapped Uptodate Meaning 17881da177e4SLinus Torvalds * 17891da177e4SLinus Torvalds * No No "unknown" - must do get_block() 17901da177e4SLinus Torvalds * No Yes "hole" - zero-filled 17911da177e4SLinus Torvalds * Yes No "allocated" - allocated on disk, not read in 17921da177e4SLinus Torvalds * Yes Yes "valid" - allocated and up-to-date in memory. 17931da177e4SLinus Torvalds * 17941da177e4SLinus Torvalds * "Dirty" is valid only with the last case (mapped+uptodate). 17951da177e4SLinus Torvalds */ 17961da177e4SLinus Torvalds 17971da177e4SLinus Torvalds /* 179817bf23a9SMatthew Wilcox (Oracle) * While block_write_full_folio is writing back the dirty buffers under 17991da177e4SLinus Torvalds * the page lock, whoever dirtied the buffers may decide to clean them 18001da177e4SLinus Torvalds * again at any time. We handle that by only looking at the buffer 18011da177e4SLinus Torvalds * state inside lock_buffer(). 18021da177e4SLinus Torvalds * 180317bf23a9SMatthew Wilcox (Oracle) * If block_write_full_folio() is called for regular writeback 18041da177e4SLinus Torvalds * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a 18051da177e4SLinus Torvalds * locked buffer. This only can happen if someone has written the buffer 18061da177e4SLinus Torvalds * directly, with submit_bh(). At the address_space level PageWriteback 18071da177e4SLinus Torvalds * prevents this contention from occurring. 18086e34eeddSTheodore Ts'o * 180917bf23a9SMatthew Wilcox (Oracle) * If block_write_full_folio() is called with wbc->sync_mode == 181070fd7614SChristoph Hellwig * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this 1811721a9602SJens Axboe * causes the writes to be flagged as synchronous writes. 
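 *
 * A sketch of a filesystem writeback hook built on this helper
 * (myfs_write_folio and myfs_get_block are hypothetical; the argument
 * order matches the declaration below):
 *
 *	static int myfs_write_folio(struct folio *folio,
 *				    struct writeback_control *wbc)
 *	{
 *		return __block_write_full_folio(folio->mapping->host,
 *				folio, myfs_get_block, wbc);
 *	}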
18121da177e4SLinus Torvalds */ 181353418a18SMatthew Wilcox (Oracle) int __block_write_full_folio(struct inode *inode, struct folio *folio, 181414059f66SMatthew Wilcox (Oracle) get_block_t *get_block, struct writeback_control *wbc) 18151da177e4SLinus Torvalds { 18161da177e4SLinus Torvalds int err; 18171da177e4SLinus Torvalds sector_t block; 18181da177e4SLinus Torvalds sector_t last_block; 1819f0fbd5fcSAndrew Morton struct buffer_head *bh, *head; 1820fa399c31SMatthew Wilcox (Oracle) size_t blocksize; 18211da177e4SLinus Torvalds int nr_underway = 0; 18223ae72869SBart Van Assche blk_opf_t write_flags = wbc_to_write_flags(wbc); 18231da177e4SLinus Torvalds 182453418a18SMatthew Wilcox (Oracle) head = folio_create_buffers(folio, inode, 18251da177e4SLinus Torvalds (1 << BH_Dirty) | (1 << BH_Uptodate)); 18261da177e4SLinus Torvalds 18271da177e4SLinus Torvalds /* 1828e621900aSMatthew Wilcox (Oracle) * Be very careful. We have no exclusion from block_dirty_folio 18291da177e4SLinus Torvalds * here, and the (potentially unmapped) buffers may become dirty at 18301da177e4SLinus Torvalds * any time. If a buffer becomes dirty here after we've inspected it 183153418a18SMatthew Wilcox (Oracle) * then we just miss that fact, and the folio stays dirty. 18321da177e4SLinus Torvalds * 1833e621900aSMatthew Wilcox (Oracle) * Buffers outside i_size may be dirtied by block_dirty_folio; 18341da177e4SLinus Torvalds * handle that here by just cleaning them. 18351da177e4SLinus Torvalds */ 18361da177e4SLinus Torvalds 18371da177e4SLinus Torvalds bh = head; 183845bce8f3SLinus Torvalds blocksize = bh->b_size; 183945bce8f3SLinus Torvalds 1840fa399c31SMatthew Wilcox (Oracle) block = div_u64(folio_pos(folio), blocksize); 1841fa399c31SMatthew Wilcox (Oracle) last_block = div_u64(i_size_read(inode) - 1, blocksize); 18421da177e4SLinus Torvalds 18431da177e4SLinus Torvalds /* 18441da177e4SLinus Torvalds * Get all the dirty buffers mapped to disk addresses and 18451da177e4SLinus Torvalds * handle any aliases from the underlying blockdev's mapping. 18461da177e4SLinus Torvalds */ 18471da177e4SLinus Torvalds do { 18481da177e4SLinus Torvalds if (block > last_block) { 18491da177e4SLinus Torvalds /* 18501da177e4SLinus Torvalds * mapped buffers outside i_size will occur, because 185153418a18SMatthew Wilcox (Oracle) * this folio can be outside i_size when there is a 18521da177e4SLinus Torvalds * truncate in progress. 
18531da177e4SLinus Torvalds */ 18541da177e4SLinus Torvalds /* 185517bf23a9SMatthew Wilcox (Oracle) * The buffer was zeroed by block_write_full_folio() 18561da177e4SLinus Torvalds */ 18571da177e4SLinus Torvalds clear_buffer_dirty(bh); 18581da177e4SLinus Torvalds set_buffer_uptodate(bh); 185929a814d2SAlex Tomas } else if ((!buffer_mapped(bh) || buffer_delay(bh)) && 186029a814d2SAlex Tomas buffer_dirty(bh)) { 1861b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 18621da177e4SLinus Torvalds err = get_block(inode, block, bh, 1); 18631da177e4SLinus Torvalds if (err) 18641da177e4SLinus Torvalds goto recover; 186529a814d2SAlex Tomas clear_buffer_delay(bh); 18661da177e4SLinus Torvalds if (buffer_new(bh)) { 18671da177e4SLinus Torvalds /* blockdev mappings never come here */ 18681da177e4SLinus Torvalds clear_buffer_new(bh); 1869e64855c6SJan Kara clean_bdev_bh_alias(bh); 18701da177e4SLinus Torvalds } 18711da177e4SLinus Torvalds } 18721da177e4SLinus Torvalds bh = bh->b_this_page; 18731da177e4SLinus Torvalds block++; 18741da177e4SLinus Torvalds } while (bh != head); 18751da177e4SLinus Torvalds 18761da177e4SLinus Torvalds do { 18771da177e4SLinus Torvalds if (!buffer_mapped(bh)) 18781da177e4SLinus Torvalds continue; 18791da177e4SLinus Torvalds /* 18801da177e4SLinus Torvalds * If it's a fully non-blocking write attempt and we cannot 188153418a18SMatthew Wilcox (Oracle) * lock the buffer then redirty the folio. Note that this can 18825b0830cbSJens Axboe * potentially cause a busy-wait loop from writeback threads 18835b0830cbSJens Axboe * and kswapd activity, but those code paths have their own 18845b0830cbSJens Axboe * higher-level throttling. 18851da177e4SLinus Torvalds */ 18861b430beeSWu Fengguang if (wbc->sync_mode != WB_SYNC_NONE) { 18871da177e4SLinus Torvalds lock_buffer(bh); 1888ca5de404SNick Piggin } else if (!trylock_buffer(bh)) { 188953418a18SMatthew Wilcox (Oracle) folio_redirty_for_writepage(wbc, folio); 18901da177e4SLinus Torvalds continue; 18911da177e4SLinus Torvalds } 18921da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) { 189314059f66SMatthew Wilcox (Oracle) mark_buffer_async_write_endio(bh, 189414059f66SMatthew Wilcox (Oracle) end_buffer_async_write); 18951da177e4SLinus Torvalds } else { 18961da177e4SLinus Torvalds unlock_buffer(bh); 18971da177e4SLinus Torvalds } 18981da177e4SLinus Torvalds } while ((bh = bh->b_this_page) != head); 18991da177e4SLinus Torvalds 19001da177e4SLinus Torvalds /* 190153418a18SMatthew Wilcox (Oracle) * The folio and its buffers are protected by the writeback flag, 190253418a18SMatthew Wilcox (Oracle) * so we can drop the bh refcounts early. 
19031da177e4SLinus Torvalds */ 190453418a18SMatthew Wilcox (Oracle) BUG_ON(folio_test_writeback(folio)); 190553418a18SMatthew Wilcox (Oracle) folio_start_writeback(folio); 19061da177e4SLinus Torvalds 19071da177e4SLinus Torvalds do { 19081da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 19091da177e4SLinus Torvalds if (buffer_async_write(bh)) { 191044981351SBart Van Assche submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, 191144981351SBart Van Assche inode->i_write_hint, wbc); 19121da177e4SLinus Torvalds nr_underway++; 1913ad576e63SNick Piggin } 19141da177e4SLinus Torvalds bh = next; 19151da177e4SLinus Torvalds } while (bh != head); 191653418a18SMatthew Wilcox (Oracle) folio_unlock(folio); 19171da177e4SLinus Torvalds 19181da177e4SLinus Torvalds err = 0; 19191da177e4SLinus Torvalds done: 19201da177e4SLinus Torvalds if (nr_underway == 0) { 19211da177e4SLinus Torvalds /* 192253418a18SMatthew Wilcox (Oracle) * The folio was marked dirty, but the buffers were 19231da177e4SLinus Torvalds * clean. Someone wrote them back by hand with 192479f59784SZhang Yi * write_dirty_buffer/submit_bh. A rare case. 19251da177e4SLinus Torvalds */ 192653418a18SMatthew Wilcox (Oracle) folio_end_writeback(folio); 19273d67f2d7SNick Piggin 19281da177e4SLinus Torvalds /* 192953418a18SMatthew Wilcox (Oracle) * The folio and buffer_heads can be released at any time from 19301da177e4SLinus Torvalds * here on. 19311da177e4SLinus Torvalds */ 19321da177e4SLinus Torvalds } 19331da177e4SLinus Torvalds return err; 19341da177e4SLinus Torvalds 19351da177e4SLinus Torvalds recover: 19361da177e4SLinus Torvalds /* 19371da177e4SLinus Torvalds * ENOSPC, or some other error. We may already have added some 19381da177e4SLinus Torvalds * blocks to the file, so we need to write these out to avoid 19391da177e4SLinus Torvalds * exposing stale data. 194053418a18SMatthew Wilcox (Oracle) * The folio is currently locked and not marked for writeback 19411da177e4SLinus Torvalds */ 19421da177e4SLinus Torvalds bh = head; 19431da177e4SLinus Torvalds /* Recovery: lock and submit the mapped buffers */ 19441da177e4SLinus Torvalds do { 194529a814d2SAlex Tomas if (buffer_mapped(bh) && buffer_dirty(bh) && 194629a814d2SAlex Tomas !buffer_delay(bh)) { 19471da177e4SLinus Torvalds lock_buffer(bh); 194814059f66SMatthew Wilcox (Oracle) mark_buffer_async_write_endio(bh, 194914059f66SMatthew Wilcox (Oracle) end_buffer_async_write); 19501da177e4SLinus Torvalds } else { 19511da177e4SLinus Torvalds /* 19521da177e4SLinus Torvalds * The buffer may have been set dirty during 195353418a18SMatthew Wilcox (Oracle) * attachment to a dirty folio. 
19541da177e4SLinus Torvalds */ 19551da177e4SLinus Torvalds clear_buffer_dirty(bh); 19561da177e4SLinus Torvalds } 19571da177e4SLinus Torvalds } while ((bh = bh->b_this_page) != head); 195853418a18SMatthew Wilcox (Oracle) folio_set_error(folio); 195953418a18SMatthew Wilcox (Oracle) BUG_ON(folio_test_writeback(folio)); 196053418a18SMatthew Wilcox (Oracle) mapping_set_error(folio->mapping, err); 196153418a18SMatthew Wilcox (Oracle) folio_start_writeback(folio); 19621da177e4SLinus Torvalds do { 19631da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 19641da177e4SLinus Torvalds if (buffer_async_write(bh)) { 19651da177e4SLinus Torvalds clear_buffer_dirty(bh); 196644981351SBart Van Assche submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, 196744981351SBart Van Assche inode->i_write_hint, wbc); 19681da177e4SLinus Torvalds nr_underway++; 1969ad576e63SNick Piggin } 19701da177e4SLinus Torvalds bh = next; 19711da177e4SLinus Torvalds } while (bh != head); 197253418a18SMatthew Wilcox (Oracle) folio_unlock(folio); 19731da177e4SLinus Torvalds goto done; 19741da177e4SLinus Torvalds } 197553418a18SMatthew Wilcox (Oracle) EXPORT_SYMBOL(__block_write_full_folio); 19761da177e4SLinus Torvalds 1977afddba49SNick Piggin /* 19784a9622f2SMatthew Wilcox (Oracle) * If a folio has any new buffers, zero them out here, and mark them uptodate 1979afddba49SNick Piggin * and dirty so they'll be written out (in order to prevent uninitialised 1980afddba49SNick Piggin * block data from leaking). And clear the new bit. 1981afddba49SNick Piggin */ 19824a9622f2SMatthew Wilcox (Oracle) void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to) 1983afddba49SNick Piggin { 19844a9622f2SMatthew Wilcox (Oracle) size_t block_start, block_end; 1985afddba49SNick Piggin struct buffer_head *head, *bh; 1986afddba49SNick Piggin 19874a9622f2SMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio)); 19884a9622f2SMatthew Wilcox (Oracle) head = folio_buffers(folio); 19894a9622f2SMatthew Wilcox (Oracle) if (!head) 1990afddba49SNick Piggin return; 1991afddba49SNick Piggin 19924a9622f2SMatthew Wilcox (Oracle) bh = head; 1993afddba49SNick Piggin block_start = 0; 1994afddba49SNick Piggin do { 1995afddba49SNick Piggin block_end = block_start + bh->b_size; 1996afddba49SNick Piggin 1997afddba49SNick Piggin if (buffer_new(bh)) { 1998afddba49SNick Piggin if (block_end > from && block_start < to) { 19994a9622f2SMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio)) { 20004a9622f2SMatthew Wilcox (Oracle) size_t start, xend; 2001afddba49SNick Piggin 2002afddba49SNick Piggin start = max(from, block_start); 20034a9622f2SMatthew Wilcox (Oracle) xend = min(to, block_end); 2004afddba49SNick Piggin 20054a9622f2SMatthew Wilcox (Oracle) folio_zero_segment(folio, start, xend); 2006afddba49SNick Piggin set_buffer_uptodate(bh); 2007afddba49SNick Piggin } 2008afddba49SNick Piggin 2009afddba49SNick Piggin clear_buffer_new(bh); 2010afddba49SNick Piggin mark_buffer_dirty(bh); 2011afddba49SNick Piggin } 2012afddba49SNick Piggin } 2013afddba49SNick Piggin 2014afddba49SNick Piggin block_start = block_end; 2015afddba49SNick Piggin bh = bh->b_this_page; 2016afddba49SNick Piggin } while (bh != head); 2017afddba49SNick Piggin } 20184a9622f2SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_zero_new_buffers); 2019afddba49SNick Piggin 20204aa8cdd5SChristoph Hellwig static int 2021ae259a9cSChristoph Hellwig iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, 20226d49cc85SChristoph Hellwig const struct iomap *iomap) 2023ae259a9cSChristoph 
Hellwig { 202480844194SMatthew Wilcox (Oracle) loff_t offset = (loff_t)block << inode->i_blkbits; 2025ae259a9cSChristoph Hellwig 2026ae259a9cSChristoph Hellwig bh->b_bdev = iomap->bdev; 2027ae259a9cSChristoph Hellwig 2028ae259a9cSChristoph Hellwig /* 2029ae259a9cSChristoph Hellwig * Block points to offset in file we need to map, iomap contains 2030ae259a9cSChristoph Hellwig * the offset at which the map starts. If the map ends before the 2031ae259a9cSChristoph Hellwig * current block, then do not map the buffer and let the caller 2032ae259a9cSChristoph Hellwig * handle it. 2033ae259a9cSChristoph Hellwig */ 20344aa8cdd5SChristoph Hellwig if (offset >= iomap->offset + iomap->length) 20354aa8cdd5SChristoph Hellwig return -EIO; 2036ae259a9cSChristoph Hellwig 2037ae259a9cSChristoph Hellwig switch (iomap->type) { 2038ae259a9cSChristoph Hellwig case IOMAP_HOLE: 2039ae259a9cSChristoph Hellwig /* 2040ae259a9cSChristoph Hellwig * If the buffer is not up to date or beyond the current EOF, 2041ae259a9cSChristoph Hellwig * we need to mark it as new to ensure sub-block zeroing is 2042ae259a9cSChristoph Hellwig * executed if necessary. 2043ae259a9cSChristoph Hellwig */ 2044ae259a9cSChristoph Hellwig if (!buffer_uptodate(bh) || 2045ae259a9cSChristoph Hellwig (offset >= i_size_read(inode))) 2046ae259a9cSChristoph Hellwig set_buffer_new(bh); 20474aa8cdd5SChristoph Hellwig return 0; 2048ae259a9cSChristoph Hellwig case IOMAP_DELALLOC: 2049ae259a9cSChristoph Hellwig if (!buffer_uptodate(bh) || 2050ae259a9cSChristoph Hellwig (offset >= i_size_read(inode))) 2051ae259a9cSChristoph Hellwig set_buffer_new(bh); 2052ae259a9cSChristoph Hellwig set_buffer_uptodate(bh); 2053ae259a9cSChristoph Hellwig set_buffer_mapped(bh); 2054ae259a9cSChristoph Hellwig set_buffer_delay(bh); 20554aa8cdd5SChristoph Hellwig return 0; 2056ae259a9cSChristoph Hellwig case IOMAP_UNWRITTEN: 2057ae259a9cSChristoph Hellwig /* 20583d7b6b21SAndreas Gruenbacher * For unwritten regions, we always need to ensure that regions 20593d7b6b21SAndreas Gruenbacher * in the block we are not writing to are zeroed. Mark the 20603d7b6b21SAndreas Gruenbacher * buffer as new to ensure this. 2061ae259a9cSChristoph Hellwig */ 2062ae259a9cSChristoph Hellwig set_buffer_new(bh); 2063ae259a9cSChristoph Hellwig set_buffer_unwritten(bh); 2064df561f66SGustavo A. R. Silva fallthrough; 2065ae259a9cSChristoph Hellwig case IOMAP_MAPPED: 20663d7b6b21SAndreas Gruenbacher if ((iomap->flags & IOMAP_F_NEW) || 2067381c0432SChristoph Hellwig offset >= i_size_read(inode)) { 2068381c0432SChristoph Hellwig /* 2069381c0432SChristoph Hellwig * This can happen if truncating the block device races 2070381c0432SChristoph Hellwig * with the check in the caller as i_size updates on 2071381c0432SChristoph Hellwig * block devices aren't synchronized by i_rwsem for 2072381c0432SChristoph Hellwig * block devices. 
2073381c0432SChristoph Hellwig */ 2074381c0432SChristoph Hellwig if (S_ISBLK(inode->i_mode)) 2075381c0432SChristoph Hellwig return -EIO; 2076ae259a9cSChristoph Hellwig set_buffer_new(bh); 2077381c0432SChristoph Hellwig } 207819fe5f64SAndreas Gruenbacher bh->b_blocknr = (iomap->addr + offset - iomap->offset) >> 207919fe5f64SAndreas Gruenbacher inode->i_blkbits; 2080ae259a9cSChristoph Hellwig set_buffer_mapped(bh); 20814aa8cdd5SChristoph Hellwig return 0; 20824aa8cdd5SChristoph Hellwig default: 20834aa8cdd5SChristoph Hellwig WARN_ON_ONCE(1); 20844aa8cdd5SChristoph Hellwig return -EIO; 2085ae259a9cSChristoph Hellwig } 2086ae259a9cSChristoph Hellwig } 2087ae259a9cSChristoph Hellwig 2088d1bd0b4eSMatthew Wilcox (Oracle) int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len, 20896d49cc85SChristoph Hellwig get_block_t *get_block, const struct iomap *iomap) 20901da177e4SLinus Torvalds { 2091b0619401SMatthew Wilcox (Oracle) size_t from = offset_in_folio(folio, pos); 2092b0619401SMatthew Wilcox (Oracle) size_t to = from + len; 2093d1bd0b4eSMatthew Wilcox (Oracle) struct inode *inode = folio->mapping->host; 2094b0619401SMatthew Wilcox (Oracle) size_t block_start, block_end; 20951da177e4SLinus Torvalds sector_t block; 20961da177e4SLinus Torvalds int err = 0; 2097b0619401SMatthew Wilcox (Oracle) size_t blocksize; 20981da177e4SLinus Torvalds struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; 20991da177e4SLinus Torvalds 2100d1bd0b4eSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio)); 2101b0619401SMatthew Wilcox (Oracle) BUG_ON(to > folio_size(folio)); 21021da177e4SLinus Torvalds BUG_ON(from > to); 21031da177e4SLinus Torvalds 2104c6c8c3e7SPankaj Raghav head = folio_create_buffers(folio, inode, 0); 210545bce8f3SLinus Torvalds blocksize = head->b_size; 2106b0619401SMatthew Wilcox (Oracle) block = div_u64(folio_pos(folio), blocksize); 21071da177e4SLinus Torvalds 21081da177e4SLinus Torvalds for (bh = head, block_start = 0; bh != head || !block_start; 21091da177e4SLinus Torvalds block++, block_start=block_end, bh = bh->b_this_page) { 21101da177e4SLinus Torvalds block_end = block_start + blocksize; 21111da177e4SLinus Torvalds if (block_end <= from || block_start >= to) { 2112d1bd0b4eSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) { 21131da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 21141da177e4SLinus Torvalds set_buffer_uptodate(bh); 21151da177e4SLinus Torvalds } 21161da177e4SLinus Torvalds continue; 21171da177e4SLinus Torvalds } 21181da177e4SLinus Torvalds if (buffer_new(bh)) 21191da177e4SLinus Torvalds clear_buffer_new(bh); 21201da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 2121b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 21224aa8cdd5SChristoph Hellwig if (get_block) 21231da177e4SLinus Torvalds err = get_block(inode, block, bh, 1); 21244aa8cdd5SChristoph Hellwig else 21254aa8cdd5SChristoph Hellwig err = iomap_to_bh(inode, block, bh, iomap); 21261da177e4SLinus Torvalds if (err) 2127f3ddbdc6SNick Piggin break; 2128ae259a9cSChristoph Hellwig 21291da177e4SLinus Torvalds if (buffer_new(bh)) { 2130e64855c6SJan Kara clean_bdev_bh_alias(bh); 2131d1bd0b4eSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) { 2132637aff46SNick Piggin clear_buffer_new(bh); 21331da177e4SLinus Torvalds set_buffer_uptodate(bh); 2134637aff46SNick Piggin mark_buffer_dirty(bh); 21351da177e4SLinus Torvalds continue; 21361da177e4SLinus Torvalds } 2137eebd2aa3SChristoph Lameter if (block_end > to || block_start < from) 2138d1bd0b4eSMatthew Wilcox (Oracle) 
folio_zero_segments(folio, 2139eebd2aa3SChristoph Lameter to, block_end, 2140eebd2aa3SChristoph Lameter block_start, from); 21411da177e4SLinus Torvalds continue; 21421da177e4SLinus Torvalds } 21431da177e4SLinus Torvalds } 2144d1bd0b4eSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) { 21451da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 21461da177e4SLinus Torvalds set_buffer_uptodate(bh); 21471da177e4SLinus Torvalds continue; 21481da177e4SLinus Torvalds } 21491da177e4SLinus Torvalds if (!buffer_uptodate(bh) && !buffer_delay(bh) && 215033a266ddSDavid Chinner !buffer_unwritten(bh) && 21511da177e4SLinus Torvalds (block_start < from || block_end > to)) { 2152e7ea1129SZhang Yi bh_read_nowait(bh, 0); 21531da177e4SLinus Torvalds *wait_bh++=bh; 21541da177e4SLinus Torvalds } 21551da177e4SLinus Torvalds } 21561da177e4SLinus Torvalds /* 21571da177e4SLinus Torvalds * If we issued read requests - let them complete. 21581da177e4SLinus Torvalds */ 21591da177e4SLinus Torvalds while(wait_bh > wait) { 21601da177e4SLinus Torvalds wait_on_buffer(*--wait_bh); 21611da177e4SLinus Torvalds if (!buffer_uptodate(*wait_bh)) 2162f3ddbdc6SNick Piggin err = -EIO; 21631da177e4SLinus Torvalds } 2164f9f07b6cSJan Kara if (unlikely(err)) 21654a9622f2SMatthew Wilcox (Oracle) folio_zero_new_buffers(folio, from, to); 21661da177e4SLinus Torvalds return err; 21671da177e4SLinus Torvalds } 2168ae259a9cSChristoph Hellwig 2169ae259a9cSChristoph Hellwig int __block_write_begin(struct page *page, loff_t pos, unsigned len, 2170ae259a9cSChristoph Hellwig get_block_t *get_block) 2171ae259a9cSChristoph Hellwig { 2172d1bd0b4eSMatthew Wilcox (Oracle) return __block_write_begin_int(page_folio(page), pos, len, get_block, 2173d1bd0b4eSMatthew Wilcox (Oracle) NULL); 2174ae259a9cSChristoph Hellwig } 2175ebdec241SChristoph Hellwig EXPORT_SYMBOL(__block_write_begin); 21761da177e4SLinus Torvalds 2177a524fcfeSBean Huo static void __block_commit_write(struct folio *folio, size_t from, size_t to) 21781da177e4SLinus Torvalds { 21798c6cb3e3SMatthew Wilcox (Oracle) size_t block_start, block_end; 21808c6cb3e3SMatthew Wilcox (Oracle) bool partial = false; 21811da177e4SLinus Torvalds unsigned blocksize; 21821da177e4SLinus Torvalds struct buffer_head *bh, *head; 21831da177e4SLinus Torvalds 21848c6cb3e3SMatthew Wilcox (Oracle) bh = head = folio_buffers(folio); 218545bce8f3SLinus Torvalds blocksize = bh->b_size; 21861da177e4SLinus Torvalds 218745bce8f3SLinus Torvalds block_start = 0; 218845bce8f3SLinus Torvalds do { 21891da177e4SLinus Torvalds block_end = block_start + blocksize; 21901da177e4SLinus Torvalds if (block_end <= from || block_start >= to) { 21911da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 21928c6cb3e3SMatthew Wilcox (Oracle) partial = true; 21931da177e4SLinus Torvalds } else { 21941da177e4SLinus Torvalds set_buffer_uptodate(bh); 21951da177e4SLinus Torvalds mark_buffer_dirty(bh); 21961da177e4SLinus Torvalds } 21974ebd3aecSYang Guo if (buffer_new(bh)) 2198afddba49SNick Piggin clear_buffer_new(bh); 219945bce8f3SLinus Torvalds 220045bce8f3SLinus Torvalds block_start = block_end; 220145bce8f3SLinus Torvalds bh = bh->b_this_page; 220245bce8f3SLinus Torvalds } while (bh != head); 22031da177e4SLinus Torvalds 22041da177e4SLinus Torvalds /* 22051da177e4SLinus Torvalds * If this is a partial write which happened to make all buffers 22062c69e205SMatthew Wilcox (Oracle) * uptodate then we can optimize away a bogus read_folio() for 22078c6cb3e3SMatthew Wilcox (Oracle) * the next read(). 
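* (A fully uptodate folio is served straight from the page cache and never reaches ->read_folio().)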
Here we 'discover' whether the folio went 22081da177e4SLinus Torvalds * uptodate as a result of this (potentially partial) write. 22091da177e4SLinus Torvalds */ 22101da177e4SLinus Torvalds if (!partial) 22118c6cb3e3SMatthew Wilcox (Oracle) folio_mark_uptodate(folio); 22121da177e4SLinus Torvalds } 22131da177e4SLinus Torvalds 22141da177e4SLinus Torvalds /* 2215155130a4SChristoph Hellwig * block_write_begin takes care of the basic task of block allocation and 2216155130a4SChristoph Hellwig * bringing partial write blocks uptodate first. 2217155130a4SChristoph Hellwig * 22187bb46a67Snpiggin@suse.de * The filesystem needs to handle block truncation upon failure. 2219afddba49SNick Piggin */ 2220155130a4SChristoph Hellwig int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, 2221b3992d1eSMatthew Wilcox (Oracle) struct page **pagep, get_block_t *get_block) 2222afddba49SNick Piggin { 222309cbfeafSKirill A. Shutemov pgoff_t index = pos >> PAGE_SHIFT; 2224afddba49SNick Piggin struct page *page; 22256e1db88dSChristoph Hellwig int status; 2226afddba49SNick Piggin 2227b7446e7cSMatthew Wilcox (Oracle) page = grab_cache_page_write_begin(mapping, index); 22286e1db88dSChristoph Hellwig if (!page) 22296e1db88dSChristoph Hellwig return -ENOMEM; 2230afddba49SNick Piggin 22316e1db88dSChristoph Hellwig status = __block_write_begin(page, pos, len, get_block); 2232afddba49SNick Piggin if (unlikely(status)) { 2233afddba49SNick Piggin unlock_page(page); 223409cbfeafSKirill A. Shutemov put_page(page); 22356e1db88dSChristoph Hellwig page = NULL; 2236afddba49SNick Piggin } 2237afddba49SNick Piggin 22386e1db88dSChristoph Hellwig *pagep = page; 2239afddba49SNick Piggin return status; 2240afddba49SNick Piggin } 2241afddba49SNick Piggin EXPORT_SYMBOL(block_write_begin); 2242afddba49SNick Piggin 2243afddba49SNick Piggin int block_write_end(struct file *file, struct address_space *mapping, 2244afddba49SNick Piggin loff_t pos, unsigned len, unsigned copied, 2245afddba49SNick Piggin struct page *page, void *fsdata) 2246afddba49SNick Piggin { 22478c6cb3e3SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 22488c6cb3e3SMatthew Wilcox (Oracle) size_t start = pos - folio_pos(folio); 2249afddba49SNick Piggin 2250afddba49SNick Piggin if (unlikely(copied < len)) { 2251afddba49SNick Piggin /* 22522c69e205SMatthew Wilcox (Oracle) * The buffers that were written will now be uptodate, so 22532c69e205SMatthew Wilcox (Oracle) * we don't have to worry about a read_folio reading them 22542c69e205SMatthew Wilcox (Oracle) * and overwriting a partial write. However if we have 22552c69e205SMatthew Wilcox (Oracle) * encountered a short write and only partially written 22562c69e205SMatthew Wilcox (Oracle) * into a buffer, it will not be marked uptodate, so a 22572c69e205SMatthew Wilcox (Oracle) * read_folio might come in and destroy our partial write. 2258afddba49SNick Piggin * 2259afddba49SNick Piggin * Do the simplest thing, and just treat any short write to a 22608c6cb3e3SMatthew Wilcox (Oracle) * non uptodate folio as a zero-length write, and force the 2261afddba49SNick Piggin * caller to redo the whole thing. 
2262afddba49SNick Piggin */ 22638c6cb3e3SMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio)) 2264afddba49SNick Piggin copied = 0; 2265afddba49SNick Piggin 22664a9622f2SMatthew Wilcox (Oracle) folio_zero_new_buffers(folio, start+copied, start+len); 2267afddba49SNick Piggin } 22688c6cb3e3SMatthew Wilcox (Oracle) flush_dcache_folio(folio); 2269afddba49SNick Piggin 2270afddba49SNick Piggin /* This could be a short (even 0-length) commit */ 2271489b7e72SBean Huo __block_commit_write(folio, start, start + copied); 2272afddba49SNick Piggin 2273afddba49SNick Piggin return copied; 2274afddba49SNick Piggin } 2275afddba49SNick Piggin EXPORT_SYMBOL(block_write_end); 2276afddba49SNick Piggin 2277afddba49SNick Piggin int generic_write_end(struct file *file, struct address_space *mapping, 2278afddba49SNick Piggin loff_t pos, unsigned len, unsigned copied, 2279afddba49SNick Piggin struct page *page, void *fsdata) 2280afddba49SNick Piggin { 22818af54f29SChristoph Hellwig struct inode *inode = mapping->host; 22828af54f29SChristoph Hellwig loff_t old_size = inode->i_size; 22838af54f29SChristoph Hellwig bool i_size_changed = false; 22848af54f29SChristoph Hellwig 2285afddba49SNick Piggin copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); 22868af54f29SChristoph Hellwig 22878af54f29SChristoph Hellwig /* 22888af54f29SChristoph Hellwig * No need to use i_size_read() here, the i_size cannot change under us 22898af54f29SChristoph Hellwig * because we hold i_rwsem. 22908af54f29SChristoph Hellwig * 22918af54f29SChristoph Hellwig * But it's important to update i_size while still holding page lock: 22928af54f29SChristoph Hellwig * page writeout could otherwise come in and zero beyond i_size. 22938af54f29SChristoph Hellwig */ 22948af54f29SChristoph Hellwig if (pos + copied > inode->i_size) { 22958af54f29SChristoph Hellwig i_size_write(inode, pos + copied); 22968af54f29SChristoph Hellwig i_size_changed = true; 22978af54f29SChristoph Hellwig } 22988af54f29SChristoph Hellwig 22998af54f29SChristoph Hellwig unlock_page(page); 23007a77dad7SAndreas Gruenbacher put_page(page); 23018af54f29SChristoph Hellwig 23028af54f29SChristoph Hellwig if (old_size < pos) 23038af54f29SChristoph Hellwig pagecache_isize_extended(inode, old_size, pos); 23048af54f29SChristoph Hellwig /* 23058af54f29SChristoph Hellwig * Don't mark the inode dirty under page lock. First, it unnecessarily 23068af54f29SChristoph Hellwig * makes the holding time of page lock longer. Second, it forces lock 23078af54f29SChristoph Hellwig * ordering of page lock and transaction start for journaling 23088af54f29SChristoph Hellwig * filesystems. 23098af54f29SChristoph Hellwig */ 23108af54f29SChristoph Hellwig if (i_size_changed) 23118af54f29SChristoph Hellwig mark_inode_dirty(inode); 231226ddb1f4SAndreas Gruenbacher return copied; 2313afddba49SNick Piggin } 2314afddba49SNick Piggin EXPORT_SYMBOL(generic_write_end); 2315afddba49SNick Piggin 2316afddba49SNick Piggin /* 23172e7e80f7SMatthew Wilcox (Oracle) * block_is_partially_uptodate checks whether buffers within a folio are 23188ab22b9aSHisashi Hifumi * uptodate or not. 23198ab22b9aSHisashi Hifumi * 23202e7e80f7SMatthew Wilcox (Oracle) * Returns true if all buffers which correspond to the specified part 23212e7e80f7SMatthew Wilcox (Oracle) * of the folio are uptodate. 
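* This is the ->is_partially_uptodate address_space operation: it lets the read path serve a partially-populated folio without issuing I/O for the parts the caller does not need.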
23228ab22b9aSHisashi Hifumi */ 23232e7e80f7SMatthew Wilcox (Oracle) bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count) 23248ab22b9aSHisashi Hifumi { 23258ab22b9aSHisashi Hifumi unsigned block_start, block_end, blocksize; 23268ab22b9aSHisashi Hifumi unsigned to; 23278ab22b9aSHisashi Hifumi struct buffer_head *bh, *head; 23282e7e80f7SMatthew Wilcox (Oracle) bool ret = true; 23298ab22b9aSHisashi Hifumi 23302e7e80f7SMatthew Wilcox (Oracle) head = folio_buffers(folio); 23312e7e80f7SMatthew Wilcox (Oracle) if (!head) 23322e7e80f7SMatthew Wilcox (Oracle) return false; 233345bce8f3SLinus Torvalds blocksize = head->b_size; 23342e7e80f7SMatthew Wilcox (Oracle) to = min_t(unsigned, folio_size(folio) - from, count); 23358ab22b9aSHisashi Hifumi to = from + to; 23362e7e80f7SMatthew Wilcox (Oracle) if (from < blocksize && to > folio_size(folio) - blocksize) 23372e7e80f7SMatthew Wilcox (Oracle) return false; 23388ab22b9aSHisashi Hifumi 23398ab22b9aSHisashi Hifumi bh = head; 23408ab22b9aSHisashi Hifumi block_start = 0; 23418ab22b9aSHisashi Hifumi do { 23428ab22b9aSHisashi Hifumi block_end = block_start + blocksize; 23438ab22b9aSHisashi Hifumi if (block_end > from && block_start < to) { 23448ab22b9aSHisashi Hifumi if (!buffer_uptodate(bh)) { 23452e7e80f7SMatthew Wilcox (Oracle) ret = false; 23468ab22b9aSHisashi Hifumi break; 23478ab22b9aSHisashi Hifumi } 23488ab22b9aSHisashi Hifumi if (block_end >= to) 23498ab22b9aSHisashi Hifumi break; 23508ab22b9aSHisashi Hifumi } 23518ab22b9aSHisashi Hifumi block_start = block_end; 23528ab22b9aSHisashi Hifumi bh = bh->b_this_page; 23538ab22b9aSHisashi Hifumi } while (bh != head); 23548ab22b9aSHisashi Hifumi 23558ab22b9aSHisashi Hifumi return ret; 23568ab22b9aSHisashi Hifumi } 23578ab22b9aSHisashi Hifumi EXPORT_SYMBOL(block_is_partially_uptodate); 23588ab22b9aSHisashi Hifumi 23598ab22b9aSHisashi Hifumi /* 23602c69e205SMatthew Wilcox (Oracle) * Generic "read_folio" function for block devices that have the normal 23611da177e4SLinus Torvalds * get_block functionality. This is most of the block device filesystems. 23622c69e205SMatthew Wilcox (Oracle) * Reads the folio asynchronously --- the unlock_buffer() and 23631da177e4SLinus Torvalds * set/clear_buffer_uptodate() functions propagate buffer state into the 23642c69e205SMatthew Wilcox (Oracle) * folio once IO has completed. 23651da177e4SLinus Torvalds */ 23662c69e205SMatthew Wilcox (Oracle) int block_read_full_folio(struct folio *folio, get_block_t *get_block) 23671da177e4SLinus Torvalds { 23682c69e205SMatthew Wilcox (Oracle) struct inode *inode = folio->mapping->host; 23691da177e4SLinus Torvalds sector_t iblock, lblock; 23701da177e4SLinus Torvalds struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; 2371fa399c31SMatthew Wilcox (Oracle) size_t blocksize; 23721da177e4SLinus Torvalds int nr, i; 23731da177e4SLinus Torvalds int fully_mapped = 1; 2374b7a6eb22SMatthew Wilcox (Oracle) bool page_error = false; 23754fa512ceSEric Biggers loff_t limit = i_size_read(inode); 23764fa512ceSEric Biggers 23774fa512ceSEric Biggers /* This is needed for ext4. 
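(fs-verity stores its Merkle tree blocks past i_size, so reads of a verity file must be allowed beyond EOF.)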
*/ 23784fa512ceSEric Biggers if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode)) 23794fa512ceSEric Biggers limit = inode->i_sb->s_maxbytes; 23801da177e4SLinus Torvalds 23812c69e205SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_large(folio), folio); 23822c69e205SMatthew Wilcox (Oracle) 2383c6c8c3e7SPankaj Raghav head = folio_create_buffers(folio, inode, 0); 238445bce8f3SLinus Torvalds blocksize = head->b_size; 23851da177e4SLinus Torvalds 2386fa399c31SMatthew Wilcox (Oracle) iblock = div_u64(folio_pos(folio), blocksize); 2387fa399c31SMatthew Wilcox (Oracle) lblock = div_u64(limit + blocksize - 1, blocksize); 23881da177e4SLinus Torvalds bh = head; 23891da177e4SLinus Torvalds nr = 0; 23901da177e4SLinus Torvalds i = 0; 23911da177e4SLinus Torvalds 23921da177e4SLinus Torvalds do { 23931da177e4SLinus Torvalds if (buffer_uptodate(bh)) 23941da177e4SLinus Torvalds continue; 23951da177e4SLinus Torvalds 23961da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 2397c64610baSAndrew Morton int err = 0; 2398c64610baSAndrew Morton 23991da177e4SLinus Torvalds fully_mapped = 0; 24001da177e4SLinus Torvalds if (iblock < lblock) { 2401b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 2402c64610baSAndrew Morton err = get_block(inode, iblock, bh, 0); 2403b7a6eb22SMatthew Wilcox (Oracle) if (err) { 24042c69e205SMatthew Wilcox (Oracle) folio_set_error(folio); 2405b7a6eb22SMatthew Wilcox (Oracle) page_error = true; 2406b7a6eb22SMatthew Wilcox (Oracle) } 24071da177e4SLinus Torvalds } 24081da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 24092c69e205SMatthew Wilcox (Oracle) folio_zero_range(folio, i * blocksize, 24102c69e205SMatthew Wilcox (Oracle) blocksize); 2411c64610baSAndrew Morton if (!err) 24121da177e4SLinus Torvalds set_buffer_uptodate(bh); 24131da177e4SLinus Torvalds continue; 24141da177e4SLinus Torvalds } 24151da177e4SLinus Torvalds /* 24161da177e4SLinus Torvalds * get_block() might have updated the buffer 24171da177e4SLinus Torvalds * synchronously 24181da177e4SLinus Torvalds */ 24191da177e4SLinus Torvalds if (buffer_uptodate(bh)) 24201da177e4SLinus Torvalds continue; 24211da177e4SLinus Torvalds } 24221da177e4SLinus Torvalds arr[nr++] = bh; 24231da177e4SLinus Torvalds } while (i++, iblock++, (bh = bh->b_this_page) != head); 24241da177e4SLinus Torvalds 24251da177e4SLinus Torvalds if (fully_mapped) 24262c69e205SMatthew Wilcox (Oracle) folio_set_mappedtodisk(folio); 24271da177e4SLinus Torvalds 24281da177e4SLinus Torvalds if (!nr) { 24291da177e4SLinus Torvalds /* 24306ba924d3SMatthew Wilcox (Oracle) * All buffers are uptodate or get_block() returned an 24316ba924d3SMatthew Wilcox (Oracle) * error when trying to map them - we can finish the read. 24321da177e4SLinus Torvalds */ 24336ba924d3SMatthew Wilcox (Oracle) folio_end_read(folio, !page_error); 24341da177e4SLinus Torvalds return 0; 24351da177e4SLinus Torvalds } 24361da177e4SLinus Torvalds 24371da177e4SLinus Torvalds /* Stage two: lock the buffers */ 24381da177e4SLinus Torvalds for (i = 0; i < nr; i++) { 24391da177e4SLinus Torvalds bh = arr[i]; 24401da177e4SLinus Torvalds lock_buffer(bh); 24411da177e4SLinus Torvalds mark_buffer_async_read(bh); 24421da177e4SLinus Torvalds } 24431da177e4SLinus Torvalds 24441da177e4SLinus Torvalds /* 24451da177e4SLinus Torvalds * Stage 3: start the IO. Check for uptodateness 24461da177e4SLinus Torvalds * inside the buffer lock in case another process reading 24471da177e4SLinus Torvalds * the underlying blockdev brought it uptodate (the sct fix). 
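* Buffers found already uptodate here are completed inline; only the rest are submitted for I/O.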
24481da177e4SLinus Torvalds */ 24491da177e4SLinus Torvalds for (i = 0; i < nr; i++) { 24501da177e4SLinus Torvalds bh = arr[i]; 24511da177e4SLinus Torvalds if (buffer_uptodate(bh)) 24521da177e4SLinus Torvalds end_buffer_async_read(bh, 1); 24531da177e4SLinus Torvalds else 24541420c4a5SBart Van Assche submit_bh(REQ_OP_READ, bh); 24551da177e4SLinus Torvalds } 24561da177e4SLinus Torvalds return 0; 24571da177e4SLinus Torvalds } 24582c69e205SMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_read_full_folio); 24591da177e4SLinus Torvalds 24601da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding 246189e10787SNick Piggin * truncates. Uses filesystem pagecache writes to allow the filesystem to 24621da177e4SLinus Torvalds * deal with the hole. 24631da177e4SLinus Torvalds */ 246489e10787SNick Piggin int generic_cont_expand_simple(struct inode *inode, loff_t size) 24651da177e4SLinus Torvalds { 24661da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 246753b524b8SMatthew Wilcox (Oracle) const struct address_space_operations *aops = mapping->a_ops; 24681da177e4SLinus Torvalds struct page *page; 24691468c6f4SAlexander Potapenko void *fsdata = NULL; 24701da177e4SLinus Torvalds int err; 24711da177e4SLinus Torvalds 2472c08d3b0eSnpiggin@suse.de err = inode_newsize_ok(inode, size); 2473c08d3b0eSnpiggin@suse.de if (err) 24741da177e4SLinus Torvalds goto out; 24751da177e4SLinus Torvalds 247653b524b8SMatthew Wilcox (Oracle) err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata); 247789e10787SNick Piggin if (err) 247805eb0b51SOGAWA Hirofumi goto out; 247905eb0b51SOGAWA Hirofumi 248053b524b8SMatthew Wilcox (Oracle) err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata); 248189e10787SNick Piggin BUG_ON(err > 0); 248205eb0b51SOGAWA Hirofumi 248305eb0b51SOGAWA Hirofumi out: 248405eb0b51SOGAWA Hirofumi return err; 248505eb0b51SOGAWA Hirofumi } 24861fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_cont_expand_simple); 248705eb0b51SOGAWA Hirofumi 2488f1e3af72SAdrian Bunk static int cont_expand_zero(struct file *file, struct address_space *mapping, 248989e10787SNick Piggin loff_t pos, loff_t *bytes) 249005eb0b51SOGAWA Hirofumi { 249189e10787SNick Piggin struct inode *inode = mapping->host; 249253b524b8SMatthew Wilcox (Oracle) const struct address_space_operations *aops = mapping->a_ops; 249393407472SFabian Frederick unsigned int blocksize = i_blocksize(inode); 249489e10787SNick Piggin struct page *page; 24951468c6f4SAlexander Potapenko void *fsdata = NULL; 249689e10787SNick Piggin pgoff_t index, curidx; 249789e10787SNick Piggin loff_t curpos; 249889e10787SNick Piggin unsigned zerofrom, offset, len; 249989e10787SNick Piggin int err = 0; 250005eb0b51SOGAWA Hirofumi 250109cbfeafSKirill A. Shutemov index = pos >> PAGE_SHIFT; 250209cbfeafSKirill A. Shutemov offset = pos & ~PAGE_MASK; 250389e10787SNick Piggin 250409cbfeafSKirill A. Shutemov while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) { 250509cbfeafSKirill A. Shutemov zerofrom = curpos & ~PAGE_MASK; 250689e10787SNick Piggin if (zerofrom & (blocksize-1)) { 250789e10787SNick Piggin *bytes |= (blocksize-1); 250889e10787SNick Piggin (*bytes)++; 250989e10787SNick Piggin } 251009cbfeafSKirill A. 
Shutemov len = PAGE_SIZE - zerofrom; 251189e10787SNick Piggin 251253b524b8SMatthew Wilcox (Oracle) err = aops->write_begin(file, mapping, curpos, len, 251389e10787SNick Piggin &page, &fsdata); 251489e10787SNick Piggin if (err) 251589e10787SNick Piggin goto out; 2516eebd2aa3SChristoph Lameter zero_user(page, zerofrom, len); 251753b524b8SMatthew Wilcox (Oracle) err = aops->write_end(file, mapping, curpos, len, len, 251889e10787SNick Piggin page, fsdata); 251989e10787SNick Piggin if (err < 0) 252089e10787SNick Piggin goto out; 252189e10787SNick Piggin BUG_ON(err != len); 252289e10787SNick Piggin err = 0; 2523061e9746SOGAWA Hirofumi 2524061e9746SOGAWA Hirofumi balance_dirty_pages_ratelimited(mapping); 2525c2ca0fcdSMikulas Patocka 252608d405c8SDavidlohr Bueso if (fatal_signal_pending(current)) { 2527c2ca0fcdSMikulas Patocka err = -EINTR; 2528c2ca0fcdSMikulas Patocka goto out; 2529c2ca0fcdSMikulas Patocka } 253089e10787SNick Piggin } 253189e10787SNick Piggin 253289e10787SNick Piggin /* page covers the boundary, find the boundary offset */ 253389e10787SNick Piggin if (index == curidx) { 253409cbfeafSKirill A. Shutemov zerofrom = curpos & ~PAGE_MASK; 253589e10787SNick Piggin /* if we are about to expand the file, the last block will be filled */ 253689e10787SNick Piggin if (offset <= zerofrom) { 253789e10787SNick Piggin goto out; 253889e10787SNick Piggin } 253989e10787SNick Piggin if (zerofrom & (blocksize-1)) { 254089e10787SNick Piggin *bytes |= (blocksize-1); 254189e10787SNick Piggin (*bytes)++; 254289e10787SNick Piggin } 254389e10787SNick Piggin len = offset - zerofrom; 254489e10787SNick Piggin 254553b524b8SMatthew Wilcox (Oracle) err = aops->write_begin(file, mapping, curpos, len, 254689e10787SNick Piggin &page, &fsdata); 254789e10787SNick Piggin if (err) 254889e10787SNick Piggin goto out; 2549eebd2aa3SChristoph Lameter zero_user(page, zerofrom, len); 255053b524b8SMatthew Wilcox (Oracle) err = aops->write_end(file, mapping, curpos, len, len, 255189e10787SNick Piggin page, fsdata); 255289e10787SNick Piggin if (err < 0) 255389e10787SNick Piggin goto out; 255489e10787SNick Piggin BUG_ON(err != len); 255589e10787SNick Piggin err = 0; 255689e10787SNick Piggin } 255789e10787SNick Piggin out: 255889e10787SNick Piggin return err; 25591da177e4SLinus Torvalds } 25601da177e4SLinus Torvalds 25611da177e4SLinus Torvalds /* 25621da177e4SLinus Torvalds * For moronic filesystems that do not allow holes in files. 25631da177e4SLinus Torvalds * We may have to extend the file. 25641da177e4SLinus Torvalds */ 2565282dc178SChristoph Hellwig int cont_write_begin(struct file *file, struct address_space *mapping, 2566be3bbbc5SMatthew Wilcox (Oracle) loff_t pos, unsigned len, 256789e10787SNick Piggin struct page **pagep, void **fsdata, 256889e10787SNick Piggin get_block_t *get_block, loff_t *bytes) 25691da177e4SLinus Torvalds { 25701da177e4SLinus Torvalds struct inode *inode = mapping->host; 257193407472SFabian Frederick unsigned int blocksize = i_blocksize(inode); 257293407472SFabian Frederick unsigned int zerofrom; 257389e10787SNick Piggin int err; 25741da177e4SLinus Torvalds 257589e10787SNick Piggin err = cont_expand_zero(file, mapping, pos, bytes); 257689e10787SNick Piggin if (err) 2577155130a4SChristoph Hellwig return err; 25781da177e4SLinus Torvalds 257909cbfeafSKirill A. 
Shutemov zerofrom = *bytes & ~PAGE_MASK; 258089e10787SNick Piggin if (pos+len > *bytes && zerofrom & (blocksize-1)) { 25811da177e4SLinus Torvalds *bytes |= (blocksize-1); 25821da177e4SLinus Torvalds (*bytes)++; 25831da177e4SLinus Torvalds } 25841da177e4SLinus Torvalds 2585b3992d1eSMatthew Wilcox (Oracle) return block_write_begin(mapping, pos, len, pagep, get_block); 25861da177e4SLinus Torvalds } 25871fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(cont_write_begin); 25881da177e4SLinus Torvalds 2589a524fcfeSBean Huo void block_commit_write(struct page *page, unsigned from, unsigned to) 25901da177e4SLinus Torvalds { 25918c6cb3e3SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 2592489b7e72SBean Huo __block_commit_write(folio, from, to); 25931da177e4SLinus Torvalds } 25941fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_commit_write); 25951da177e4SLinus Torvalds 259654171690SDavid Chinner /* 259754171690SDavid Chinner * block_page_mkwrite() is not allowed to change the file size as it gets 259854171690SDavid Chinner * called from a page fault handler when a page is first dirtied. Hence we must 259954171690SDavid Chinner * be careful to check for EOF conditions here. We set the page up correctly 260054171690SDavid Chinner * for a written page which means we get ENOSPC checking when writing into 260154171690SDavid Chinner * holes and correct delalloc and unwritten extent mapping on filesystems that 260254171690SDavid Chinner * support these features. 260354171690SDavid Chinner * 260454171690SDavid Chinner * We are not allowed to take i_rwsem here so we have to play games to 260554171690SDavid Chinner * protect against truncate races as the page could now be beyond EOF. Because 26067bb46a67Snpiggin@suse.de * truncate writes the inode size before removing pages, once we have the 260754171690SDavid Chinner * page lock we can determine safely if the page is beyond EOF. If it is not 260854171690SDavid Chinner * beyond EOF, then the page is guaranteed safe against truncation until we 260954171690SDavid Chinner * unlock the page. 2610ea13a864SJan Kara * 261114da9200SJan Kara * Direct callers of this function should protect against filesystem freezing 26125c500029SRoss Zwisler * using the sb_start_pagefault()/sb_end_pagefault() pair.
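* Otherwise a page fault could dirty folios on a filesystem that is frozen for a snapshot.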
261354171690SDavid Chinner */ 26145c500029SRoss Zwisler int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 261554171690SDavid Chinner get_block_t get_block) 261654171690SDavid Chinner { 2617fe181377SMatthew Wilcox (Oracle) struct folio *folio = page_folio(vmf->page); 2618496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 261954171690SDavid Chinner unsigned long end; 262054171690SDavid Chinner loff_t size; 262124da4fabSJan Kara int ret; 262254171690SDavid Chinner 2623fe181377SMatthew Wilcox (Oracle) folio_lock(folio); 262454171690SDavid Chinner size = i_size_read(inode); 2625fe181377SMatthew Wilcox (Oracle) if ((folio->mapping != inode->i_mapping) || 2626fe181377SMatthew Wilcox (Oracle) (folio_pos(folio) >= size)) { 262724da4fabSJan Kara /* We overload EFAULT to mean page got truncated */ 262824da4fabSJan Kara ret = -EFAULT; 262924da4fabSJan Kara goto out_unlock; 263054171690SDavid Chinner } 263154171690SDavid Chinner 2632fe181377SMatthew Wilcox (Oracle) end = folio_size(folio); 2633fe181377SMatthew Wilcox (Oracle) /* folio is wholly or partially inside EOF */ 2634fe181377SMatthew Wilcox (Oracle) if (folio_pos(folio) + end > size) 2635fe181377SMatthew Wilcox (Oracle) end = size - folio_pos(folio); 263654171690SDavid Chinner 2637fe181377SMatthew Wilcox (Oracle) ret = __block_write_begin_int(folio, 0, end, get_block, NULL); 2638a524fcfeSBean Huo if (unlikely(ret)) 263924da4fabSJan Kara goto out_unlock; 2640a524fcfeSBean Huo 2641a524fcfeSBean Huo __block_commit_write(folio, 0, end); 2642a524fcfeSBean Huo 2643fe181377SMatthew Wilcox (Oracle) folio_mark_dirty(folio); 2644fe181377SMatthew Wilcox (Oracle) folio_wait_stable(folio); 264524da4fabSJan Kara return 0; 264624da4fabSJan Kara out_unlock: 2647fe181377SMatthew Wilcox (Oracle) folio_unlock(folio); 264854171690SDavid Chinner return ret; 264954171690SDavid Chinner } 26501fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_page_mkwrite); 26511da177e4SLinus Torvalds 26521da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping, 26531da177e4SLinus Torvalds loff_t from, get_block_t *get_block) 26541da177e4SLinus Torvalds { 265509cbfeafSKirill A. Shutemov pgoff_t index = from >> PAGE_SHIFT; 26561da177e4SLinus Torvalds unsigned blocksize; 265754b21a79SAndrew Morton sector_t iblock; 26586d68f644SMatthew Wilcox (Oracle) size_t offset, length, pos; 26591da177e4SLinus Torvalds struct inode *inode = mapping->host; 26606d68f644SMatthew Wilcox (Oracle) struct folio *folio; 26611da177e4SLinus Torvalds struct buffer_head *bh; 2662dc7cb2d2SJiapeng Chong int err = 0; 26631da177e4SLinus Torvalds 266493407472SFabian Frederick blocksize = i_blocksize(inode); 26656d68f644SMatthew Wilcox (Oracle) length = from & (blocksize - 1); 26661da177e4SLinus Torvalds 26671da177e4SLinus Torvalds /* Block boundary? 
Nothing to do */ 26681da177e4SLinus Torvalds if (!length) 26691da177e4SLinus Torvalds return 0; 26701da177e4SLinus Torvalds 26711da177e4SLinus Torvalds length = blocksize - length; 26724b04646cSMatthew Wilcox (Oracle) iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits; 26731da177e4SLinus Torvalds 26746d68f644SMatthew Wilcox (Oracle) folio = filemap_grab_folio(mapping, index); 26756d68f644SMatthew Wilcox (Oracle) if (IS_ERR(folio)) 26766d68f644SMatthew Wilcox (Oracle) return PTR_ERR(folio); 26771da177e4SLinus Torvalds 26786d68f644SMatthew Wilcox (Oracle) bh = folio_buffers(folio); 26793decb856SMatthew Wilcox (Oracle) if (!bh) 26800a88810dSMatthew Wilcox (Oracle) bh = create_empty_buffers(folio, blocksize, 0); 26811da177e4SLinus Torvalds 26821da177e4SLinus Torvalds /* Find the buffer that contains "offset" */ 26836d68f644SMatthew Wilcox (Oracle) offset = offset_in_folio(folio, from); 26841da177e4SLinus Torvalds pos = blocksize; 26851da177e4SLinus Torvalds while (offset >= pos) { 26861da177e4SLinus Torvalds bh = bh->b_this_page; 26871da177e4SLinus Torvalds iblock++; 26881da177e4SLinus Torvalds pos += blocksize; 26891da177e4SLinus Torvalds } 26901da177e4SLinus Torvalds 26911da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 2692b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 26931da177e4SLinus Torvalds err = get_block(inode, iblock, bh, 0); 26941da177e4SLinus Torvalds if (err) 26951da177e4SLinus Torvalds goto unlock; 26961da177e4SLinus Torvalds /* unmapped? It's a hole - nothing to do */ 26971da177e4SLinus Torvalds if (!buffer_mapped(bh)) 26981da177e4SLinus Torvalds goto unlock; 26991da177e4SLinus Torvalds } 27001da177e4SLinus Torvalds 27011da177e4SLinus Torvalds /* Ok, it's mapped. Make sure it's up-to-date */ 27026d68f644SMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) 27031da177e4SLinus Torvalds set_buffer_uptodate(bh); 27041da177e4SLinus Torvalds 270533a266ddSDavid Chinner if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { 2706e7ea1129SZhang Yi err = bh_read(bh, 0); 27071da177e4SLinus Torvalds /* Uhhuh. Read error. Complain and punt. */ 2708e7ea1129SZhang Yi if (err < 0) 27091da177e4SLinus Torvalds goto unlock; 27101da177e4SLinus Torvalds } 27111da177e4SLinus Torvalds 27126d68f644SMatthew Wilcox (Oracle) folio_zero_range(folio, offset, length); 27131da177e4SLinus Torvalds mark_buffer_dirty(bh); 27141da177e4SLinus Torvalds 27151da177e4SLinus Torvalds unlock: 27166d68f644SMatthew Wilcox (Oracle) folio_unlock(folio); 27176d68f644SMatthew Wilcox (Oracle) folio_put(folio); 2718dc7cb2d2SJiapeng Chong 27191da177e4SLinus Torvalds return err; 27201da177e4SLinus Torvalds } 27211fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_truncate_page); 27221da177e4SLinus Torvalds 27231da177e4SLinus Torvalds /* 27241da177e4SLinus Torvalds * The generic ->writepage function for buffer-backed address_spaces 27251da177e4SLinus Torvalds */ 272617bf23a9SMatthew Wilcox (Oracle) int block_write_full_folio(struct folio *folio, struct writeback_control *wbc, 272717bf23a9SMatthew Wilcox (Oracle) void *get_block) 27281da177e4SLinus Torvalds { 2729bb0ea598SMatthew Wilcox (Oracle) struct inode * const inode = folio->mapping->host; 27301da177e4SLinus Torvalds loff_t i_size = i_size_read(inode); 27311da177e4SLinus Torvalds 2732bb0ea598SMatthew Wilcox (Oracle) /* Is the folio fully inside i_size? 
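Then every byte in it is valid file data and it can be written out as-is.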
*/ 2733bb0ea598SMatthew Wilcox (Oracle) if (folio_pos(folio) + folio_size(folio) <= i_size) 273414059f66SMatthew Wilcox (Oracle) return __block_write_full_folio(inode, folio, get_block, wbc); 27351da177e4SLinus Torvalds 2736bb0ea598SMatthew Wilcox (Oracle) /* Is the folio fully outside i_size? (truncate in progress) */ 2737bb0ea598SMatthew Wilcox (Oracle) if (folio_pos(folio) >= i_size) { 273853418a18SMatthew Wilcox (Oracle) folio_unlock(folio); 27391da177e4SLinus Torvalds return 0; /* don't care */ 27401da177e4SLinus Torvalds } 27411da177e4SLinus Torvalds 27421da177e4SLinus Torvalds /* 2743bb0ea598SMatthew Wilcox (Oracle) * The folio straddles i_size. It must be zeroed out on each and every 27442a61aa40SAdam Buchbinder * writepage invocation because it may be mmapped. "A file is mapped 27451da177e4SLinus Torvalds * in multiples of the page size. For a file that is not a multiple of 27461da177e4SLinus Torvalds * the page size, the remaining memory is zeroed when mapped, and 27471da177e4SLinus Torvalds * writes to that region are not written out to the file." 27481da177e4SLinus Torvalds */ 2749bb0ea598SMatthew Wilcox (Oracle) folio_zero_segment(folio, offset_in_folio(folio, i_size), 2750bb0ea598SMatthew Wilcox (Oracle) folio_size(folio)); 275114059f66SMatthew Wilcox (Oracle) return __block_write_full_folio(inode, folio, get_block, wbc); 275235c80d5fSChris Mason } 275335c80d5fSChris Mason 27541da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block, 27551da177e4SLinus Torvalds get_block_t *get_block) 27561da177e4SLinus Torvalds { 27571da177e4SLinus Torvalds struct inode *inode = mapping->host; 27582a527d68SAlexander Potapenko struct buffer_head tmp = { 27592a527d68SAlexander Potapenko .b_size = i_blocksize(inode), 27602a527d68SAlexander Potapenko }; 27612a527d68SAlexander Potapenko 27621da177e4SLinus Torvalds get_block(inode, block, &tmp, 0); 27631da177e4SLinus Torvalds return tmp.b_blocknr; 27641da177e4SLinus Torvalds } 27651fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_block_bmap); 27661da177e4SLinus Torvalds 27674246a0b6SChristoph Hellwig static void end_bio_bh_io_sync(struct bio *bio) 27681da177e4SLinus Torvalds { 27691da177e4SLinus Torvalds struct buffer_head *bh = bio->bi_private; 27701da177e4SLinus Torvalds 2771b7c44ed9SJens Axboe if (unlikely(bio_flagged(bio, BIO_QUIET))) 277208bafc03SKeith Mannthey set_bit(BH_Quiet, &bh->b_state); 277308bafc03SKeith Mannthey 27744e4cbee9SChristoph Hellwig bh->b_end_io(bh, !bio->bi_status); 27751da177e4SLinus Torvalds bio_put(bio); 27761da177e4SLinus Torvalds } 27771da177e4SLinus Torvalds 27785bdf402aSRitesh Harjani (IBM) static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh, 277944981351SBart Van Assche enum rw_hint write_hint, 27801420c4a5SBart Van Assche struct writeback_control *wbc) 27811da177e4SLinus Torvalds { 27821420c4a5SBart Van Assche const enum req_op op = opf & REQ_OP_MASK; 27831da177e4SLinus Torvalds struct bio *bio; 27841da177e4SLinus Torvalds 27851da177e4SLinus Torvalds BUG_ON(!buffer_locked(bh)); 27861da177e4SLinus Torvalds BUG_ON(!buffer_mapped(bh)); 27871da177e4SLinus Torvalds BUG_ON(!bh->b_end_io); 27888fb0e342SAneesh Kumar K.V BUG_ON(buffer_delay(bh)); 27898fb0e342SAneesh Kumar K.V BUG_ON(buffer_unwritten(bh)); 27901da177e4SLinus Torvalds 279148fd4f93SJens Axboe /* 279248fd4f93SJens Axboe * Only clear out a write error when rewriting 27931da177e4SLinus Torvalds */ 27942a222ca9SMike Christie if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) 27951da177e4SLinus Torvalds 
clear_buffer_write_io_error(bh); 27961da177e4SLinus Torvalds 279707888c66SChristoph Hellwig if (buffer_meta(bh)) 27981420c4a5SBart Van Assche opf |= REQ_META; 279907888c66SChristoph Hellwig if (buffer_prio(bh)) 28001420c4a5SBart Van Assche opf |= REQ_PRIO; 280107888c66SChristoph Hellwig 28021420c4a5SBart Van Assche bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO); 28031da177e4SLinus Torvalds 28044f74d15fSEric Biggers fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); 28054f74d15fSEric Biggers 28064f024f37SKent Overstreet bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); 280744981351SBart Van Assche bio->bi_write_hint = write_hint; 28081da177e4SLinus Torvalds 2809741af75dSJohannes Thumshirn __bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); 28101da177e4SLinus Torvalds 28111da177e4SLinus Torvalds bio->bi_end_io = end_bio_bh_io_sync; 28121da177e4SLinus Torvalds bio->bi_private = bh; 28131da177e4SLinus Torvalds 281483c9c547SMing Lei /* Take care of bh's that straddle the end of the device */ 281583c9c547SMing Lei guard_bio_eod(bio); 281683c9c547SMing Lei 2817fd42df30SDennis Zhou if (wbc) { 2818fd42df30SDennis Zhou wbc_init_bio(wbc, bio); 281934e51a5eSTejun Heo wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size); 2820fd42df30SDennis Zhou } 2821fd42df30SDennis Zhou 28224e49ea4aSMike Christie submit_bio(bio); 28231da177e4SLinus Torvalds } 2824bafc0dbaSTejun Heo 28255bdf402aSRitesh Harjani (IBM) void submit_bh(blk_opf_t opf, struct buffer_head *bh) 282671368511SDarrick J. Wong { 282744981351SBart Van Assche submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL); 282871368511SDarrick J. Wong } 28291fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(submit_bh); 28301da177e4SLinus Torvalds 28313ae72869SBart Van Assche void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) 28329cb569d6SChristoph Hellwig { 28339cb569d6SChristoph Hellwig lock_buffer(bh); 28349cb569d6SChristoph Hellwig if (!test_clear_buffer_dirty(bh)) { 28359cb569d6SChristoph Hellwig unlock_buffer(bh); 28369cb569d6SChristoph Hellwig return; 28379cb569d6SChristoph Hellwig } 28389cb569d6SChristoph Hellwig bh->b_end_io = end_buffer_write_sync; 28399cb569d6SChristoph Hellwig get_bh(bh); 28401420c4a5SBart Van Assche submit_bh(REQ_OP_WRITE | op_flags, bh); 28419cb569d6SChristoph Hellwig } 28429cb569d6SChristoph Hellwig EXPORT_SYMBOL(write_dirty_buffer); 28439cb569d6SChristoph Hellwig 28441da177e4SLinus Torvalds /* 28451da177e4SLinus Torvalds * For a data-integrity writeout, we need to wait upon any in-progress I/O 28461da177e4SLinus Torvalds * and then start new I/O and then wait upon it. The caller must have a ref on 28471da177e4SLinus Torvalds * the buffer_head. 28481da177e4SLinus Torvalds */ 28493ae72869SBart Van Assche int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) 28501da177e4SLinus Torvalds { 28511da177e4SLinus Torvalds WARN_ON(atomic_read(&bh->b_count) < 1); 28521da177e4SLinus Torvalds lock_buffer(bh); 28531da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) { 2854377254b2SXianting Tian /* 2855377254b2SXianting Tian * The bh should be mapped, but it might not be if the 2856377254b2SXianting Tian * device was hot-removed. Not much we can do but fail the I/O. 
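* Returning -EIO at least tells the caller that the dirty data never reached the device.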
2857377254b2SXianting Tian */ 2858377254b2SXianting Tian if (!buffer_mapped(bh)) { 2859377254b2SXianting Tian unlock_buffer(bh); 2860377254b2SXianting Tian return -EIO; 2861377254b2SXianting Tian } 2862377254b2SXianting Tian 28631da177e4SLinus Torvalds get_bh(bh); 28641da177e4SLinus Torvalds bh->b_end_io = end_buffer_write_sync; 2865ab620620SRitesh Harjani (IBM) submit_bh(REQ_OP_WRITE | op_flags, bh); 28661da177e4SLinus Torvalds wait_on_buffer(bh); 2867ab620620SRitesh Harjani (IBM) if (!buffer_uptodate(bh)) 2868ab620620SRitesh Harjani (IBM) return -EIO; 28691da177e4SLinus Torvalds } else { 28701da177e4SLinus Torvalds unlock_buffer(bh); 28711da177e4SLinus Torvalds } 2872ab620620SRitesh Harjani (IBM) return 0; 28731da177e4SLinus Torvalds } 287487e99511SChristoph Hellwig EXPORT_SYMBOL(__sync_dirty_buffer); 287587e99511SChristoph Hellwig 287687e99511SChristoph Hellwig int sync_dirty_buffer(struct buffer_head *bh) 287787e99511SChristoph Hellwig { 287870fd7614SChristoph Hellwig return __sync_dirty_buffer(bh, REQ_SYNC); 287987e99511SChristoph Hellwig } 28801fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(sync_dirty_buffer); 28811da177e4SLinus Torvalds 28821da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh) 28831da177e4SLinus Torvalds { 28841da177e4SLinus Torvalds return atomic_read(&bh->b_count) | 28851da177e4SLinus Torvalds (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); 28861da177e4SLinus Torvalds } 28871da177e4SLinus Torvalds 288864394763SMatthew Wilcox (Oracle) static bool 288964394763SMatthew Wilcox (Oracle) drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free) 28901da177e4SLinus Torvalds { 289164394763SMatthew Wilcox (Oracle) struct buffer_head *head = folio_buffers(folio); 28921da177e4SLinus Torvalds struct buffer_head *bh; 28931da177e4SLinus Torvalds 28941da177e4SLinus Torvalds bh = head; 28951da177e4SLinus Torvalds do { 28961da177e4SLinus Torvalds if (buffer_busy(bh)) 28971da177e4SLinus Torvalds goto failed; 28981da177e4SLinus Torvalds bh = bh->b_this_page; 28991da177e4SLinus Torvalds } while (bh != head); 29001da177e4SLinus Torvalds 29011da177e4SLinus Torvalds do { 29021da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 29031da177e4SLinus Torvalds 2904535ee2fbSJan Kara if (bh->b_assoc_map) 29051da177e4SLinus Torvalds __remove_assoc_queue(bh); 29061da177e4SLinus Torvalds bh = next; 29071da177e4SLinus Torvalds } while (bh != head); 29081da177e4SLinus Torvalds *buffers_to_free = head; 290964394763SMatthew Wilcox (Oracle) folio_detach_private(folio); 291064394763SMatthew Wilcox (Oracle) return true; 29111da177e4SLinus Torvalds failed: 291264394763SMatthew Wilcox (Oracle) return false; 29131da177e4SLinus Torvalds } 29141da177e4SLinus Torvalds 2915b1888d14SMatthew Wilcox (Oracle) /** 2916b1888d14SMatthew Wilcox (Oracle) * try_to_free_buffers - Release buffers attached to this folio. 2917b1888d14SMatthew Wilcox (Oracle) * @folio: The folio. 2918b1888d14SMatthew Wilcox (Oracle) * 2919b1888d14SMatthew Wilcox (Oracle) * If any buffers are in use (dirty, under writeback, elevated refcount), 2920b1888d14SMatthew Wilcox (Oracle) * no buffers will be freed. 2921b1888d14SMatthew Wilcox (Oracle) * 2922b1888d14SMatthew Wilcox (Oracle) * If the folio is dirty but all the buffers are clean then we need to 2923b1888d14SMatthew Wilcox (Oracle) * be sure to mark the folio clean as well. 
This is because the folio 2924b1888d14SMatthew Wilcox (Oracle) * may be against a block device, and a later reattachment of buffers 2925b1888d14SMatthew Wilcox (Oracle) * to a dirty folio will set *all* buffers dirty. Which would corrupt 2926b1888d14SMatthew Wilcox (Oracle) * filesystem data on the same device. 2927b1888d14SMatthew Wilcox (Oracle) * 2928b1888d14SMatthew Wilcox (Oracle) * The same applies to regular filesystem folios: if all the buffers are 2929b1888d14SMatthew Wilcox (Oracle) * clean then we set the folio clean and proceed. To do that, we require 2930b1888d14SMatthew Wilcox (Oracle) * total exclusion from block_dirty_folio(). That is obtained with 2931b1888d14SMatthew Wilcox (Oracle) * i_private_lock. 2932b1888d14SMatthew Wilcox (Oracle) * 2933b1888d14SMatthew Wilcox (Oracle) * Exclusion against try_to_free_buffers may be obtained by either 2934b1888d14SMatthew Wilcox (Oracle) * locking the folio or by holding its mapping's i_private_lock. 2935b1888d14SMatthew Wilcox (Oracle) * 2936b1888d14SMatthew Wilcox (Oracle) * Context: Process context. @folio must be locked. Will not sleep. 2937b1888d14SMatthew Wilcox (Oracle) * Return: true if all buffers attached to this folio were freed. 2938b1888d14SMatthew Wilcox (Oracle) */ 293968189fefSMatthew Wilcox (Oracle) bool try_to_free_buffers(struct folio *folio) 29401da177e4SLinus Torvalds { 294168189fefSMatthew Wilcox (Oracle) struct address_space * const mapping = folio->mapping; 29421da177e4SLinus Torvalds struct buffer_head *buffers_to_free = NULL; 294368189fefSMatthew Wilcox (Oracle) bool ret = 0; 29441da177e4SLinus Torvalds 294568189fefSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio)); 294668189fefSMatthew Wilcox (Oracle) if (folio_test_writeback(folio)) 294768189fefSMatthew Wilcox (Oracle) return false; 29481da177e4SLinus Torvalds 29491da177e4SLinus Torvalds if (mapping == NULL) { /* can this still happen? */ 295064394763SMatthew Wilcox (Oracle) ret = drop_buffers(folio, &buffers_to_free); 29511da177e4SLinus Torvalds goto out; 29521da177e4SLinus Torvalds } 29531da177e4SLinus Torvalds 2954600f111eSMatthew Wilcox (Oracle) spin_lock(&mapping->i_private_lock); 295564394763SMatthew Wilcox (Oracle) ret = drop_buffers(folio, &buffers_to_free); 2956ecdfc978SLinus Torvalds 2957ecdfc978SLinus Torvalds /* 2958ecdfc978SLinus Torvalds * If the filesystem writes its buffers by hand (eg ext3) 295968189fefSMatthew Wilcox (Oracle) * then we can have clean buffers against a dirty folio. We 296068189fefSMatthew Wilcox (Oracle) * clean the folio here; otherwise the VM will never notice 2961ecdfc978SLinus Torvalds * that the filesystem did any IO at all. 2962ecdfc978SLinus Torvalds * 2963ecdfc978SLinus Torvalds * Also, during truncate, discard_buffer will have marked all 296468189fefSMatthew Wilcox (Oracle) * the folio's buffers clean. We discover that here and clean 296568189fefSMatthew Wilcox (Oracle) * the folio also. 296687df7241SNick Piggin * 2967600f111eSMatthew Wilcox (Oracle) * i_private_lock must be held over this entire operation in order 2968e621900aSMatthew Wilcox (Oracle) * to synchronise against block_dirty_folio and prevent the 296987df7241SNick Piggin * dirty bit from being lost. 
2970ecdfc978SLinus Torvalds */ 297111f81becSTejun Heo if (ret) 297268189fefSMatthew Wilcox (Oracle) folio_cancel_dirty(folio); 2973600f111eSMatthew Wilcox (Oracle) spin_unlock(&mapping->i_private_lock); 29741da177e4SLinus Torvalds out: 29751da177e4SLinus Torvalds if (buffers_to_free) { 29761da177e4SLinus Torvalds struct buffer_head *bh = buffers_to_free; 29771da177e4SLinus Torvalds 29781da177e4SLinus Torvalds do { 29791da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 29801da177e4SLinus Torvalds free_buffer_head(bh); 29811da177e4SLinus Torvalds bh = next; 29821da177e4SLinus Torvalds } while (bh != buffers_to_free); 29831da177e4SLinus Torvalds } 29841da177e4SLinus Torvalds return ret; 29851da177e4SLinus Torvalds } 29861da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers); 29871da177e4SLinus Torvalds 29881da177e4SLinus Torvalds /* 29891da177e4SLinus Torvalds * Buffer-head allocation 29901da177e4SLinus Torvalds */ 299168279f9cSAlexey Dobriyan static struct kmem_cache *bh_cachep __ro_after_init; 29921da177e4SLinus Torvalds 29931da177e4SLinus Torvalds /* 29941da177e4SLinus Torvalds * Once the number of bh's in the machine exceeds this level, we start 29951da177e4SLinus Torvalds * stripping them in writeback. 29961da177e4SLinus Torvalds */ 299768279f9cSAlexey Dobriyan static unsigned long max_buffer_heads __ro_after_init; 29981da177e4SLinus Torvalds 29991da177e4SLinus Torvalds int buffer_heads_over_limit; 30001da177e4SLinus Torvalds 30011da177e4SLinus Torvalds struct bh_accounting { 30021da177e4SLinus Torvalds int nr; /* Number of live bh's */ 30031da177e4SLinus Torvalds int ratelimit; /* Limit cacheline bouncing */ 30041da177e4SLinus Torvalds }; 30051da177e4SLinus Torvalds 30061da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0}; 30071da177e4SLinus Torvalds 30081da177e4SLinus Torvalds static void recalc_bh_state(void) 30091da177e4SLinus Torvalds { 30101da177e4SLinus Torvalds int i; 30111da177e4SLinus Torvalds int tot = 0; 30121da177e4SLinus Torvalds 3013ee1be862SChristoph Lameter if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096) 30141da177e4SLinus Torvalds return; 3015c7b92516SChristoph Lameter __this_cpu_write(bh_accounting.ratelimit, 0); 30168a143426SEric Dumazet for_each_online_cpu(i) 30171da177e4SLinus Torvalds tot += per_cpu(bh_accounting, i).nr; 30181da177e4SLinus Torvalds buffer_heads_over_limit = (tot > max_buffer_heads); 30191da177e4SLinus Torvalds } 30201da177e4SLinus Torvalds 3021dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 30221da177e4SLinus Torvalds { 3023019b4d12SRichard Kennedy struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); 30241da177e4SLinus Torvalds if (ret) { 3025a35afb83SChristoph Lameter INIT_LIST_HEAD(&ret->b_assoc_buffers); 3026f1e67e35SThomas Gleixner spin_lock_init(&ret->b_uptodate_lock); 3027c7b92516SChristoph Lameter preempt_disable(); 3028c7b92516SChristoph Lameter __this_cpu_inc(bh_accounting.nr); 30291da177e4SLinus Torvalds recalc_bh_state(); 3030c7b92516SChristoph Lameter preempt_enable(); 30311da177e4SLinus Torvalds } 30321da177e4SLinus Torvalds return ret; 30331da177e4SLinus Torvalds } 30341da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head); 30351da177e4SLinus Torvalds 30361da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh) 30371da177e4SLinus Torvalds { 30381da177e4SLinus Torvalds BUG_ON(!list_empty(&bh->b_assoc_buffers)); 30391da177e4SLinus Torvalds kmem_cache_free(bh_cachep, bh); 3040c7b92516SChristoph Lameter 
preempt_disable(); 3041c7b92516SChristoph Lameter __this_cpu_dec(bh_accounting.nr); 30421da177e4SLinus Torvalds recalc_bh_state(); 3043c7b92516SChristoph Lameter preempt_enable(); 30441da177e4SLinus Torvalds } 30451da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head); 30461da177e4SLinus Torvalds 3047fc4d24c9SSebastian Andrzej Siewior static int buffer_exit_cpu_dead(unsigned int cpu) 30481da177e4SLinus Torvalds { 30491da177e4SLinus Torvalds int i; 30501da177e4SLinus Torvalds struct bh_lru *b = &per_cpu(bh_lrus, cpu); 30511da177e4SLinus Torvalds 30521da177e4SLinus Torvalds for (i = 0; i < BH_LRU_SIZE; i++) { 30531da177e4SLinus Torvalds brelse(b->bhs[i]); 30541da177e4SLinus Torvalds b->bhs[i] = NULL; 30551da177e4SLinus Torvalds } 3056c7b92516SChristoph Lameter this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr); 30578a143426SEric Dumazet per_cpu(bh_accounting, cpu).nr = 0; 3058fc4d24c9SSebastian Andrzej Siewior return 0; 30591da177e4SLinus Torvalds } 30601da177e4SLinus Torvalds 3061389d1b08SAneesh Kumar K.V /** 3062a6b91919SRandy Dunlap * bh_uptodate_or_lock - Test whether the buffer is uptodate 3063389d1b08SAneesh Kumar K.V * @bh: struct buffer_head 3064389d1b08SAneesh Kumar K.V * 3065389d1b08SAneesh Kumar K.V * Return true if the buffer is up-to-date; otherwise return false 3066389d1b08SAneesh Kumar K.V * with the buffer locked. 3067389d1b08SAneesh Kumar K.V */ 3068389d1b08SAneesh Kumar K.V int bh_uptodate_or_lock(struct buffer_head *bh) 3069389d1b08SAneesh Kumar K.V { 3070389d1b08SAneesh Kumar K.V if (!buffer_uptodate(bh)) { 3071389d1b08SAneesh Kumar K.V lock_buffer(bh); 3072389d1b08SAneesh Kumar K.V if (!buffer_uptodate(bh)) 3073389d1b08SAneesh Kumar K.V return 0; 3074389d1b08SAneesh Kumar K.V unlock_buffer(bh); 3075389d1b08SAneesh Kumar K.V } 3076389d1b08SAneesh Kumar K.V return 1; 3077389d1b08SAneesh Kumar K.V } 3078389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_uptodate_or_lock); 3079389d1b08SAneesh Kumar K.V 3080389d1b08SAneesh Kumar K.V /** 3081fdee117eSZhang Yi * __bh_read - Submit read for a locked buffer 3082389d1b08SAneesh Kumar K.V * @bh: struct buffer_head 3083fdee117eSZhang Yi * @op_flags: extra REQ_* flags to OR into REQ_OP_READ 3084fdee117eSZhang Yi * @wait: wait until the read finishes 3085389d1b08SAneesh Kumar K.V * 3086fdee117eSZhang Yi * Returns zero on success or when not waiting; returns -EIO if a waited read fails. 
3087389d1b08SAneesh Kumar K.V */ 3088fdee117eSZhang Yi int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait) 3089389d1b08SAneesh Kumar K.V { 3090fdee117eSZhang Yi int ret = 0; 3091389d1b08SAneesh Kumar K.V 3092fdee117eSZhang Yi BUG_ON(!buffer_locked(bh)); 3093389d1b08SAneesh Kumar K.V 3094389d1b08SAneesh Kumar K.V get_bh(bh); 3095389d1b08SAneesh Kumar K.V bh->b_end_io = end_buffer_read_sync; 3096fdee117eSZhang Yi submit_bh(REQ_OP_READ | op_flags, bh); 3097fdee117eSZhang Yi if (wait) { 3098389d1b08SAneesh Kumar K.V wait_on_buffer(bh); 3099fdee117eSZhang Yi if (!buffer_uptodate(bh)) 3100fdee117eSZhang Yi ret = -EIO; 3101389d1b08SAneesh Kumar K.V } 3102fdee117eSZhang Yi return ret; 3103fdee117eSZhang Yi } 3104fdee117eSZhang Yi EXPORT_SYMBOL(__bh_read); 3105fdee117eSZhang Yi 3106fdee117eSZhang Yi /** 3107fdee117eSZhang Yi * __bh_read_batch - Submit read for a batch of unlocked buffers 3108fdee117eSZhang Yi * @nr: number of entries in the buffer batch 3109fdee117eSZhang Yi * @bhs: a batch of struct buffer_head 3110fdee117eSZhang Yi * @op_flags: extra REQ_* flags to OR into REQ_OP_READ 3111fdee117eSZhang Yi * @force_lock: if set, sleep to take each buffer's lock; otherwise skip any 3112fdee117eSZhang Yi * buffer that cannot be locked immediately. 3113fdee117eSZhang Yi * 3114fdee117eSZhang Yi * Returns nothing: the reads complete asynchronously, and callers that need the result must wait on each buffer and check it themselves. 3115fdee117eSZhang Yi */ 3116fdee117eSZhang Yi void __bh_read_batch(int nr, struct buffer_head *bhs[], 3117fdee117eSZhang Yi blk_opf_t op_flags, bool force_lock) 3118fdee117eSZhang Yi { 3119fdee117eSZhang Yi int i; 3120fdee117eSZhang Yi 3121fdee117eSZhang Yi for (i = 0; i < nr; i++) { 3122fdee117eSZhang Yi struct buffer_head *bh = bhs[i]; 3123fdee117eSZhang Yi 3124fdee117eSZhang Yi if (buffer_uptodate(bh)) 3125fdee117eSZhang Yi continue; 3126fdee117eSZhang Yi 3127fdee117eSZhang Yi if (force_lock) 3128fdee117eSZhang Yi lock_buffer(bh); 3129fdee117eSZhang Yi else 3130fdee117eSZhang Yi if (!trylock_buffer(bh)) 3131fdee117eSZhang Yi continue; 3132fdee117eSZhang Yi 3133fdee117eSZhang Yi if (buffer_uptodate(bh)) { 3134fdee117eSZhang Yi unlock_buffer(bh); 3135fdee117eSZhang Yi continue; 3136fdee117eSZhang Yi } 3137fdee117eSZhang Yi 3138fdee117eSZhang Yi bh->b_end_io = end_buffer_read_sync; 3139fdee117eSZhang Yi get_bh(bh); 3140fdee117eSZhang Yi submit_bh(REQ_OP_READ | op_flags, bh); 3141fdee117eSZhang Yi } 3142fdee117eSZhang Yi } 3143fdee117eSZhang Yi EXPORT_SYMBOL(__bh_read_batch); 3144389d1b08SAneesh Kumar K.V 31451da177e4SLinus Torvalds void __init buffer_init(void) 31461da177e4SLinus Torvalds { 314743be594aSZhang Yanfei unsigned long nrpages; 3148fc4d24c9SSebastian Andrzej Siewior int ret; 31491da177e4SLinus Torvalds 3150de8a3207SKunwu Chan bh_cachep = KMEM_CACHE(buffer_head, 3151c997d683SChengming Zhou SLAB_RECLAIM_ACCOUNT|SLAB_PANIC); 31521da177e4SLinus Torvalds /* 31531da177e4SLinus Torvalds * Limit the bh occupancy to 10% of ZONE_NORMAL 31541da177e4SLinus Torvalds */ 31551da177e4SLinus Torvalds nrpages = (nr_free_buffer_pages() * 10) / 100; 31561da177e4SLinus Torvalds max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head)); 3157fc4d24c9SSebastian Andrzej Siewior ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead", 3158fc4d24c9SSebastian Andrzej Siewior NULL, buffer_exit_cpu_dead); 3159fc4d24c9SSebastian Andrzej Siewior WARN_ON(ret < 0); 31601da177e4SLinus Torvalds } 3161
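/*
 * Illustrative sketch (editor's example, not part of the original file):
 * the classic read-modify-write cycle a simple filesystem performs with
 * the buffer head API above. The function and parameter names here are
 * hypothetical; "off" is assumed to be within the block.
 */
static int example_update_block(struct super_block *sb, sector_t blocknr,
				unsigned int off, u8 value)
{
	struct buffer_head *bh;
	int err;

	/* sb_bread() returns an uptodate buffer, or NULL on read failure. */
	bh = sb_bread(sb, blocknr);
	if (!bh)
		return -EIO;

	lock_buffer(bh);
	((u8 *)bh->b_data)[off] = value;	/* modify the cached block */
	unlock_buffer(bh);
	mark_buffer_dirty(bh);			/* queue for writeback */

	/* For integrity-critical metadata, write it out now and check. */
	err = sync_dirty_buffer(bh);
	brelse(bh);
	return err;
}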