// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>

#include "internal.h"

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
			 struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	mark_page_accessed(bh->b_page);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns whether the page has dirty or writeback buffers.  If all the
 * buffers are unlocked and clean then the PageDirty information is stale.
 * If any of the buffers are locked, it is assumed they are locked for IO.
 */
void buffer_check_dirty_writeback(struct page *page,
				     bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;
	*dirty = false;
	*writeback = false;

	BUG_ON(!PageLocked(page));

	if (!page_has_buffers(page))
		return;

	if (PageWriteback(page))
		*writeback = true;

	head = page_buffers(page);
	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}
EXPORT_SYMBOL(buffer_check_dirty_writeback);

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head *bh)
{
	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
	if (!test_bit(BH_Quiet, &bh->b_state))
		printk_ratelimited(KERN_ERR
			"Buffer I/O error on dev %pg, logical block %llu%s\n",
			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}
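
/*
 * Sketch of the locking protocol these helpers implement; the caller
 * below is hypothetical, but the pattern is the standard one:
 *
 *	lock_buffer(bh);		// may sleep via __lock_buffer()
 *	if (!buffer_uptodate(bh)) {
 *		... read or repair the buffer contents ...
 *	}
 *	unlock_buffer(bh);		// wakes any __wait_on_buffer() sleepers
 */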

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed read-ahead attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost sync page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);
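
/*
 * Minimal sketch of the synchronous-write pattern this completion
 * handler supports (compare __sync_dirty_buffer() elsewhere in this
 * file); illustrative only:
 *
 *	lock_buffer(bh);
 *	if (test_clear_buffer_dirty(bh)) {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_write_sync;
 *		submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
 *		wait_on_buffer(bh);
 *	} else {
 *		unlock_buffer(bh);
 *	}
 */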

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, private_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock.
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;
	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);

	index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * We might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between file io on
	 * the block device and getblk.  It gets dealt with elsewhere;
	 * don't buffer_error if we had some unmapped buffers.
	 */
	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
	if (all_mapped && __ratelimit(&last_warned)) {
		printk("__find_get_block_slow() failed. block=%llu, "
		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
		       "device %pg blocksize: %d\n",
		       (unsigned long long)block,
		       (unsigned long long)bh->b_blocknr,
		       bh->b_state, bh->b_size, bdev,
		       1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	put_page(page);
out:
	return ret;
}

static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		buffer_io_error(bh, ", async page read");
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}

struct decrypt_bh_ctx {
	struct work_struct work;
	struct buffer_head *bh;
};

static void decrypt_bh(struct work_struct *work)
{
	struct decrypt_bh_ctx *ctx =
		container_of(work, struct decrypt_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	int err;

	err = fscrypt_decrypt_pagecache_blocks(bh->b_page, bh->b_size,
					       bh_offset(bh));
	end_buffer_async_read(bh, err == 0);
	kfree(ctx);
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
{
	/* Decrypt if needed */
	if (uptodate &&
	    fscrypt_inode_uses_fs_layer_crypto(bh->b_page->mapping->host)) {
		struct decrypt_bh_ctx *ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);

		if (ctx) {
			INIT_WORK(&ctx->work, decrypt_bh);
			ctx->bh = bh;
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		uptodate = 0;
	}
	end_buffer_async_read(bh, uptodate);
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost async page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	end_page_writeback(page);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}
EXPORT_SYMBOL(end_buffer_async_write);

/*
 * If a page's buffers are under async readin (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read_io;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);
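
/*
 * Hypothetical writepage-style loop showing how the async-write
 * marking above is typically driven (loosely modelled on
 * __block_write_full_page()); a sketch, not the exact upstream code:
 *
 *	bh = head = page_buffers(page);
 *	do {
 *		lock_buffer(bh);
 *		if (test_clear_buffer_dirty(bh))
 *			mark_buffer_async_write(bh);
 *		else
 *			unlock_buffer(bh);
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 *
 * Each marked buffer is then submitted for write;
 * end_buffer_async_write() ends PageWriteback when the last async
 * buffer on the page completes.
 */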

/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held.
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

void emergency_thaw_bdev(struct super_block *sb)
{
	while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
		printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->private_data;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
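
/*
 * Sketch of a typical caller (compare generic_file_fsync()); the
 * function name below is hypothetical and error handling is trimmed:
 *
 *	int example_fsync(struct file *file, loff_t start, loff_t end,
 *			  int datasync)
 *	{
 *		struct inode *inode = file->f_mapping->host;
 *		int err;
 *
 *		err = file_write_and_wait_range(file, start, end);
 *		if (err)
 *			return err;
 *		return sync_mapping_buffers(inode->i_mapping);
 *	}
 */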

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->private_data) {
		mapping->private_data = buffer_mapping;
	} else {
		BUG_ON(mapping->private_data != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
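
/*
 * Illustrative (hypothetical) use of mark_buffer_dirty_inode(): after
 * an ext2-style filesystem modifies an indirect block on behalf of an
 * inode, it queues the buffer for that inode's fsync with
 *
 *	... update block pointers in bh->b_data ...
 *	mark_buffer_dirty_inode(bh, inode);
 *
 * so that a later sync_mapping_buffers(inode->i_mapping) writes and
 * waits on it.
 */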

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct buffer_head *head;
	bool newly_dirty;

	spin_lock(&mapping->private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	/*
	 * Lock out page's memcg migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	folio_memcg_lock(folio);
	newly_dirty = !folio_test_set_dirty(folio);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__folio_mark_dirty(folio, mapping, 1);

	folio_memcg_unlock(folio);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return newly_dirty;
}
EXPORT_SYMBOL(block_dirty_folio);
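
/*
 * block_dirty_folio() is intended to be wired up as the ->dirty_folio
 * method of a buffer-backed address_space, e.g. (sketch; the aops name
 * is hypothetical):
 *
 *	static const struct address_space_operations example_aops = {
 *		.dirty_folio	= block_dirty_folio,
 *		...
 *	};
 */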

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;
	struct blk_plug plug;

	INIT_LIST_HEAD(&tmp);
	blk_start_plug(&plug);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, REQ_SYNC);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	spin_unlock(lock);
	blk_finish_plug(&plug);
	spin_lock(lock);

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->private_data;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->private_data;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer..  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		bool retry)
{
	struct buffer_head *bh, *head;
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
	long offset;
	struct mem_cgroup *memcg, *old_memcg;

	if (retry)
		gfp |= __GFP_NOFAIL;

	/* The page lock pins the memcg */
	memcg = page_memcg(page);
	old_memcg = set_active_memcg(memcg);

	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(gfp);
		if (!bh)
			goto no_grow;

		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);
	}
out:
	set_active_memcg(old_memcg);
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	goto out;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_private(page, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = bdev_nr_bytes(bdev);

	if (sz) {
		unsigned int sizebits = blksize_bits(size);
		retval = (sz >> sizebits);
	}
	return retval;
}
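
/*
 * Worked example for blkdev_max_block() above: on a 1 TiB device
 * (sz = 2^40 bytes) with 4 KiB blocks, blksize_bits(4096) is 12, so
 * the first block number beyond the device is 2^40 >> 12 = 2^28.
 */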

/*
 * Initialise the state of a blockdev page's buffers.
 */
static sector_t
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);
	sector_t end_block = blkdev_max_block(bdev, size);

	do {
		if (!buffer_mapped(bh)) {
			bh->b_end_io = NULL;
			bh->b_private = NULL;
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			if (block < end_block)
				set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * Caller needs to validate requested block against end of device.
	 */
	return end_block;
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static int
grow_dev_page(struct block_device *bdev, sector_t block,
	      pgoff_t index, int size, int sizebits, gfp_t gfp)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;
	sector_t end_block;
	int ret = 0;
	gfp_t gfp_mask;

	gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;

	/*
	 * XXX: __getblk_slow() can not really deal with failure and
	 * will endlessly loop on improvised global reclaim.  Prefer
	 * looping in the allocator rather than here, at least that
	 * code knows what it's doing.
	 */
	gfp_mask |= __GFP_NOFAIL;

	page = find_or_create_page(inode->i_mapping, index, gfp_mask);

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			end_block = init_page_buffers(page, bdev,
						(sector_t)index << sizebits,
						size);
			goto done;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, true);

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
			size);
	spin_unlock(&inode->i_mapping->private_lock);
done:
	ret = (block < end_block) ? 1 : -ENXIO;
failed:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
{
	pgoff_t index;
	int sizebits;

	sizebits = PAGE_SHIFT - __ffs(size);
	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %pg\n",
			__func__, (unsigned long long)block,
			bdev);
		return -EIO;
	}

	/* Create a page with the proper size buffers.. */
	return grow_dev_page(bdev, block, index, size, sizebits, gfp);
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
	     unsigned size, gfp_t gfp)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "logical block size: %d\n",
					bdev_logical_block_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head *bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size, gfp);
		if (ret < 0)
			return NULL;
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in the page cache.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
 * its backing page dirty, then tag the page as dirty in the page cache
 * and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * the i_pages lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	trace_block_dirty_buffer(bh);
10901be62dc1SLinus Torvalds */ 10911be62dc1SLinus Torvalds if (buffer_dirty(bh)) { 10921be62dc1SLinus Torvalds smp_mb(); 10931be62dc1SLinus Torvalds if (buffer_dirty(bh)) 10941be62dc1SLinus Torvalds return; 10951be62dc1SLinus Torvalds } 10961be62dc1SLinus Torvalds 1097a8e7d49aSLinus Torvalds if (!test_set_buffer_dirty(bh)) { 1098a8e7d49aSLinus Torvalds struct page *page = bh->b_page; 1099c4843a75SGreg Thelen struct address_space *mapping = NULL; 1100c4843a75SGreg Thelen 110162cccb8cSJohannes Weiner lock_page_memcg(page); 11028e9d78edSLinus Torvalds if (!TestSetPageDirty(page)) { 1103c4843a75SGreg Thelen mapping = page_mapping(page); 11048e9d78edSLinus Torvalds if (mapping) 110562cccb8cSJohannes Weiner __set_page_dirty(page, mapping, 0); 11068e9d78edSLinus Torvalds } 110762cccb8cSJohannes Weiner unlock_page_memcg(page); 1108c4843a75SGreg Thelen if (mapping) 1109c4843a75SGreg Thelen __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 1110a8e7d49aSLinus Torvalds } 11111da177e4SLinus Torvalds } 11121fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(mark_buffer_dirty); 11131da177e4SLinus Torvalds 111487354e5dSJeff Layton void mark_buffer_write_io_error(struct buffer_head *bh) 111587354e5dSJeff Layton { 1116485e9605SJeff Layton struct super_block *sb; 1117485e9605SJeff Layton 111887354e5dSJeff Layton set_buffer_write_io_error(bh); 111987354e5dSJeff Layton /* FIXME: do we need to set this in both places? */ 112087354e5dSJeff Layton if (bh->b_page && bh->b_page->mapping) 112187354e5dSJeff Layton mapping_set_error(bh->b_page->mapping, -EIO); 112287354e5dSJeff Layton if (bh->b_assoc_map) 112387354e5dSJeff Layton mapping_set_error(bh->b_assoc_map, -EIO); 1124485e9605SJeff Layton rcu_read_lock(); 1125485e9605SJeff Layton sb = READ_ONCE(bh->b_bdev->bd_super); 1126485e9605SJeff Layton if (sb) 1127485e9605SJeff Layton errseq_set(&sb->s_wb_err, -EIO); 1128485e9605SJeff Layton rcu_read_unlock(); 112987354e5dSJeff Layton } 113087354e5dSJeff Layton EXPORT_SYMBOL(mark_buffer_write_io_error); 113187354e5dSJeff Layton 11321da177e4SLinus Torvalds /* 11331da177e4SLinus Torvalds * Decrement a buffer_head's reference count. If all buffers against a page 11341da177e4SLinus Torvalds * have zero reference count, are clean and unlocked, and if the page is clean 11351da177e4SLinus Torvalds * and unlocked then try_to_free_buffers() may strip the buffers from the page 11361da177e4SLinus Torvalds * in preparation for freeing it (sometimes, rarely, buffers are removed from 11371da177e4SLinus Torvalds * a page but it ends up not being freed, and buffers may later be reattached). 11381da177e4SLinus Torvalds */ 11391da177e4SLinus Torvalds void __brelse(struct buffer_head * buf) 11401da177e4SLinus Torvalds { 11411da177e4SLinus Torvalds if (atomic_read(&buf->b_count)) { 11421da177e4SLinus Torvalds put_bh(buf); 11431da177e4SLinus Torvalds return; 11441da177e4SLinus Torvalds } 11455c752ad9SArjan van de Ven WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n"); 11461da177e4SLinus Torvalds } 11471fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__brelse); 11481da177e4SLinus Torvalds 11491da177e4SLinus Torvalds /* 11501da177e4SLinus Torvalds * bforget() is like brelse(), except it discards any 11511da177e4SLinus Torvalds * potentially dirty data. 
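 *
 * Typically used when the caller knows the block's contents are garbage
 * (for instance, the block has just been freed), so writing it back
 * would be wasted effort.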
11521da177e4SLinus Torvalds */ 11531da177e4SLinus Torvalds void __bforget(struct buffer_head *bh) 11541da177e4SLinus Torvalds { 11551da177e4SLinus Torvalds clear_buffer_dirty(bh); 1156535ee2fbSJan Kara if (bh->b_assoc_map) { 11571da177e4SLinus Torvalds struct address_space *buffer_mapping = bh->b_page->mapping; 11581da177e4SLinus Torvalds 11591da177e4SLinus Torvalds spin_lock(&buffer_mapping->private_lock); 11601da177e4SLinus Torvalds list_del_init(&bh->b_assoc_buffers); 116158ff407bSJan Kara bh->b_assoc_map = NULL; 11621da177e4SLinus Torvalds spin_unlock(&buffer_mapping->private_lock); 11631da177e4SLinus Torvalds } 11641da177e4SLinus Torvalds __brelse(bh); 11651da177e4SLinus Torvalds } 11661fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__bforget); 11671da177e4SLinus Torvalds 11681da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh) 11691da177e4SLinus Torvalds { 11701da177e4SLinus Torvalds lock_buffer(bh); 11711da177e4SLinus Torvalds if (buffer_uptodate(bh)) { 11721da177e4SLinus Torvalds unlock_buffer(bh); 11731da177e4SLinus Torvalds return bh; 11741da177e4SLinus Torvalds } else { 11751da177e4SLinus Torvalds get_bh(bh); 11761da177e4SLinus Torvalds bh->b_end_io = end_buffer_read_sync; 11772a222ca9SMike Christie submit_bh(REQ_OP_READ, 0, bh); 11781da177e4SLinus Torvalds wait_on_buffer(bh); 11791da177e4SLinus Torvalds if (buffer_uptodate(bh)) 11801da177e4SLinus Torvalds return bh; 11811da177e4SLinus Torvalds } 11821da177e4SLinus Torvalds brelse(bh); 11831da177e4SLinus Torvalds return NULL; 11841da177e4SLinus Torvalds } 11851da177e4SLinus Torvalds 11861da177e4SLinus Torvalds /* 11871da177e4SLinus Torvalds * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block(). 11881da177e4SLinus Torvalds * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their 11891da177e4SLinus Torvalds * refcount elevated by one when they're in an LRU. A buffer can only appear 11901da177e4SLinus Torvalds * once in a particular CPU's LRU. A single buffer can be present in multiple 11911da177e4SLinus Torvalds * CPU's LRUs at the same time. 11921da177e4SLinus Torvalds * 11931da177e4SLinus Torvalds * This is a transparent caching front-end to sb_bread(), sb_getblk() and 11941da177e4SLinus Torvalds * sb_find_get_block(). 11951da177e4SLinus Torvalds * 11961da177e4SLinus Torvalds * The LRUs themselves only need locking against invalidate_bh_lrus. We use 11971da177e4SLinus Torvalds * a local interrupt disable for that. 
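 *
 * The lookup fast path, in sketch form (not the literal code below):
 *
 *	bh = lookup_bh_lru(bdev, block, size);
 *	if (bh == NULL) {
 *		bh = __find_get_block_slow(bdev, block);
 *		if (bh)
 *			bh_lru_install(bh);
 *	}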
11981da177e4SLinus Torvalds */
11991da177e4SLinus Torvalds
120086cf78d7SSebastien Buisson #define BH_LRU_SIZE 16
12011da177e4SLinus Torvalds
12021da177e4SLinus Torvalds struct bh_lru {
12031da177e4SLinus Torvalds struct buffer_head *bhs[BH_LRU_SIZE];
12041da177e4SLinus Torvalds };
12051da177e4SLinus Torvalds
12061da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
12071da177e4SLinus Torvalds
12081da177e4SLinus Torvalds #ifdef CONFIG_SMP
12091da177e4SLinus Torvalds #define bh_lru_lock() local_irq_disable()
12101da177e4SLinus Torvalds #define bh_lru_unlock() local_irq_enable()
12111da177e4SLinus Torvalds #else
12121da177e4SLinus Torvalds #define bh_lru_lock() preempt_disable()
12131da177e4SLinus Torvalds #define bh_lru_unlock() preempt_enable()
12141da177e4SLinus Torvalds #endif
12151da177e4SLinus Torvalds
12161da177e4SLinus Torvalds static inline void check_irqs_on(void)
12171da177e4SLinus Torvalds {
12181da177e4SLinus Torvalds #ifdef irqs_disabled
12191da177e4SLinus Torvalds BUG_ON(irqs_disabled());
12201da177e4SLinus Torvalds #endif
12211da177e4SLinus Torvalds }
12221da177e4SLinus Torvalds
12231da177e4SLinus Torvalds /*
1224241f01fbSEric Biggers * Install a buffer_head into this cpu's LRU. If not already in the LRU, it is
1225241f01fbSEric Biggers * inserted at the front, and the buffer_head at the back, if any, is evicted.
1226241f01fbSEric Biggers * Or, if already in the LRU, it is moved to the front.
12271da177e4SLinus Torvalds */
12281da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh)
12291da177e4SLinus Torvalds {
1230241f01fbSEric Biggers struct buffer_head *evictee = bh;
1231241f01fbSEric Biggers struct bh_lru *b;
1232241f01fbSEric Biggers int i;
12331da177e4SLinus Torvalds
12341da177e4SLinus Torvalds check_irqs_on();
1235c0226eb8SMinchan Kim bh_lru_lock();
1236c0226eb8SMinchan Kim
12378cc621d2SMinchan Kim /*
12388cc621d2SMinchan Kim * The refcount of a buffer_head in the bh_lru prevents dropping the
12398cc621d2SMinchan Kim * attached page (i.e., via try_to_free_buffers), which can make
12408cc621d2SMinchan Kim * page migration fail.
12418cc621d2SMinchan Kim * Skip putting the upcoming bh into the bh_lru until migration is done.
12428cc621d2SMinchan Kim */
1243c0226eb8SMinchan Kim if (lru_cache_disabled()) {
1244c0226eb8SMinchan Kim bh_lru_unlock();
12458cc621d2SMinchan Kim return;
1246c0226eb8SMinchan Kim }
1247241f01fbSEric Biggers
1248241f01fbSEric Biggers b = this_cpu_ptr(&bh_lrus);
1249241f01fbSEric Biggers for (i = 0; i < BH_LRU_SIZE; i++) {
1250241f01fbSEric Biggers swap(evictee, b->bhs[i]);
1251241f01fbSEric Biggers if (evictee == bh) {
1252241f01fbSEric Biggers bh_lru_unlock();
1253241f01fbSEric Biggers return;
1254241f01fbSEric Biggers }
1255241f01fbSEric Biggers }
12561da177e4SLinus Torvalds
12571da177e4SLinus Torvalds get_bh(bh);
12581da177e4SLinus Torvalds bh_lru_unlock();
1259241f01fbSEric Biggers brelse(evictee);
12601da177e4SLinus Torvalds }
12611da177e4SLinus Torvalds
12621da177e4SLinus Torvalds /*
12631da177e4SLinus Torvalds * Look up the bh in this cpu's LRU. If it's there, move it to the head.
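 *
 * (On a hit at index i, entries 0..i-1 shift down one slot and the hit
 * buffer moves to bhs[0], so recently used buffers stay at the front.)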
12641da177e4SLinus Torvalds */ 1265858119e1SArjan van de Ven static struct buffer_head * 12663991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size) 12671da177e4SLinus Torvalds { 12681da177e4SLinus Torvalds struct buffer_head *ret = NULL; 12693991d3bdSTomasz Kvarsin unsigned int i; 12701da177e4SLinus Torvalds 12711da177e4SLinus Torvalds check_irqs_on(); 12721da177e4SLinus Torvalds bh_lru_lock(); 12731da177e4SLinus Torvalds for (i = 0; i < BH_LRU_SIZE; i++) { 1274c7b92516SChristoph Lameter struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); 12751da177e4SLinus Torvalds 12769470dd5dSZach Brown if (bh && bh->b_blocknr == block && bh->b_bdev == bdev && 12779470dd5dSZach Brown bh->b_size == size) { 12781da177e4SLinus Torvalds if (i) { 12791da177e4SLinus Torvalds while (i) { 1280c7b92516SChristoph Lameter __this_cpu_write(bh_lrus.bhs[i], 1281c7b92516SChristoph Lameter __this_cpu_read(bh_lrus.bhs[i - 1])); 12821da177e4SLinus Torvalds i--; 12831da177e4SLinus Torvalds } 1284c7b92516SChristoph Lameter __this_cpu_write(bh_lrus.bhs[0], bh); 12851da177e4SLinus Torvalds } 12861da177e4SLinus Torvalds get_bh(bh); 12871da177e4SLinus Torvalds ret = bh; 12881da177e4SLinus Torvalds break; 12891da177e4SLinus Torvalds } 12901da177e4SLinus Torvalds } 12911da177e4SLinus Torvalds bh_lru_unlock(); 12921da177e4SLinus Torvalds return ret; 12931da177e4SLinus Torvalds } 12941da177e4SLinus Torvalds 12951da177e4SLinus Torvalds /* 12961da177e4SLinus Torvalds * Perform a pagecache lookup for the matching buffer. If it's there, refresh 12971da177e4SLinus Torvalds * it in the LRU and mark it as accessed. If it is not present then return 12981da177e4SLinus Torvalds * NULL 12991da177e4SLinus Torvalds */ 13001da177e4SLinus Torvalds struct buffer_head * 13013991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size) 13021da177e4SLinus Torvalds { 13031da177e4SLinus Torvalds struct buffer_head *bh = lookup_bh_lru(bdev, block, size); 13041da177e4SLinus Torvalds 13051da177e4SLinus Torvalds if (bh == NULL) { 13062457aec6SMel Gorman /* __find_get_block_slow will mark the page accessed */ 1307385fd4c5SCoywolf Qi Hunt bh = __find_get_block_slow(bdev, block); 13081da177e4SLinus Torvalds if (bh) 13091da177e4SLinus Torvalds bh_lru_install(bh); 13102457aec6SMel Gorman } else 13111da177e4SLinus Torvalds touch_buffer(bh); 13122457aec6SMel Gorman 13131da177e4SLinus Torvalds return bh; 13141da177e4SLinus Torvalds } 13151da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block); 13161da177e4SLinus Torvalds 13171da177e4SLinus Torvalds /* 13183b5e6454SGioh Kim * __getblk_gfp() will locate (and, if necessary, create) the buffer_head 13191da177e4SLinus Torvalds * which corresponds to the passed block_device, block and size. The 13201da177e4SLinus Torvalds * returned buffer has its reference count incremented. 13211da177e4SLinus Torvalds * 13223b5e6454SGioh Kim * __getblk_gfp() will lock up the machine if grow_dev_page's 13233b5e6454SGioh Kim * try_to_free_buffers() attempt is failing. FIXME, perhaps? 
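 *
 * A hypothetical call (the block number and size are illustrative):
 *
 *	bh = __getblk_gfp(bdev, block, 512, __GFP_MOVABLE);
 *
 * The returned buffer is not guaranteed to be uptodate: callers that
 * care about the contents should use __bread_gfp() or read it in
 * explicitly.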
13241da177e4SLinus Torvalds */
13251da177e4SLinus Torvalds struct buffer_head *
13263b5e6454SGioh Kim __getblk_gfp(struct block_device *bdev, sector_t block,
13273b5e6454SGioh Kim unsigned size, gfp_t gfp)
13281da177e4SLinus Torvalds {
13291da177e4SLinus Torvalds struct buffer_head *bh = __find_get_block(bdev, block, size);
13301da177e4SLinus Torvalds
13311da177e4SLinus Torvalds might_sleep();
13321da177e4SLinus Torvalds if (bh == NULL)
13331da177e4SLinus Torvalds bh = __getblk_slow(bdev, block, size, gfp);
13341da177e4SLinus Torvalds return bh;
13351da177e4SLinus Torvalds }
13363b5e6454SGioh Kim EXPORT_SYMBOL(__getblk_gfp);
13371da177e4SLinus Torvalds
13381da177e4SLinus Torvalds /*
13391da177e4SLinus Torvalds * Do async read-ahead on a buffer.
13401da177e4SLinus Torvalds */
13413991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
13421da177e4SLinus Torvalds {
13431da177e4SLinus Torvalds struct buffer_head *bh = __getblk(bdev, block, size);
1344a3e713b5SAndrew Morton if (likely(bh)) {
134570246286SChristoph Hellwig ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
13461da177e4SLinus Torvalds brelse(bh);
13471da177e4SLinus Torvalds }
1348a3e713b5SAndrew Morton }
13491da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead);
13501da177e4SLinus Torvalds
1351d87f6392SRoman Gushchin void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size,
1352d87f6392SRoman Gushchin gfp_t gfp)
1353d87f6392SRoman Gushchin {
1354d87f6392SRoman Gushchin struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
1355d87f6392SRoman Gushchin if (likely(bh)) {
1356d87f6392SRoman Gushchin ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
1357d87f6392SRoman Gushchin brelse(bh);
1358d87f6392SRoman Gushchin }
1359d87f6392SRoman Gushchin }
1360d87f6392SRoman Gushchin EXPORT_SYMBOL(__breadahead_gfp);
1361d87f6392SRoman Gushchin
13621da177e4SLinus Torvalds /**
13633b5e6454SGioh Kim * __bread_gfp() - reads a specified block and returns the bh
136467be2dd1SMartin Waitz * @bdev: the block_device to read from
13651da177e4SLinus Torvalds * @block: block number
13661da177e4SLinus Torvalds * @size: size (in bytes) to read
13673b5e6454SGioh Kim * @gfp: page allocation flag
13681da177e4SLinus Torvalds *
13691da177e4SLinus Torvalds * Reads the specified block and returns the buffer_head that contains it.
13703b5e6454SGioh Kim * If you pass zero for @gfp, the backing page may be allocated from the
13713b5e6454SGioh Kim * non-movable area, so that a long-held buffer does not hinder page migration.
13721da177e4SLinus Torvalds * Returns NULL if the block was unreadable.
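 *
 * A minimal usage sketch (hypothetical caller; sb, blocknr and buf are
 * illustrative names, not from this file):
 *
 *	bh = __bread_gfp(sb->s_bdev, blocknr, sb->s_blocksize, __GFP_MOVABLE);
 *	if (!bh)
 *		return -EIO;
 *	memcpy(buf, bh->b_data, sb->s_blocksize);
 *	brelse(bh);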
13731da177e4SLinus Torvalds */ 13741da177e4SLinus Torvalds struct buffer_head * 13753b5e6454SGioh Kim __bread_gfp(struct block_device *bdev, sector_t block, 13763b5e6454SGioh Kim unsigned size, gfp_t gfp) 13771da177e4SLinus Torvalds { 13783b5e6454SGioh Kim struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); 13791da177e4SLinus Torvalds 1380a3e713b5SAndrew Morton if (likely(bh) && !buffer_uptodate(bh)) 13811da177e4SLinus Torvalds bh = __bread_slow(bh); 13821da177e4SLinus Torvalds return bh; 13831da177e4SLinus Torvalds } 13843b5e6454SGioh Kim EXPORT_SYMBOL(__bread_gfp); 13851da177e4SLinus Torvalds 13868cc621d2SMinchan Kim static void __invalidate_bh_lrus(struct bh_lru *b) 13878cc621d2SMinchan Kim { 13888cc621d2SMinchan Kim int i; 13898cc621d2SMinchan Kim 13908cc621d2SMinchan Kim for (i = 0; i < BH_LRU_SIZE; i++) { 13918cc621d2SMinchan Kim brelse(b->bhs[i]); 13928cc621d2SMinchan Kim b->bhs[i] = NULL; 13938cc621d2SMinchan Kim } 13948cc621d2SMinchan Kim } 13951da177e4SLinus Torvalds /* 13961da177e4SLinus Torvalds * invalidate_bh_lrus() is called rarely - but not only at unmount. 13971da177e4SLinus Torvalds * This doesn't race because it runs in each cpu either in irq 13981da177e4SLinus Torvalds * or with preempt disabled. 13991da177e4SLinus Torvalds */ 14001da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg) 14011da177e4SLinus Torvalds { 14021da177e4SLinus Torvalds struct bh_lru *b = &get_cpu_var(bh_lrus); 14031da177e4SLinus Torvalds 14048cc621d2SMinchan Kim __invalidate_bh_lrus(b); 14051da177e4SLinus Torvalds put_cpu_var(bh_lrus); 14061da177e4SLinus Torvalds } 14071da177e4SLinus Torvalds 14088cc621d2SMinchan Kim bool has_bh_in_lru(int cpu, void *dummy) 140942be35d0SGilad Ben-Yossef { 141042be35d0SGilad Ben-Yossef struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu); 141142be35d0SGilad Ben-Yossef int i; 141242be35d0SGilad Ben-Yossef 141342be35d0SGilad Ben-Yossef for (i = 0; i < BH_LRU_SIZE; i++) { 141442be35d0SGilad Ben-Yossef if (b->bhs[i]) 14151d706679SSaurav Girepunje return true; 141642be35d0SGilad Ben-Yossef } 141742be35d0SGilad Ben-Yossef 14181d706679SSaurav Girepunje return false; 141942be35d0SGilad Ben-Yossef } 142042be35d0SGilad Ben-Yossef 1421f9a14399SPeter Zijlstra void invalidate_bh_lrus(void) 14221da177e4SLinus Torvalds { 1423cb923159SSebastian Andrzej Siewior on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1); 14241da177e4SLinus Torvalds } 14259db5579bSNick Piggin EXPORT_SYMBOL_GPL(invalidate_bh_lrus); 14261da177e4SLinus Torvalds 1427243418e3SMinchan Kim /* 1428243418e3SMinchan Kim * It's called from workqueue context so we need a bh_lru_lock to close 1429243418e3SMinchan Kim * the race with preemption/irq. 
1430243418e3SMinchan Kim */ 1431243418e3SMinchan Kim void invalidate_bh_lrus_cpu(void) 14328cc621d2SMinchan Kim { 14338cc621d2SMinchan Kim struct bh_lru *b; 14348cc621d2SMinchan Kim 14358cc621d2SMinchan Kim bh_lru_lock(); 1436243418e3SMinchan Kim b = this_cpu_ptr(&bh_lrus); 14378cc621d2SMinchan Kim __invalidate_bh_lrus(b); 14388cc621d2SMinchan Kim bh_lru_unlock(); 14398cc621d2SMinchan Kim } 14408cc621d2SMinchan Kim 14411da177e4SLinus Torvalds void set_bh_page(struct buffer_head *bh, 14421da177e4SLinus Torvalds struct page *page, unsigned long offset) 14431da177e4SLinus Torvalds { 14441da177e4SLinus Torvalds bh->b_page = page; 1445e827f923SEric Sesterhenn BUG_ON(offset >= PAGE_SIZE); 14461da177e4SLinus Torvalds if (PageHighMem(page)) 14471da177e4SLinus Torvalds /* 14481da177e4SLinus Torvalds * This catches illegal uses and preserves the offset: 14491da177e4SLinus Torvalds */ 14501da177e4SLinus Torvalds bh->b_data = (char *)(0 + offset); 14511da177e4SLinus Torvalds else 14521da177e4SLinus Torvalds bh->b_data = page_address(page) + offset; 14531da177e4SLinus Torvalds } 14541da177e4SLinus Torvalds EXPORT_SYMBOL(set_bh_page); 14551da177e4SLinus Torvalds 14561da177e4SLinus Torvalds /* 14571da177e4SLinus Torvalds * Called when truncating a buffer on a page completely. 14581da177e4SLinus Torvalds */ 1459e7470ee8SMel Gorman 1460e7470ee8SMel Gorman /* Bits that are cleared during an invalidate */ 1461e7470ee8SMel Gorman #define BUFFER_FLAGS_DISCARD \ 1462e7470ee8SMel Gorman (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \ 1463e7470ee8SMel Gorman 1 << BH_Delay | 1 << BH_Unwritten) 1464e7470ee8SMel Gorman 1465858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh) 14661da177e4SLinus Torvalds { 1467e7470ee8SMel Gorman unsigned long b_state, b_state_old; 1468e7470ee8SMel Gorman 14691da177e4SLinus Torvalds lock_buffer(bh); 14701da177e4SLinus Torvalds clear_buffer_dirty(bh); 14711da177e4SLinus Torvalds bh->b_bdev = NULL; 1472e7470ee8SMel Gorman b_state = bh->b_state; 1473e7470ee8SMel Gorman for (;;) { 1474e7470ee8SMel Gorman b_state_old = cmpxchg(&bh->b_state, b_state, 1475e7470ee8SMel Gorman (b_state & ~BUFFER_FLAGS_DISCARD)); 1476e7470ee8SMel Gorman if (b_state_old == b_state) 1477e7470ee8SMel Gorman break; 1478e7470ee8SMel Gorman b_state = b_state_old; 1479e7470ee8SMel Gorman } 14801da177e4SLinus Torvalds unlock_buffer(bh); 14811da177e4SLinus Torvalds } 14821da177e4SLinus Torvalds 14831da177e4SLinus Torvalds /** 14847ba13abbSMatthew Wilcox (Oracle) * block_invalidate_folio - Invalidate part or all of a buffer-backed folio. 14857ba13abbSMatthew Wilcox (Oracle) * @folio: The folio which is affected. 1486d47992f8SLukas Czerner * @offset: start of the range to invalidate 1487d47992f8SLukas Czerner * @length: length of the range to invalidate 14881da177e4SLinus Torvalds * 14897ba13abbSMatthew Wilcox (Oracle) * block_invalidate_folio() is called when all or part of the folio has been 14901da177e4SLinus Torvalds * invalidated by a truncate operation. 14911da177e4SLinus Torvalds * 14927ba13abbSMatthew Wilcox (Oracle) * block_invalidate_folio() does not have to release all buffers, but it must 14931da177e4SLinus Torvalds * ensure that no dirty buffer is left outside @offset and that no I/O 14941da177e4SLinus Torvalds * is underway against any of the blocks which are outside the truncation 14951da177e4SLinus Torvalds * point. Because the caller is about to free (and possibly reuse) those 14961da177e4SLinus Torvalds * blocks on-disk. 
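 *
 * For example, truncating to offset 1024 on a 4096-byte folio carrying
 * 512-byte buffers (offset == 1024, length == 3072) discards the six
 * buffers lying entirely beyond byte 1024 and leaves the first two alone.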
14971da177e4SLinus Torvalds */ 14987ba13abbSMatthew Wilcox (Oracle) void block_invalidate_folio(struct folio *folio, size_t offset, size_t length) 14991da177e4SLinus Torvalds { 15001da177e4SLinus Torvalds struct buffer_head *head, *bh, *next; 15017ba13abbSMatthew Wilcox (Oracle) size_t curr_off = 0; 15027ba13abbSMatthew Wilcox (Oracle) size_t stop = length + offset; 15031da177e4SLinus Torvalds 15047ba13abbSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio)); 15051da177e4SLinus Torvalds 1506d47992f8SLukas Czerner /* 1507d47992f8SLukas Czerner * Check for overflow 1508d47992f8SLukas Czerner */ 15097ba13abbSMatthew Wilcox (Oracle) BUG_ON(stop > folio_size(folio) || stop < length); 1510d47992f8SLukas Czerner 15117ba13abbSMatthew Wilcox (Oracle) head = folio_buffers(folio); 15127ba13abbSMatthew Wilcox (Oracle) if (!head) 15137ba13abbSMatthew Wilcox (Oracle) return; 15147ba13abbSMatthew Wilcox (Oracle) 15151da177e4SLinus Torvalds bh = head; 15161da177e4SLinus Torvalds do { 15177ba13abbSMatthew Wilcox (Oracle) size_t next_off = curr_off + bh->b_size; 15181da177e4SLinus Torvalds next = bh->b_this_page; 15191da177e4SLinus Torvalds 15201da177e4SLinus Torvalds /* 1521d47992f8SLukas Czerner * Are we still fully in range ? 1522d47992f8SLukas Czerner */ 1523d47992f8SLukas Czerner if (next_off > stop) 1524d47992f8SLukas Czerner goto out; 1525d47992f8SLukas Czerner 1526d47992f8SLukas Czerner /* 15271da177e4SLinus Torvalds * is this block fully invalidated? 15281da177e4SLinus Torvalds */ 15291da177e4SLinus Torvalds if (offset <= curr_off) 15301da177e4SLinus Torvalds discard_buffer(bh); 15311da177e4SLinus Torvalds curr_off = next_off; 15321da177e4SLinus Torvalds bh = next; 15331da177e4SLinus Torvalds } while (bh != head); 15341da177e4SLinus Torvalds 15351da177e4SLinus Torvalds /* 15367ba13abbSMatthew Wilcox (Oracle) * We release buffers only if the entire folio is being invalidated. 15371da177e4SLinus Torvalds * The get_block cached value has been unconditionally invalidated, 15381da177e4SLinus Torvalds * so real IO is not possible anymore. 15391da177e4SLinus Torvalds */ 15407ba13abbSMatthew Wilcox (Oracle) if (length == folio_size(folio)) 15417ba13abbSMatthew Wilcox (Oracle) filemap_release_folio(folio, 0); 15421da177e4SLinus Torvalds out: 15432ff28e22SNeilBrown return; 15441da177e4SLinus Torvalds } 15457ba13abbSMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_invalidate_folio); 15461da177e4SLinus Torvalds 1547d47992f8SLukas Czerner 15481da177e4SLinus Torvalds /* 15491da177e4SLinus Torvalds * We attach and possibly dirty the buffers atomically wrt 1550e621900aSMatthew Wilcox (Oracle) * block_dirty_folio() via private_lock. try_to_free_buffers 15511da177e4SLinus Torvalds * is already excluded via the page lock. 
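 *
 * A typical use, mirroring create_page_buffers() later in this file
 * (sketch only):
 *
 *	if (!page_has_buffers(page))
 *		create_empty_buffers(page, 1 << inode->i_blkbits, 0);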
15521da177e4SLinus Torvalds */
15531da177e4SLinus Torvalds void create_empty_buffers(struct page *page,
15541da177e4SLinus Torvalds unsigned long blocksize, unsigned long b_state)
15551da177e4SLinus Torvalds {
15561da177e4SLinus Torvalds struct buffer_head *bh, *head, *tail;
15571da177e4SLinus Torvalds
1558640ab98fSJens Axboe head = alloc_page_buffers(page, blocksize, true);
15591da177e4SLinus Torvalds bh = head;
15601da177e4SLinus Torvalds do {
15611da177e4SLinus Torvalds bh->b_state |= b_state;
15621da177e4SLinus Torvalds tail = bh;
15631da177e4SLinus Torvalds bh = bh->b_this_page;
15641da177e4SLinus Torvalds } while (bh);
15651da177e4SLinus Torvalds tail->b_this_page = head;
15661da177e4SLinus Torvalds
15671da177e4SLinus Torvalds spin_lock(&page->mapping->private_lock);
15681da177e4SLinus Torvalds if (PageUptodate(page) || PageDirty(page)) {
15691da177e4SLinus Torvalds bh = head;
15701da177e4SLinus Torvalds do {
15711da177e4SLinus Torvalds if (PageDirty(page))
15721da177e4SLinus Torvalds set_buffer_dirty(bh);
15731da177e4SLinus Torvalds if (PageUptodate(page))
15741da177e4SLinus Torvalds set_buffer_uptodate(bh);
15751da177e4SLinus Torvalds bh = bh->b_this_page;
15761da177e4SLinus Torvalds } while (bh != head);
15771da177e4SLinus Torvalds }
157845dcfc27SGuoqing Jiang attach_page_private(page, head);
15791da177e4SLinus Torvalds spin_unlock(&page->mapping->private_lock);
15801da177e4SLinus Torvalds }
15811da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers);
15821da177e4SLinus Torvalds
158329f3ad7dSJan Kara /**
158429f3ad7dSJan Kara * clean_bdev_aliases: clean a range of buffers on a block device
158529f3ad7dSJan Kara * @bdev: Block device to clean buffers in
158629f3ad7dSJan Kara * @block: Start of a range of blocks to clean
158729f3ad7dSJan Kara * @len: Number of blocks to clean
15881da177e4SLinus Torvalds *
158929f3ad7dSJan Kara * We are taking a range of blocks for data and we don't want writeback of any
159029f3ad7dSJan Kara * buffer-cache aliases from the moment this function returns until something
159129f3ad7dSJan Kara * explicitly marks the buffer dirty (hopefully that
159229f3ad7dSJan Kara * will not happen until we free that block ;-) We don't even need to mark
159329f3ad7dSJan Kara * it not-uptodate - nobody can expect anything from a newly allocated buffer
159429f3ad7dSJan Kara * anyway. We used to use unmap_buffer() for such invalidation, but that was
159529f3ad7dSJan Kara * wrong. We definitely don't want to mark the alias unmapped, for example - it
159629f3ad7dSJan Kara * would confuse anyone who might pick it up with bread() afterwards...
159729f3ad7dSJan Kara *
159829f3ad7dSJan Kara * Also note that bforget() doesn't lock the buffer. So there can be
159929f3ad7dSJan Kara * writeout I/O going on against recently-freed buffers. We don't wait on that
160029f3ad7dSJan Kara * I/O in bforget() - it's more efficient to wait on the I/O only if we really
160129f3ad7dSJan Kara * need to. That happens here.
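 *
 * A hypothetical call site, after allocating @len new data blocks
 * starting at @block for file data:
 *
 *	clean_bdev_aliases(sb->s_bdev, block, len);
 *
 * The single-block case has a shorthand, clean_bdev_bh_alias(), used
 * elsewhere in this file.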
16021da177e4SLinus Torvalds */ 160329f3ad7dSJan Kara void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len) 16041da177e4SLinus Torvalds { 160529f3ad7dSJan Kara struct inode *bd_inode = bdev->bd_inode; 160629f3ad7dSJan Kara struct address_space *bd_mapping = bd_inode->i_mapping; 160729f3ad7dSJan Kara struct pagevec pvec; 160829f3ad7dSJan Kara pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits); 160929f3ad7dSJan Kara pgoff_t end; 1610c10f778dSJan Kara int i, count; 161129f3ad7dSJan Kara struct buffer_head *bh; 161229f3ad7dSJan Kara struct buffer_head *head; 16131da177e4SLinus Torvalds 161429f3ad7dSJan Kara end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits); 161586679820SMel Gorman pagevec_init(&pvec); 1616397162ffSJan Kara while (pagevec_lookup_range(&pvec, bd_mapping, &index, end)) { 1617c10f778dSJan Kara count = pagevec_count(&pvec); 1618c10f778dSJan Kara for (i = 0; i < count; i++) { 161929f3ad7dSJan Kara struct page *page = pvec.pages[i]; 16201da177e4SLinus Torvalds 162129f3ad7dSJan Kara if (!page_has_buffers(page)) 162229f3ad7dSJan Kara continue; 162329f3ad7dSJan Kara /* 162429f3ad7dSJan Kara * We use page lock instead of bd_mapping->private_lock 162529f3ad7dSJan Kara * to pin buffers here since we can afford to sleep and 162629f3ad7dSJan Kara * it scales better than a global spinlock lock. 162729f3ad7dSJan Kara */ 162829f3ad7dSJan Kara lock_page(page); 162929f3ad7dSJan Kara /* Recheck when the page is locked which pins bhs */ 163029f3ad7dSJan Kara if (!page_has_buffers(page)) 163129f3ad7dSJan Kara goto unlock_page; 163229f3ad7dSJan Kara head = page_buffers(page); 163329f3ad7dSJan Kara bh = head; 163429f3ad7dSJan Kara do { 16356c006a9dSChandan Rajendra if (!buffer_mapped(bh) || (bh->b_blocknr < block)) 163629f3ad7dSJan Kara goto next; 163729f3ad7dSJan Kara if (bh->b_blocknr >= block + len) 163829f3ad7dSJan Kara break; 163929f3ad7dSJan Kara clear_buffer_dirty(bh); 164029f3ad7dSJan Kara wait_on_buffer(bh); 164129f3ad7dSJan Kara clear_buffer_req(bh); 164229f3ad7dSJan Kara next: 164329f3ad7dSJan Kara bh = bh->b_this_page; 164429f3ad7dSJan Kara } while (bh != head); 164529f3ad7dSJan Kara unlock_page: 164629f3ad7dSJan Kara unlock_page(page); 164729f3ad7dSJan Kara } 164829f3ad7dSJan Kara pagevec_release(&pvec); 164929f3ad7dSJan Kara cond_resched(); 1650c10f778dSJan Kara /* End of range already reached? */ 1651c10f778dSJan Kara if (index > end || !index) 1652c10f778dSJan Kara break; 16531da177e4SLinus Torvalds } 16541da177e4SLinus Torvalds } 165529f3ad7dSJan Kara EXPORT_SYMBOL(clean_bdev_aliases); 16561da177e4SLinus Torvalds 16571da177e4SLinus Torvalds /* 165845bce8f3SLinus Torvalds * Size is a power-of-two in the range 512..PAGE_SIZE, 165945bce8f3SLinus Torvalds * and the case we care about most is PAGE_SIZE. 
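 * (For example, block_size_bits(512) == 9 and block_size_bits(4096) == 12.)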
166045bce8f3SLinus Torvalds * 166145bce8f3SLinus Torvalds * So this *could* possibly be written with those 166245bce8f3SLinus Torvalds * constraints in mind (relevant mostly if some 166345bce8f3SLinus Torvalds * architecture has a slow bit-scan instruction) 166445bce8f3SLinus Torvalds */ 166545bce8f3SLinus Torvalds static inline int block_size_bits(unsigned int blocksize) 166645bce8f3SLinus Torvalds { 166745bce8f3SLinus Torvalds return ilog2(blocksize); 166845bce8f3SLinus Torvalds } 166945bce8f3SLinus Torvalds 167045bce8f3SLinus Torvalds static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state) 167145bce8f3SLinus Torvalds { 167245bce8f3SLinus Torvalds BUG_ON(!PageLocked(page)); 167345bce8f3SLinus Torvalds 167445bce8f3SLinus Torvalds if (!page_has_buffers(page)) 16756aa7de05SMark Rutland create_empty_buffers(page, 1 << READ_ONCE(inode->i_blkbits), 16766aa7de05SMark Rutland b_state); 167745bce8f3SLinus Torvalds return page_buffers(page); 167845bce8f3SLinus Torvalds } 167945bce8f3SLinus Torvalds 168045bce8f3SLinus Torvalds /* 16811da177e4SLinus Torvalds * NOTE! All mapped/uptodate combinations are valid: 16821da177e4SLinus Torvalds * 16831da177e4SLinus Torvalds * Mapped Uptodate Meaning 16841da177e4SLinus Torvalds * 16851da177e4SLinus Torvalds * No No "unknown" - must do get_block() 16861da177e4SLinus Torvalds * No Yes "hole" - zero-filled 16871da177e4SLinus Torvalds * Yes No "allocated" - allocated on disk, not read in 16881da177e4SLinus Torvalds * Yes Yes "valid" - allocated and up-to-date in memory. 16891da177e4SLinus Torvalds * 16901da177e4SLinus Torvalds * "Dirty" is valid only with the last case (mapped+uptodate). 16911da177e4SLinus Torvalds */ 16921da177e4SLinus Torvalds 16931da177e4SLinus Torvalds /* 16941da177e4SLinus Torvalds * While block_write_full_page is writing back the dirty buffers under 16951da177e4SLinus Torvalds * the page lock, whoever dirtied the buffers may decide to clean them 16961da177e4SLinus Torvalds * again at any time. We handle that by only looking at the buffer 16971da177e4SLinus Torvalds * state inside lock_buffer(). 16981da177e4SLinus Torvalds * 16991da177e4SLinus Torvalds * If block_write_full_page() is called for regular writeback 17001da177e4SLinus Torvalds * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a 17011da177e4SLinus Torvalds * locked buffer. This only can happen if someone has written the buffer 17021da177e4SLinus Torvalds * directly, with submit_bh(). At the address_space level PageWriteback 17031da177e4SLinus Torvalds * prevents this contention from occurring. 17046e34eeddSTheodore Ts'o * 17056e34eeddSTheodore Ts'o * If block_write_full_page() is called with wbc->sync_mode == 170670fd7614SChristoph Hellwig * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this 1707721a9602SJens Axboe * causes the writes to be flagged as synchronous writes. 
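 *
 * (Concretely, the write_flags used below come from wbc_to_write_flags(),
 * which is what turns WB_SYNC_ALL writeback into REQ_SYNC writes.)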
17081da177e4SLinus Torvalds */ 1709b4bba389SBenjamin Marzinski int __block_write_full_page(struct inode *inode, struct page *page, 171035c80d5fSChris Mason get_block_t *get_block, struct writeback_control *wbc, 171135c80d5fSChris Mason bh_end_io_t *handler) 17121da177e4SLinus Torvalds { 17131da177e4SLinus Torvalds int err; 17141da177e4SLinus Torvalds sector_t block; 17151da177e4SLinus Torvalds sector_t last_block; 1716f0fbd5fcSAndrew Morton struct buffer_head *bh, *head; 171745bce8f3SLinus Torvalds unsigned int blocksize, bbits; 17181da177e4SLinus Torvalds int nr_underway = 0; 17197637241eSJens Axboe int write_flags = wbc_to_write_flags(wbc); 17201da177e4SLinus Torvalds 172145bce8f3SLinus Torvalds head = create_page_buffers(page, inode, 17221da177e4SLinus Torvalds (1 << BH_Dirty)|(1 << BH_Uptodate)); 17231da177e4SLinus Torvalds 17241da177e4SLinus Torvalds /* 1725e621900aSMatthew Wilcox (Oracle) * Be very careful. We have no exclusion from block_dirty_folio 17261da177e4SLinus Torvalds * here, and the (potentially unmapped) buffers may become dirty at 17271da177e4SLinus Torvalds * any time. If a buffer becomes dirty here after we've inspected it 17281da177e4SLinus Torvalds * then we just miss that fact, and the page stays dirty. 17291da177e4SLinus Torvalds * 1730e621900aSMatthew Wilcox (Oracle) * Buffers outside i_size may be dirtied by block_dirty_folio; 17311da177e4SLinus Torvalds * handle that here by just cleaning them. 17321da177e4SLinus Torvalds */ 17331da177e4SLinus Torvalds 17341da177e4SLinus Torvalds bh = head; 173545bce8f3SLinus Torvalds blocksize = bh->b_size; 173645bce8f3SLinus Torvalds bbits = block_size_bits(blocksize); 173745bce8f3SLinus Torvalds 173809cbfeafSKirill A. Shutemov block = (sector_t)page->index << (PAGE_SHIFT - bbits); 173945bce8f3SLinus Torvalds last_block = (i_size_read(inode) - 1) >> bbits; 17401da177e4SLinus Torvalds 17411da177e4SLinus Torvalds /* 17421da177e4SLinus Torvalds * Get all the dirty buffers mapped to disk addresses and 17431da177e4SLinus Torvalds * handle any aliases from the underlying blockdev's mapping. 17441da177e4SLinus Torvalds */ 17451da177e4SLinus Torvalds do { 17461da177e4SLinus Torvalds if (block > last_block) { 17471da177e4SLinus Torvalds /* 17481da177e4SLinus Torvalds * mapped buffers outside i_size will occur, because 17491da177e4SLinus Torvalds * this page can be outside i_size when there is a 17501da177e4SLinus Torvalds * truncate in progress. 
17511da177e4SLinus Torvalds */ 17521da177e4SLinus Torvalds /* 17531da177e4SLinus Torvalds * The buffer was zeroed by block_write_full_page() 17541da177e4SLinus Torvalds */ 17551da177e4SLinus Torvalds clear_buffer_dirty(bh); 17561da177e4SLinus Torvalds set_buffer_uptodate(bh); 175729a814d2SAlex Tomas } else if ((!buffer_mapped(bh) || buffer_delay(bh)) && 175829a814d2SAlex Tomas buffer_dirty(bh)) { 1759b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 17601da177e4SLinus Torvalds err = get_block(inode, block, bh, 1); 17611da177e4SLinus Torvalds if (err) 17621da177e4SLinus Torvalds goto recover; 176329a814d2SAlex Tomas clear_buffer_delay(bh); 17641da177e4SLinus Torvalds if (buffer_new(bh)) { 17651da177e4SLinus Torvalds /* blockdev mappings never come here */ 17661da177e4SLinus Torvalds clear_buffer_new(bh); 1767e64855c6SJan Kara clean_bdev_bh_alias(bh); 17681da177e4SLinus Torvalds } 17691da177e4SLinus Torvalds } 17701da177e4SLinus Torvalds bh = bh->b_this_page; 17711da177e4SLinus Torvalds block++; 17721da177e4SLinus Torvalds } while (bh != head); 17731da177e4SLinus Torvalds 17741da177e4SLinus Torvalds do { 17751da177e4SLinus Torvalds if (!buffer_mapped(bh)) 17761da177e4SLinus Torvalds continue; 17771da177e4SLinus Torvalds /* 17781da177e4SLinus Torvalds * If it's a fully non-blocking write attempt and we cannot 17791da177e4SLinus Torvalds * lock the buffer then redirty the page. Note that this can 17805b0830cbSJens Axboe * potentially cause a busy-wait loop from writeback threads 17815b0830cbSJens Axboe * and kswapd activity, but those code paths have their own 17825b0830cbSJens Axboe * higher-level throttling. 17831da177e4SLinus Torvalds */ 17841b430beeSWu Fengguang if (wbc->sync_mode != WB_SYNC_NONE) { 17851da177e4SLinus Torvalds lock_buffer(bh); 1786ca5de404SNick Piggin } else if (!trylock_buffer(bh)) { 17871da177e4SLinus Torvalds redirty_page_for_writepage(wbc, page); 17881da177e4SLinus Torvalds continue; 17891da177e4SLinus Torvalds } 17901da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) { 179135c80d5fSChris Mason mark_buffer_async_write_endio(bh, handler); 17921da177e4SLinus Torvalds } else { 17931da177e4SLinus Torvalds unlock_buffer(bh); 17941da177e4SLinus Torvalds } 17951da177e4SLinus Torvalds } while ((bh = bh->b_this_page) != head); 17961da177e4SLinus Torvalds 17971da177e4SLinus Torvalds /* 17981da177e4SLinus Torvalds * The page and its buffers are protected by PageWriteback(), so we can 17991da177e4SLinus Torvalds * drop the bh refcounts early. 18001da177e4SLinus Torvalds */ 18011da177e4SLinus Torvalds BUG_ON(PageWriteback(page)); 18021da177e4SLinus Torvalds set_page_writeback(page); 18031da177e4SLinus Torvalds 18041da177e4SLinus Torvalds do { 18051da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 18061da177e4SLinus Torvalds if (buffer_async_write(bh)) { 1807c75e707fSChristoph Hellwig submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc); 18081da177e4SLinus Torvalds nr_underway++; 1809ad576e63SNick Piggin } 18101da177e4SLinus Torvalds bh = next; 18111da177e4SLinus Torvalds } while (bh != head); 181205937baaSAndrew Morton unlock_page(page); 18131da177e4SLinus Torvalds 18141da177e4SLinus Torvalds err = 0; 18151da177e4SLinus Torvalds done: 18161da177e4SLinus Torvalds if (nr_underway == 0) { 18171da177e4SLinus Torvalds /* 18181da177e4SLinus Torvalds * The page was marked dirty, but the buffers were 18191da177e4SLinus Torvalds * clean. Someone wrote them back by hand with 18201da177e4SLinus Torvalds * ll_rw_block/submit_bh. A rare case. 
18211da177e4SLinus Torvalds */ 18221da177e4SLinus Torvalds end_page_writeback(page); 18233d67f2d7SNick Piggin 18241da177e4SLinus Torvalds /* 18251da177e4SLinus Torvalds * The page and buffer_heads can be released at any time from 18261da177e4SLinus Torvalds * here on. 18271da177e4SLinus Torvalds */ 18281da177e4SLinus Torvalds } 18291da177e4SLinus Torvalds return err; 18301da177e4SLinus Torvalds 18311da177e4SLinus Torvalds recover: 18321da177e4SLinus Torvalds /* 18331da177e4SLinus Torvalds * ENOSPC, or some other error. We may already have added some 18341da177e4SLinus Torvalds * blocks to the file, so we need to write these out to avoid 18351da177e4SLinus Torvalds * exposing stale data. 18361da177e4SLinus Torvalds * The page is currently locked and not marked for writeback 18371da177e4SLinus Torvalds */ 18381da177e4SLinus Torvalds bh = head; 18391da177e4SLinus Torvalds /* Recovery: lock and submit the mapped buffers */ 18401da177e4SLinus Torvalds do { 184129a814d2SAlex Tomas if (buffer_mapped(bh) && buffer_dirty(bh) && 184229a814d2SAlex Tomas !buffer_delay(bh)) { 18431da177e4SLinus Torvalds lock_buffer(bh); 184435c80d5fSChris Mason mark_buffer_async_write_endio(bh, handler); 18451da177e4SLinus Torvalds } else { 18461da177e4SLinus Torvalds /* 18471da177e4SLinus Torvalds * The buffer may have been set dirty during 18481da177e4SLinus Torvalds * attachment to a dirty page. 18491da177e4SLinus Torvalds */ 18501da177e4SLinus Torvalds clear_buffer_dirty(bh); 18511da177e4SLinus Torvalds } 18521da177e4SLinus Torvalds } while ((bh = bh->b_this_page) != head); 18531da177e4SLinus Torvalds SetPageError(page); 18541da177e4SLinus Torvalds BUG_ON(PageWriteback(page)); 18557e4c3690SAndrew Morton mapping_set_error(page->mapping, err); 18561da177e4SLinus Torvalds set_page_writeback(page); 18571da177e4SLinus Torvalds do { 18581da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 18591da177e4SLinus Torvalds if (buffer_async_write(bh)) { 18601da177e4SLinus Torvalds clear_buffer_dirty(bh); 1861c75e707fSChristoph Hellwig submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc); 18621da177e4SLinus Torvalds nr_underway++; 1863ad576e63SNick Piggin } 18641da177e4SLinus Torvalds bh = next; 18651da177e4SLinus Torvalds } while (bh != head); 1866ffda9d30SNick Piggin unlock_page(page); 18671da177e4SLinus Torvalds goto done; 18681da177e4SLinus Torvalds } 1869b4bba389SBenjamin Marzinski EXPORT_SYMBOL(__block_write_full_page); 18701da177e4SLinus Torvalds 1871afddba49SNick Piggin /* 1872afddba49SNick Piggin * If a page has any new buffers, zero them out here, and mark them uptodate 1873afddba49SNick Piggin * and dirty so they'll be written out (in order to prevent uninitialised 1874afddba49SNick Piggin * block data from leaking). And clear the new bit. 
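 *
 * The canonical call is the one in block_write_end() below, zeroing
 * whatever region a short copy left uninitialised:
 *
 *	page_zero_new_buffers(page, start + copied, start + len);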
1875afddba49SNick Piggin */ 1876afddba49SNick Piggin void page_zero_new_buffers(struct page *page, unsigned from, unsigned to) 1877afddba49SNick Piggin { 1878afddba49SNick Piggin unsigned int block_start, block_end; 1879afddba49SNick Piggin struct buffer_head *head, *bh; 1880afddba49SNick Piggin 1881afddba49SNick Piggin BUG_ON(!PageLocked(page)); 1882afddba49SNick Piggin if (!page_has_buffers(page)) 1883afddba49SNick Piggin return; 1884afddba49SNick Piggin 1885afddba49SNick Piggin bh = head = page_buffers(page); 1886afddba49SNick Piggin block_start = 0; 1887afddba49SNick Piggin do { 1888afddba49SNick Piggin block_end = block_start + bh->b_size; 1889afddba49SNick Piggin 1890afddba49SNick Piggin if (buffer_new(bh)) { 1891afddba49SNick Piggin if (block_end > from && block_start < to) { 1892afddba49SNick Piggin if (!PageUptodate(page)) { 1893afddba49SNick Piggin unsigned start, size; 1894afddba49SNick Piggin 1895afddba49SNick Piggin start = max(from, block_start); 1896afddba49SNick Piggin size = min(to, block_end) - start; 1897afddba49SNick Piggin 1898eebd2aa3SChristoph Lameter zero_user(page, start, size); 1899afddba49SNick Piggin set_buffer_uptodate(bh); 1900afddba49SNick Piggin } 1901afddba49SNick Piggin 1902afddba49SNick Piggin clear_buffer_new(bh); 1903afddba49SNick Piggin mark_buffer_dirty(bh); 1904afddba49SNick Piggin } 1905afddba49SNick Piggin } 1906afddba49SNick Piggin 1907afddba49SNick Piggin block_start = block_end; 1908afddba49SNick Piggin bh = bh->b_this_page; 1909afddba49SNick Piggin } while (bh != head); 1910afddba49SNick Piggin } 1911afddba49SNick Piggin EXPORT_SYMBOL(page_zero_new_buffers); 1912afddba49SNick Piggin 1913ae259a9cSChristoph Hellwig static void 1914ae259a9cSChristoph Hellwig iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, 19156d49cc85SChristoph Hellwig const struct iomap *iomap) 1916ae259a9cSChristoph Hellwig { 1917ae259a9cSChristoph Hellwig loff_t offset = block << inode->i_blkbits; 1918ae259a9cSChristoph Hellwig 1919ae259a9cSChristoph Hellwig bh->b_bdev = iomap->bdev; 1920ae259a9cSChristoph Hellwig 1921ae259a9cSChristoph Hellwig /* 1922ae259a9cSChristoph Hellwig * Block points to offset in file we need to map, iomap contains 1923ae259a9cSChristoph Hellwig * the offset at which the map starts. If the map ends before the 1924ae259a9cSChristoph Hellwig * current block, then do not map the buffer and let the caller 1925ae259a9cSChristoph Hellwig * handle it. 1926ae259a9cSChristoph Hellwig */ 1927ae259a9cSChristoph Hellwig BUG_ON(offset >= iomap->offset + iomap->length); 1928ae259a9cSChristoph Hellwig 1929ae259a9cSChristoph Hellwig switch (iomap->type) { 1930ae259a9cSChristoph Hellwig case IOMAP_HOLE: 1931ae259a9cSChristoph Hellwig /* 1932ae259a9cSChristoph Hellwig * If the buffer is not up to date or beyond the current EOF, 1933ae259a9cSChristoph Hellwig * we need to mark it as new to ensure sub-block zeroing is 1934ae259a9cSChristoph Hellwig * executed if necessary. 
1935ae259a9cSChristoph Hellwig */ 1936ae259a9cSChristoph Hellwig if (!buffer_uptodate(bh) || 1937ae259a9cSChristoph Hellwig (offset >= i_size_read(inode))) 1938ae259a9cSChristoph Hellwig set_buffer_new(bh); 1939ae259a9cSChristoph Hellwig break; 1940ae259a9cSChristoph Hellwig case IOMAP_DELALLOC: 1941ae259a9cSChristoph Hellwig if (!buffer_uptodate(bh) || 1942ae259a9cSChristoph Hellwig (offset >= i_size_read(inode))) 1943ae259a9cSChristoph Hellwig set_buffer_new(bh); 1944ae259a9cSChristoph Hellwig set_buffer_uptodate(bh); 1945ae259a9cSChristoph Hellwig set_buffer_mapped(bh); 1946ae259a9cSChristoph Hellwig set_buffer_delay(bh); 1947ae259a9cSChristoph Hellwig break; 1948ae259a9cSChristoph Hellwig case IOMAP_UNWRITTEN: 1949ae259a9cSChristoph Hellwig /* 19503d7b6b21SAndreas Gruenbacher * For unwritten regions, we always need to ensure that regions 19513d7b6b21SAndreas Gruenbacher * in the block we are not writing to are zeroed. Mark the 19523d7b6b21SAndreas Gruenbacher * buffer as new to ensure this. 1953ae259a9cSChristoph Hellwig */ 1954ae259a9cSChristoph Hellwig set_buffer_new(bh); 1955ae259a9cSChristoph Hellwig set_buffer_unwritten(bh); 1956df561f66SGustavo A. R. Silva fallthrough; 1957ae259a9cSChristoph Hellwig case IOMAP_MAPPED: 19583d7b6b21SAndreas Gruenbacher if ((iomap->flags & IOMAP_F_NEW) || 19593d7b6b21SAndreas Gruenbacher offset >= i_size_read(inode)) 1960ae259a9cSChristoph Hellwig set_buffer_new(bh); 196119fe5f64SAndreas Gruenbacher bh->b_blocknr = (iomap->addr + offset - iomap->offset) >> 196219fe5f64SAndreas Gruenbacher inode->i_blkbits; 1963ae259a9cSChristoph Hellwig set_buffer_mapped(bh); 1964ae259a9cSChristoph Hellwig break; 1965ae259a9cSChristoph Hellwig } 1966ae259a9cSChristoph Hellwig } 1967ae259a9cSChristoph Hellwig 1968d1bd0b4eSMatthew Wilcox (Oracle) int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len, 19696d49cc85SChristoph Hellwig get_block_t *get_block, const struct iomap *iomap) 19701da177e4SLinus Torvalds { 197109cbfeafSKirill A. Shutemov unsigned from = pos & (PAGE_SIZE - 1); 1972ebdec241SChristoph Hellwig unsigned to = from + len; 1973d1bd0b4eSMatthew Wilcox (Oracle) struct inode *inode = folio->mapping->host; 19741da177e4SLinus Torvalds unsigned block_start, block_end; 19751da177e4SLinus Torvalds sector_t block; 19761da177e4SLinus Torvalds int err = 0; 19771da177e4SLinus Torvalds unsigned blocksize, bbits; 19781da177e4SLinus Torvalds struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; 19791da177e4SLinus Torvalds 1980d1bd0b4eSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio)); 198109cbfeafSKirill A. Shutemov BUG_ON(from > PAGE_SIZE); 198209cbfeafSKirill A. 
Shutemov BUG_ON(to > PAGE_SIZE); 19831da177e4SLinus Torvalds BUG_ON(from > to); 19841da177e4SLinus Torvalds 1985d1bd0b4eSMatthew Wilcox (Oracle) head = create_page_buffers(&folio->page, inode, 0); 198645bce8f3SLinus Torvalds blocksize = head->b_size; 198745bce8f3SLinus Torvalds bbits = block_size_bits(blocksize); 19881da177e4SLinus Torvalds 1989d1bd0b4eSMatthew Wilcox (Oracle) block = (sector_t)folio->index << (PAGE_SHIFT - bbits); 19901da177e4SLinus Torvalds 19911da177e4SLinus Torvalds for(bh = head, block_start = 0; bh != head || !block_start; 19921da177e4SLinus Torvalds block++, block_start=block_end, bh = bh->b_this_page) { 19931da177e4SLinus Torvalds block_end = block_start + blocksize; 19941da177e4SLinus Torvalds if (block_end <= from || block_start >= to) { 1995d1bd0b4eSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) { 19961da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 19971da177e4SLinus Torvalds set_buffer_uptodate(bh); 19981da177e4SLinus Torvalds } 19991da177e4SLinus Torvalds continue; 20001da177e4SLinus Torvalds } 20011da177e4SLinus Torvalds if (buffer_new(bh)) 20021da177e4SLinus Torvalds clear_buffer_new(bh); 20031da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 2004b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 2005ae259a9cSChristoph Hellwig if (get_block) { 20061da177e4SLinus Torvalds err = get_block(inode, block, bh, 1); 20071da177e4SLinus Torvalds if (err) 2008f3ddbdc6SNick Piggin break; 2009ae259a9cSChristoph Hellwig } else { 2010ae259a9cSChristoph Hellwig iomap_to_bh(inode, block, bh, iomap); 2011ae259a9cSChristoph Hellwig } 2012ae259a9cSChristoph Hellwig 20131da177e4SLinus Torvalds if (buffer_new(bh)) { 2014e64855c6SJan Kara clean_bdev_bh_alias(bh); 2015d1bd0b4eSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) { 2016637aff46SNick Piggin clear_buffer_new(bh); 20171da177e4SLinus Torvalds set_buffer_uptodate(bh); 2018637aff46SNick Piggin mark_buffer_dirty(bh); 20191da177e4SLinus Torvalds continue; 20201da177e4SLinus Torvalds } 2021eebd2aa3SChristoph Lameter if (block_end > to || block_start < from) 2022d1bd0b4eSMatthew Wilcox (Oracle) folio_zero_segments(folio, 2023eebd2aa3SChristoph Lameter to, block_end, 2024eebd2aa3SChristoph Lameter block_start, from); 20251da177e4SLinus Torvalds continue; 20261da177e4SLinus Torvalds } 20271da177e4SLinus Torvalds } 2028d1bd0b4eSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) { 20291da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 20301da177e4SLinus Torvalds set_buffer_uptodate(bh); 20311da177e4SLinus Torvalds continue; 20321da177e4SLinus Torvalds } 20331da177e4SLinus Torvalds if (!buffer_uptodate(bh) && !buffer_delay(bh) && 203433a266ddSDavid Chinner !buffer_unwritten(bh) && 20351da177e4SLinus Torvalds (block_start < from || block_end > to)) { 2036dfec8a14SMike Christie ll_rw_block(REQ_OP_READ, 0, 1, &bh); 20371da177e4SLinus Torvalds *wait_bh++=bh; 20381da177e4SLinus Torvalds } 20391da177e4SLinus Torvalds } 20401da177e4SLinus Torvalds /* 20411da177e4SLinus Torvalds * If we issued read requests - let them complete. 
20421da177e4SLinus Torvalds */ 20431da177e4SLinus Torvalds while(wait_bh > wait) { 20441da177e4SLinus Torvalds wait_on_buffer(*--wait_bh); 20451da177e4SLinus Torvalds if (!buffer_uptodate(*wait_bh)) 2046f3ddbdc6SNick Piggin err = -EIO; 20471da177e4SLinus Torvalds } 2048f9f07b6cSJan Kara if (unlikely(err)) 2049d1bd0b4eSMatthew Wilcox (Oracle) page_zero_new_buffers(&folio->page, from, to); 20501da177e4SLinus Torvalds return err; 20511da177e4SLinus Torvalds } 2052ae259a9cSChristoph Hellwig 2053ae259a9cSChristoph Hellwig int __block_write_begin(struct page *page, loff_t pos, unsigned len, 2054ae259a9cSChristoph Hellwig get_block_t *get_block) 2055ae259a9cSChristoph Hellwig { 2056d1bd0b4eSMatthew Wilcox (Oracle) return __block_write_begin_int(page_folio(page), pos, len, get_block, 2057d1bd0b4eSMatthew Wilcox (Oracle) NULL); 2058ae259a9cSChristoph Hellwig } 2059ebdec241SChristoph Hellwig EXPORT_SYMBOL(__block_write_begin); 20601da177e4SLinus Torvalds 20611da177e4SLinus Torvalds static int __block_commit_write(struct inode *inode, struct page *page, 20621da177e4SLinus Torvalds unsigned from, unsigned to) 20631da177e4SLinus Torvalds { 20641da177e4SLinus Torvalds unsigned block_start, block_end; 20651da177e4SLinus Torvalds int partial = 0; 20661da177e4SLinus Torvalds unsigned blocksize; 20671da177e4SLinus Torvalds struct buffer_head *bh, *head; 20681da177e4SLinus Torvalds 206945bce8f3SLinus Torvalds bh = head = page_buffers(page); 207045bce8f3SLinus Torvalds blocksize = bh->b_size; 20711da177e4SLinus Torvalds 207245bce8f3SLinus Torvalds block_start = 0; 207345bce8f3SLinus Torvalds do { 20741da177e4SLinus Torvalds block_end = block_start + blocksize; 20751da177e4SLinus Torvalds if (block_end <= from || block_start >= to) { 20761da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 20771da177e4SLinus Torvalds partial = 1; 20781da177e4SLinus Torvalds } else { 20791da177e4SLinus Torvalds set_buffer_uptodate(bh); 20801da177e4SLinus Torvalds mark_buffer_dirty(bh); 20811da177e4SLinus Torvalds } 20824ebd3aecSYang Guo if (buffer_new(bh)) 2083afddba49SNick Piggin clear_buffer_new(bh); 208445bce8f3SLinus Torvalds 208545bce8f3SLinus Torvalds block_start = block_end; 208645bce8f3SLinus Torvalds bh = bh->b_this_page; 208745bce8f3SLinus Torvalds } while (bh != head); 20881da177e4SLinus Torvalds 20891da177e4SLinus Torvalds /* 20901da177e4SLinus Torvalds * If this is a partial write which happened to make all buffers 20911da177e4SLinus Torvalds * uptodate then we can optimize away a bogus readpage() for 20921da177e4SLinus Torvalds * the next read(). Here we 'discover' whether the page went 20931da177e4SLinus Torvalds * uptodate as a result of this (potentially partial) write. 20941da177e4SLinus Torvalds */ 20951da177e4SLinus Torvalds if (!partial) 20961da177e4SLinus Torvalds SetPageUptodate(page); 20971da177e4SLinus Torvalds return 0; 20981da177e4SLinus Torvalds } 20991da177e4SLinus Torvalds 21001da177e4SLinus Torvalds /* 2101155130a4SChristoph Hellwig * block_write_begin takes care of the basic task of block allocation and 2102155130a4SChristoph Hellwig * bringing partial write blocks uptodate first. 2103155130a4SChristoph Hellwig * 21047bb46a67Snpiggin@suse.de * The filesystem needs to handle block truncation upon failure. 2105afddba49SNick Piggin */ 2106155130a4SChristoph Hellwig int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, 2107155130a4SChristoph Hellwig unsigned flags, struct page **pagep, get_block_t *get_block) 2108afddba49SNick Piggin { 210909cbfeafSKirill A. 
Shutemov pgoff_t index = pos >> PAGE_SHIFT; 2110afddba49SNick Piggin struct page *page; 21116e1db88dSChristoph Hellwig int status; 2112afddba49SNick Piggin 211354566b2cSNick Piggin page = grab_cache_page_write_begin(mapping, index, flags); 21146e1db88dSChristoph Hellwig if (!page) 21156e1db88dSChristoph Hellwig return -ENOMEM; 2116afddba49SNick Piggin 21176e1db88dSChristoph Hellwig status = __block_write_begin(page, pos, len, get_block); 2118afddba49SNick Piggin if (unlikely(status)) { 2119afddba49SNick Piggin unlock_page(page); 212009cbfeafSKirill A. Shutemov put_page(page); 21216e1db88dSChristoph Hellwig page = NULL; 2122afddba49SNick Piggin } 2123afddba49SNick Piggin 21246e1db88dSChristoph Hellwig *pagep = page; 2125afddba49SNick Piggin return status; 2126afddba49SNick Piggin } 2127afddba49SNick Piggin EXPORT_SYMBOL(block_write_begin); 2128afddba49SNick Piggin 2129afddba49SNick Piggin int block_write_end(struct file *file, struct address_space *mapping, 2130afddba49SNick Piggin loff_t pos, unsigned len, unsigned copied, 2131afddba49SNick Piggin struct page *page, void *fsdata) 2132afddba49SNick Piggin { 2133afddba49SNick Piggin struct inode *inode = mapping->host; 2134afddba49SNick Piggin unsigned start; 2135afddba49SNick Piggin 213609cbfeafSKirill A. Shutemov start = pos & (PAGE_SIZE - 1); 2137afddba49SNick Piggin 2138afddba49SNick Piggin if (unlikely(copied < len)) { 2139afddba49SNick Piggin /* 2140afddba49SNick Piggin * The buffers that were written will now be uptodate, so we 2141afddba49SNick Piggin * don't have to worry about a readpage reading them and 2142afddba49SNick Piggin * overwriting a partial write. However if we have encountered 2143afddba49SNick Piggin * a short write and only partially written into a buffer, it 2144afddba49SNick Piggin * will not be marked uptodate, so a readpage might come in and 2145afddba49SNick Piggin * destroy our partial write. 2146afddba49SNick Piggin * 2147afddba49SNick Piggin * Do the simplest thing, and just treat any short write to a 2148afddba49SNick Piggin * non uptodate page as a zero-length write, and force the 2149afddba49SNick Piggin * caller to redo the whole thing. 
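 *
 * (Hypothetical example: a copy that faults after 10 bytes into a
 * not-uptodate page is reported here as copied == 0, and the caller
 * repeats the whole write.)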
2150afddba49SNick Piggin */ 2151afddba49SNick Piggin if (!PageUptodate(page)) 2152afddba49SNick Piggin copied = 0; 2153afddba49SNick Piggin 2154afddba49SNick Piggin page_zero_new_buffers(page, start+copied, start+len); 2155afddba49SNick Piggin } 2156afddba49SNick Piggin flush_dcache_page(page); 2157afddba49SNick Piggin 2158afddba49SNick Piggin /* This could be a short (even 0-length) commit */ 2159afddba49SNick Piggin __block_commit_write(inode, page, start, start+copied); 2160afddba49SNick Piggin 2161afddba49SNick Piggin return copied; 2162afddba49SNick Piggin } 2163afddba49SNick Piggin EXPORT_SYMBOL(block_write_end); 2164afddba49SNick Piggin 2165afddba49SNick Piggin int generic_write_end(struct file *file, struct address_space *mapping, 2166afddba49SNick Piggin loff_t pos, unsigned len, unsigned copied, 2167afddba49SNick Piggin struct page *page, void *fsdata) 2168afddba49SNick Piggin { 21698af54f29SChristoph Hellwig struct inode *inode = mapping->host; 21708af54f29SChristoph Hellwig loff_t old_size = inode->i_size; 21718af54f29SChristoph Hellwig bool i_size_changed = false; 21728af54f29SChristoph Hellwig 2173afddba49SNick Piggin copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); 21748af54f29SChristoph Hellwig 21758af54f29SChristoph Hellwig /* 21768af54f29SChristoph Hellwig * No need to use i_size_read() here, the i_size cannot change under us 21778af54f29SChristoph Hellwig * because we hold i_rwsem. 21788af54f29SChristoph Hellwig * 21798af54f29SChristoph Hellwig * But it's important to update i_size while still holding page lock: 21808af54f29SChristoph Hellwig * page writeout could otherwise come in and zero beyond i_size. 21818af54f29SChristoph Hellwig */ 21828af54f29SChristoph Hellwig if (pos + copied > inode->i_size) { 21838af54f29SChristoph Hellwig i_size_write(inode, pos + copied); 21848af54f29SChristoph Hellwig i_size_changed = true; 21858af54f29SChristoph Hellwig } 21868af54f29SChristoph Hellwig 21878af54f29SChristoph Hellwig unlock_page(page); 21887a77dad7SAndreas Gruenbacher put_page(page); 21898af54f29SChristoph Hellwig 21908af54f29SChristoph Hellwig if (old_size < pos) 21918af54f29SChristoph Hellwig pagecache_isize_extended(inode, old_size, pos); 21928af54f29SChristoph Hellwig /* 21938af54f29SChristoph Hellwig * Don't mark the inode dirty under page lock. First, it unnecessarily 21948af54f29SChristoph Hellwig * makes the holding time of page lock longer. Second, it forces lock 21958af54f29SChristoph Hellwig * ordering of page lock and transaction start for journaling 21968af54f29SChristoph Hellwig * filesystems. 21978af54f29SChristoph Hellwig */ 21988af54f29SChristoph Hellwig if (i_size_changed) 21998af54f29SChristoph Hellwig mark_inode_dirty(inode); 220026ddb1f4SAndreas Gruenbacher return copied; 2201afddba49SNick Piggin } 2202afddba49SNick Piggin EXPORT_SYMBOL(generic_write_end); 2203afddba49SNick Piggin 2204afddba49SNick Piggin /* 22052e7e80f7SMatthew Wilcox (Oracle) * block_is_partially_uptodate checks whether buffers within a folio are 22068ab22b9aSHisashi Hifumi * uptodate or not. 22078ab22b9aSHisashi Hifumi * 22082e7e80f7SMatthew Wilcox (Oracle) * Returns true if all buffers which correspond to the specified part 22092e7e80f7SMatthew Wilcox (Oracle) * of the folio are uptodate. 
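 *
 * For example, on a 4096-byte folio of 1024-byte blocks, a read of
 * bytes 100..199 only needs the first buffer to be uptodate for this
 * to return true.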
22108ab22b9aSHisashi Hifumi */ 22112e7e80f7SMatthew Wilcox (Oracle) bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count) 22128ab22b9aSHisashi Hifumi { 22138ab22b9aSHisashi Hifumi unsigned block_start, block_end, blocksize; 22148ab22b9aSHisashi Hifumi unsigned to; 22158ab22b9aSHisashi Hifumi struct buffer_head *bh, *head; 22162e7e80f7SMatthew Wilcox (Oracle) bool ret = true; 22178ab22b9aSHisashi Hifumi 22182e7e80f7SMatthew Wilcox (Oracle) head = folio_buffers(folio); 22192e7e80f7SMatthew Wilcox (Oracle) if (!head) 22202e7e80f7SMatthew Wilcox (Oracle) return false; 222145bce8f3SLinus Torvalds blocksize = head->b_size; 22222e7e80f7SMatthew Wilcox (Oracle) to = min_t(unsigned, folio_size(folio) - from, count); 22238ab22b9aSHisashi Hifumi to = from + to; 22242e7e80f7SMatthew Wilcox (Oracle) if (from < blocksize && to > folio_size(folio) - blocksize) 22252e7e80f7SMatthew Wilcox (Oracle) return false; 22268ab22b9aSHisashi Hifumi 22278ab22b9aSHisashi Hifumi bh = head; 22288ab22b9aSHisashi Hifumi block_start = 0; 22298ab22b9aSHisashi Hifumi do { 22308ab22b9aSHisashi Hifumi block_end = block_start + blocksize; 22318ab22b9aSHisashi Hifumi if (block_end > from && block_start < to) { 22328ab22b9aSHisashi Hifumi if (!buffer_uptodate(bh)) { 22332e7e80f7SMatthew Wilcox (Oracle) ret = false; 22348ab22b9aSHisashi Hifumi break; 22358ab22b9aSHisashi Hifumi } 22368ab22b9aSHisashi Hifumi if (block_end >= to) 22378ab22b9aSHisashi Hifumi break; 22388ab22b9aSHisashi Hifumi } 22398ab22b9aSHisashi Hifumi block_start = block_end; 22408ab22b9aSHisashi Hifumi bh = bh->b_this_page; 22418ab22b9aSHisashi Hifumi } while (bh != head); 22428ab22b9aSHisashi Hifumi 22438ab22b9aSHisashi Hifumi return ret; 22448ab22b9aSHisashi Hifumi } 22458ab22b9aSHisashi Hifumi EXPORT_SYMBOL(block_is_partially_uptodate); 22468ab22b9aSHisashi Hifumi 22478ab22b9aSHisashi Hifumi /* 22481da177e4SLinus Torvalds * Generic "read page" function for block devices that have the normal 22491da177e4SLinus Torvalds * get_block functionality. This is most of the block device filesystems. 22501da177e4SLinus Torvalds * Reads the page asynchronously --- the unlock_buffer() and 22511da177e4SLinus Torvalds * set/clear_buffer_uptodate() functions propagate buffer state into the 22521da177e4SLinus Torvalds * page struct once IO has completed. 22531da177e4SLinus Torvalds */ 22541da177e4SLinus Torvalds int block_read_full_page(struct page *page, get_block_t *get_block) 22551da177e4SLinus Torvalds { 22561da177e4SLinus Torvalds struct inode *inode = page->mapping->host; 22571da177e4SLinus Torvalds sector_t iblock, lblock; 22581da177e4SLinus Torvalds struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; 225945bce8f3SLinus Torvalds unsigned int blocksize, bbits; 22601da177e4SLinus Torvalds int nr, i; 22611da177e4SLinus Torvalds int fully_mapped = 1; 22621da177e4SLinus Torvalds 226345bce8f3SLinus Torvalds head = create_page_buffers(page, inode, 0); 226445bce8f3SLinus Torvalds blocksize = head->b_size; 226545bce8f3SLinus Torvalds bbits = block_size_bits(blocksize); 22661da177e4SLinus Torvalds 226709cbfeafSKirill A. 
Shutemov iblock = (sector_t)page->index << (PAGE_SHIFT - bbits); 226845bce8f3SLinus Torvalds lblock = (i_size_read(inode)+blocksize-1) >> bbits; 22691da177e4SLinus Torvalds bh = head; 22701da177e4SLinus Torvalds nr = 0; 22711da177e4SLinus Torvalds i = 0; 22721da177e4SLinus Torvalds 22731da177e4SLinus Torvalds do { 22741da177e4SLinus Torvalds if (buffer_uptodate(bh)) 22751da177e4SLinus Torvalds continue; 22761da177e4SLinus Torvalds 22771da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 2278c64610baSAndrew Morton int err = 0; 2279c64610baSAndrew Morton 22801da177e4SLinus Torvalds fully_mapped = 0; 22811da177e4SLinus Torvalds if (iblock < lblock) { 2282b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 2283c64610baSAndrew Morton err = get_block(inode, iblock, bh, 0); 2284c64610baSAndrew Morton if (err) 22851da177e4SLinus Torvalds SetPageError(page); 22861da177e4SLinus Torvalds } 22871da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 2288eebd2aa3SChristoph Lameter zero_user(page, i * blocksize, blocksize); 2289c64610baSAndrew Morton if (!err) 22901da177e4SLinus Torvalds set_buffer_uptodate(bh); 22911da177e4SLinus Torvalds continue; 22921da177e4SLinus Torvalds } 22931da177e4SLinus Torvalds /* 22941da177e4SLinus Torvalds * get_block() might have updated the buffer 22951da177e4SLinus Torvalds * synchronously 22961da177e4SLinus Torvalds */ 22971da177e4SLinus Torvalds if (buffer_uptodate(bh)) 22981da177e4SLinus Torvalds continue; 22991da177e4SLinus Torvalds } 23001da177e4SLinus Torvalds arr[nr++] = bh; 23011da177e4SLinus Torvalds } while (i++, iblock++, (bh = bh->b_this_page) != head); 23021da177e4SLinus Torvalds 23031da177e4SLinus Torvalds if (fully_mapped) 23041da177e4SLinus Torvalds SetPageMappedToDisk(page); 23051da177e4SLinus Torvalds 23061da177e4SLinus Torvalds if (!nr) { 23071da177e4SLinus Torvalds /* 23081da177e4SLinus Torvalds * All buffers are uptodate - we can set the page uptodate 23091da177e4SLinus Torvalds * as well. But not if get_block() returned an error. 23101da177e4SLinus Torvalds */ 23111da177e4SLinus Torvalds if (!PageError(page)) 23121da177e4SLinus Torvalds SetPageUptodate(page); 23131da177e4SLinus Torvalds unlock_page(page); 23141da177e4SLinus Torvalds return 0; 23151da177e4SLinus Torvalds } 23161da177e4SLinus Torvalds 23171da177e4SLinus Torvalds /* Stage two: lock the buffers */ 23181da177e4SLinus Torvalds for (i = 0; i < nr; i++) { 23191da177e4SLinus Torvalds bh = arr[i]; 23201da177e4SLinus Torvalds lock_buffer(bh); 23211da177e4SLinus Torvalds mark_buffer_async_read(bh); 23221da177e4SLinus Torvalds } 23231da177e4SLinus Torvalds 23241da177e4SLinus Torvalds /* 23251da177e4SLinus Torvalds * Stage 3: start the IO. Check for uptodateness 23261da177e4SLinus Torvalds * inside the buffer lock in case another process reading 23271da177e4SLinus Torvalds * the underlying blockdev brought it uptodate (the sct fix). 23281da177e4SLinus Torvalds */ 23291da177e4SLinus Torvalds for (i = 0; i < nr; i++) { 23301da177e4SLinus Torvalds bh = arr[i]; 23311da177e4SLinus Torvalds if (buffer_uptodate(bh)) 23321da177e4SLinus Torvalds end_buffer_async_read(bh, 1); 23331da177e4SLinus Torvalds else 23342a222ca9SMike Christie submit_bh(REQ_OP_READ, 0, bh); 23351da177e4SLinus Torvalds } 23361da177e4SLinus Torvalds return 0; 23371da177e4SLinus Torvalds } 23381fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_read_full_page); 23391da177e4SLinus Torvalds 23401da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding 234189e10787SNick Piggin * truncates. 
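 * (A typical caller sketch, illustrative rather than taken from this
 * file: a filesystem's ->setattr can grow the file with
 * generic_cont_expand_simple(inode, attr->ia_size) before it commits
 * the new size.)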
Uses filesystem pagecache writes to allow the filesystem to 23421da177e4SLinus Torvalds * deal with the hole. 23431da177e4SLinus Torvalds */ 234489e10787SNick Piggin int generic_cont_expand_simple(struct inode *inode, loff_t size) 23451da177e4SLinus Torvalds { 23461da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 23471da177e4SLinus Torvalds struct page *page; 234889e10787SNick Piggin void *fsdata; 23491da177e4SLinus Torvalds int err; 23501da177e4SLinus Torvalds 2351c08d3b0eSnpiggin@suse.de err = inode_newsize_ok(inode, size); 2352c08d3b0eSnpiggin@suse.de if (err) 23531da177e4SLinus Torvalds goto out; 23541da177e4SLinus Torvalds 2355*d7414ba1SMatthew Wilcox (Oracle) err = pagecache_write_begin(NULL, mapping, size, 0, 0, &page, &fsdata); 235689e10787SNick Piggin if (err) 235705eb0b51SOGAWA Hirofumi goto out; 235805eb0b51SOGAWA Hirofumi 235989e10787SNick Piggin err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata); 236089e10787SNick Piggin BUG_ON(err > 0); 236105eb0b51SOGAWA Hirofumi 236205eb0b51SOGAWA Hirofumi out: 236305eb0b51SOGAWA Hirofumi return err; 236405eb0b51SOGAWA Hirofumi } 23651fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_cont_expand_simple); 236605eb0b51SOGAWA Hirofumi 2367f1e3af72SAdrian Bunk static int cont_expand_zero(struct file *file, struct address_space *mapping, 236889e10787SNick Piggin loff_t pos, loff_t *bytes) 236905eb0b51SOGAWA Hirofumi { 237089e10787SNick Piggin struct inode *inode = mapping->host; 237193407472SFabian Frederick unsigned int blocksize = i_blocksize(inode); 237289e10787SNick Piggin struct page *page; 237389e10787SNick Piggin void *fsdata; 237489e10787SNick Piggin pgoff_t index, curidx; 237589e10787SNick Piggin loff_t curpos; 237689e10787SNick Piggin unsigned zerofrom, offset, len; 237789e10787SNick Piggin int err = 0; 237805eb0b51SOGAWA Hirofumi 237909cbfeafSKirill A. Shutemov index = pos >> PAGE_SHIFT; 238009cbfeafSKirill A. Shutemov offset = pos & ~PAGE_MASK; 238189e10787SNick Piggin 238209cbfeafSKirill A. Shutemov while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) { 238309cbfeafSKirill A. Shutemov zerofrom = curpos & ~PAGE_MASK; 238489e10787SNick Piggin if (zerofrom & (blocksize-1)) { 238589e10787SNick Piggin *bytes |= (blocksize-1); 238689e10787SNick Piggin (*bytes)++; 238789e10787SNick Piggin } 238809cbfeafSKirill A. Shutemov len = PAGE_SIZE - zerofrom; 238989e10787SNick Piggin 2390c718a975STetsuo Handa err = pagecache_write_begin(file, mapping, curpos, len, 0, 239189e10787SNick Piggin &page, &fsdata); 239289e10787SNick Piggin if (err) 239389e10787SNick Piggin goto out; 2394eebd2aa3SChristoph Lameter zero_user(page, zerofrom, len); 239589e10787SNick Piggin err = pagecache_write_end(file, mapping, curpos, len, len, 239689e10787SNick Piggin page, fsdata); 239789e10787SNick Piggin if (err < 0) 239889e10787SNick Piggin goto out; 239989e10787SNick Piggin BUG_ON(err != len); 240089e10787SNick Piggin err = 0; 2401061e9746SOGAWA Hirofumi 2402061e9746SOGAWA Hirofumi balance_dirty_pages_ratelimited(mapping); 2403c2ca0fcdSMikulas Patocka 240408d405c8SDavidlohr Bueso if (fatal_signal_pending(current)) { 2405c2ca0fcdSMikulas Patocka err = -EINTR; 2406c2ca0fcdSMikulas Patocka goto out; 2407c2ca0fcdSMikulas Patocka } 240889e10787SNick Piggin } 240989e10787SNick Piggin 241089e10787SNick Piggin /* page covers the boundary, find the boundary offset */ 241189e10787SNick Piggin if (index == curidx) { 241209cbfeafSKirill A. 
Shutemov zerofrom = curpos & ~PAGE_MASK; 241389e10787SNick Piggin /* if we will expand the file, the last block will be filled */ 241489e10787SNick Piggin if (offset <= zerofrom) { 241589e10787SNick Piggin goto out; 241689e10787SNick Piggin } 241789e10787SNick Piggin if (zerofrom & (blocksize-1)) { 241889e10787SNick Piggin *bytes |= (blocksize-1); 241989e10787SNick Piggin (*bytes)++; 242089e10787SNick Piggin } 242189e10787SNick Piggin len = offset - zerofrom; 242289e10787SNick Piggin 2423c718a975STetsuo Handa err = pagecache_write_begin(file, mapping, curpos, len, 0, 242489e10787SNick Piggin &page, &fsdata); 242589e10787SNick Piggin if (err) 242689e10787SNick Piggin goto out; 2427eebd2aa3SChristoph Lameter zero_user(page, zerofrom, len); 242889e10787SNick Piggin err = pagecache_write_end(file, mapping, curpos, len, len, 242989e10787SNick Piggin page, fsdata); 243089e10787SNick Piggin if (err < 0) 243189e10787SNick Piggin goto out; 243289e10787SNick Piggin BUG_ON(err != len); 243389e10787SNick Piggin err = 0; 243489e10787SNick Piggin } 243589e10787SNick Piggin out: 243689e10787SNick Piggin return err; 24371da177e4SLinus Torvalds } 24381da177e4SLinus Torvalds 24391da177e4SLinus Torvalds /* 24401da177e4SLinus Torvalds * For moronic filesystems that do not allow holes in files. 24411da177e4SLinus Torvalds * We may have to extend the file. 24421da177e4SLinus Torvalds */ 2443282dc178SChristoph Hellwig int cont_write_begin(struct file *file, struct address_space *mapping, 244489e10787SNick Piggin loff_t pos, unsigned len, unsigned flags, 244589e10787SNick Piggin struct page **pagep, void **fsdata, 244689e10787SNick Piggin get_block_t *get_block, loff_t *bytes) 24471da177e4SLinus Torvalds { 24481da177e4SLinus Torvalds struct inode *inode = mapping->host; 244993407472SFabian Frederick unsigned int blocksize = i_blocksize(inode); 245093407472SFabian Frederick unsigned int zerofrom; 245189e10787SNick Piggin int err; 24521da177e4SLinus Torvalds 245389e10787SNick Piggin err = cont_expand_zero(file, mapping, pos, bytes); 245489e10787SNick Piggin if (err) 2455155130a4SChristoph Hellwig return err; 24561da177e4SLinus Torvalds 245709cbfeafSKirill A. Shutemov zerofrom = *bytes & ~PAGE_MASK; 245889e10787SNick Piggin if (pos+len > *bytes && zerofrom & (blocksize-1)) { 24591da177e4SLinus Torvalds *bytes |= (blocksize-1); 24601da177e4SLinus Torvalds (*bytes)++; 24611da177e4SLinus Torvalds } 24621da177e4SLinus Torvalds 2463155130a4SChristoph Hellwig return block_write_begin(mapping, pos, len, flags, pagep, get_block); 24641da177e4SLinus Torvalds } 24651fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(cont_write_begin); 24661da177e4SLinus Torvalds 24671da177e4SLinus Torvalds int block_commit_write(struct page *page, unsigned from, unsigned to) 24681da177e4SLinus Torvalds { 24691da177e4SLinus Torvalds struct inode *inode = page->mapping->host; 24701da177e4SLinus Torvalds __block_commit_write(inode, page, from, to); 24711da177e4SLinus Torvalds return 0; 24721da177e4SLinus Torvalds } 24731fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_commit_write); 24741da177e4SLinus Torvalds 247554171690SDavid Chinner /* 247654171690SDavid Chinner * block_page_mkwrite() is not allowed to change the file size as it gets 247754171690SDavid Chinner * called from a page fault handler when a page is first dirtied. Hence we must 247854171690SDavid Chinner * be careful to check for EOF conditions here.
We set the page up correctly 247954171690SDavid Chinner * for a written page which means we get ENOSPC checking when writing into 248054171690SDavid Chinner * holes and correct delalloc and unwritten extent mapping on filesystems that 248154171690SDavid Chinner * support these features. 248254171690SDavid Chinner * 248354171690SDavid Chinner * We are not allowed to take the i_mutex here so we have to play games to 248454171690SDavid Chinner * protect against truncate races as the page could now be beyond EOF. Because 24857bb46a67Snpiggin@suse.de * truncate writes the inode size before removing pages, once we have the 248654171690SDavid Chinner * page lock we can determine safely if the page is beyond EOF. If it is not 248754171690SDavid Chinner * beyond EOF, then the page is guaranteed safe against truncation until we 248854171690SDavid Chinner * unlock the page. 2489ea13a864SJan Kara * 249014da9200SJan Kara * Direct callers of this function should protect against filesystem freezing 24915c500029SRoss Zwisler * using sb_start_pagefault() - sb_end_pagefault() functions. 249254171690SDavid Chinner */ 24935c500029SRoss Zwisler int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 249454171690SDavid Chinner get_block_t get_block) 249554171690SDavid Chinner { 2496c2ec175cSNick Piggin struct page *page = vmf->page; 2497496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 249854171690SDavid Chinner unsigned long end; 249954171690SDavid Chinner loff_t size; 250024da4fabSJan Kara int ret; 250154171690SDavid Chinner 250254171690SDavid Chinner lock_page(page); 250354171690SDavid Chinner size = i_size_read(inode); 250454171690SDavid Chinner if ((page->mapping != inode->i_mapping) || 250518336338SNick Piggin (page_offset(page) > size)) { 250624da4fabSJan Kara /* We overload EFAULT to mean page got truncated */ 250724da4fabSJan Kara ret = -EFAULT; 250824da4fabSJan Kara goto out_unlock; 250954171690SDavid Chinner } 251054171690SDavid Chinner 251154171690SDavid Chinner /* page is wholly or partially inside EOF */ 251209cbfeafSKirill A. Shutemov if (((page->index + 1) << PAGE_SHIFT) > size) 251309cbfeafSKirill A. Shutemov end = size & ~PAGE_MASK; 251454171690SDavid Chinner else 251509cbfeafSKirill A. Shutemov end = PAGE_SIZE; 251654171690SDavid Chinner 2517ebdec241SChristoph Hellwig ret = __block_write_begin(page, 0, end, get_block); 251854171690SDavid Chinner if (!ret) 251954171690SDavid Chinner ret = block_commit_write(page, 0, end); 252054171690SDavid Chinner 252124da4fabSJan Kara if (unlikely(ret < 0)) 252224da4fabSJan Kara goto out_unlock; 2523ea13a864SJan Kara set_page_dirty(page); 25241d1d1a76SDarrick J. Wong wait_for_stable_page(page); 252524da4fabSJan Kara return 0; 252624da4fabSJan Kara out_unlock: 2527b827e496SNick Piggin unlock_page(page); 252854171690SDavid Chinner return ret; 252954171690SDavid Chinner } 25301fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_page_mkwrite); 25311da177e4SLinus Torvalds 25321da177e4SLinus Torvalds /* 253303158cd7SNick Piggin * nobh_write_begin()'s prereads are special: the buffer_heads are freed 25341da177e4SLinus Torvalds * immediately, while under the page lock. So it needs a special end_io 25351da177e4SLinus Torvalds * handler which does not touch the bh after unlocking it. 
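 *
 * (Put differently: once __end_buffer_read_notouch() drops BH_Lock,
 * nobh_write_begin() can return from wait_on_buffer() and free the bh
 * at any moment, so dereferencing the bh here after the unlock would
 * be a use-after-free.)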
25361da177e4SLinus Torvalds */ 25371da177e4SLinus Torvalds static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate) 25381da177e4SLinus Torvalds { 253968671f35SDmitry Monakhov __end_buffer_read_notouch(bh, uptodate); 25401da177e4SLinus Torvalds } 25411da177e4SLinus Torvalds 25421da177e4SLinus Torvalds /* 254303158cd7SNick Piggin * Attach the singly-linked list of buffers created by nobh_write_begin, to 254403158cd7SNick Piggin * the page (converting it to circular linked list and taking care of page 254503158cd7SNick Piggin * dirty races). 254603158cd7SNick Piggin */ 254703158cd7SNick Piggin static void attach_nobh_buffers(struct page *page, struct buffer_head *head) 254803158cd7SNick Piggin { 254903158cd7SNick Piggin struct buffer_head *bh; 255003158cd7SNick Piggin 255103158cd7SNick Piggin BUG_ON(!PageLocked(page)); 255203158cd7SNick Piggin 255303158cd7SNick Piggin spin_lock(&page->mapping->private_lock); 255403158cd7SNick Piggin bh = head; 255503158cd7SNick Piggin do { 255603158cd7SNick Piggin if (PageDirty(page)) 255703158cd7SNick Piggin set_buffer_dirty(bh); 255803158cd7SNick Piggin if (!bh->b_this_page) 255903158cd7SNick Piggin bh->b_this_page = head; 256003158cd7SNick Piggin bh = bh->b_this_page; 256103158cd7SNick Piggin } while (bh != head); 256245dcfc27SGuoqing Jiang attach_page_private(page, head); 256303158cd7SNick Piggin spin_unlock(&page->mapping->private_lock); 256403158cd7SNick Piggin } 256503158cd7SNick Piggin 256603158cd7SNick Piggin /* 2567ea0f04e5SChristoph Hellwig * On entry, the page is fully not uptodate. 2568ea0f04e5SChristoph Hellwig * On exit the page is fully uptodate in the areas outside (from,to) 25697bb46a67Snpiggin@suse.de * The filesystem needs to handle block truncation upon failure. 25701da177e4SLinus Torvalds */ 2571ea0f04e5SChristoph Hellwig int nobh_write_begin(struct address_space *mapping, 257203158cd7SNick Piggin loff_t pos, unsigned len, unsigned flags, 257303158cd7SNick Piggin struct page **pagep, void **fsdata, 25741da177e4SLinus Torvalds get_block_t *get_block) 25751da177e4SLinus Torvalds { 257603158cd7SNick Piggin struct inode *inode = mapping->host; 25771da177e4SLinus Torvalds const unsigned blkbits = inode->i_blkbits; 25781da177e4SLinus Torvalds const unsigned blocksize = 1 << blkbits; 2579a4b0672dSNick Piggin struct buffer_head *head, *bh; 258003158cd7SNick Piggin struct page *page; 258103158cd7SNick Piggin pgoff_t index; 258203158cd7SNick Piggin unsigned from, to; 25831da177e4SLinus Torvalds unsigned block_in_page; 2584a4b0672dSNick Piggin unsigned block_start, block_end; 25851da177e4SLinus Torvalds sector_t block_in_file; 25861da177e4SLinus Torvalds int nr_reads = 0; 25871da177e4SLinus Torvalds int ret = 0; 25881da177e4SLinus Torvalds int is_mapped_to_disk = 1; 25891da177e4SLinus Torvalds 259009cbfeafSKirill A. Shutemov index = pos >> PAGE_SHIFT; 259109cbfeafSKirill A. 
Shutemov from = pos & (PAGE_SIZE - 1); 259203158cd7SNick Piggin to = from + len; 259303158cd7SNick Piggin 259454566b2cSNick Piggin page = grab_cache_page_write_begin(mapping, index, flags); 259503158cd7SNick Piggin if (!page) 259603158cd7SNick Piggin return -ENOMEM; 259703158cd7SNick Piggin *pagep = page; 259803158cd7SNick Piggin *fsdata = NULL; 259903158cd7SNick Piggin 260003158cd7SNick Piggin if (page_has_buffers(page)) { 2601309f77adSNamhyung Kim ret = __block_write_begin(page, pos, len, get_block); 2602309f77adSNamhyung Kim if (unlikely(ret)) 2603309f77adSNamhyung Kim goto out_release; 2604309f77adSNamhyung Kim return ret; 260503158cd7SNick Piggin } 2606a4b0672dSNick Piggin 26071da177e4SLinus Torvalds if (PageMappedToDisk(page)) 26081da177e4SLinus Torvalds return 0; 26091da177e4SLinus Torvalds 2610a4b0672dSNick Piggin /* 2611a4b0672dSNick Piggin * Allocate buffers so that we can keep track of state, and potentially 2612a4b0672dSNick Piggin * attach them to the page if an error occurs. In the common case of 2613a4b0672dSNick Piggin * no error, they will just be freed again without ever being attached 2614a4b0672dSNick Piggin * to the page (which is all OK, because we're under the page lock). 2615a4b0672dSNick Piggin * 2616a4b0672dSNick Piggin * Be careful: the buffer linked list is a NULL terminated one, rather 2617a4b0672dSNick Piggin * than the circular one we're used to. 2618a4b0672dSNick Piggin */ 2619640ab98fSJens Axboe head = alloc_page_buffers(page, blocksize, false); 262003158cd7SNick Piggin if (!head) { 262103158cd7SNick Piggin ret = -ENOMEM; 262203158cd7SNick Piggin goto out_release; 262303158cd7SNick Piggin } 2624a4b0672dSNick Piggin 262509cbfeafSKirill A. Shutemov block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); 26261da177e4SLinus Torvalds 26271da177e4SLinus Torvalds /* 26281da177e4SLinus Torvalds * We loop across all blocks in the page, whether or not they are 26291da177e4SLinus Torvalds * part of the affected region. This is so we can discover if the 26301da177e4SLinus Torvalds * page is fully mapped-to-disk. 26311da177e4SLinus Torvalds */ 2632a4b0672dSNick Piggin for (block_start = 0, block_in_page = 0, bh = head; 263309cbfeafSKirill A. 
Shutemov block_start < PAGE_SIZE; 2634a4b0672dSNick Piggin block_in_page++, block_start += blocksize, bh = bh->b_this_page) { 26351da177e4SLinus Torvalds int create; 26361da177e4SLinus Torvalds 2637a4b0672dSNick Piggin block_end = block_start + blocksize; 2638a4b0672dSNick Piggin bh->b_state = 0; 26391da177e4SLinus Torvalds create = 1; 26401da177e4SLinus Torvalds if (block_start >= to) 26411da177e4SLinus Torvalds create = 0; 26421da177e4SLinus Torvalds ret = get_block(inode, block_in_file + block_in_page, 2643a4b0672dSNick Piggin bh, create); 26441da177e4SLinus Torvalds if (ret) 26451da177e4SLinus Torvalds goto failed; 2646a4b0672dSNick Piggin if (!buffer_mapped(bh)) 26471da177e4SLinus Torvalds is_mapped_to_disk = 0; 2648a4b0672dSNick Piggin if (buffer_new(bh)) 2649e64855c6SJan Kara clean_bdev_bh_alias(bh); 2650a4b0672dSNick Piggin if (PageUptodate(page)) { 2651a4b0672dSNick Piggin set_buffer_uptodate(bh); 26521da177e4SLinus Torvalds continue; 2653a4b0672dSNick Piggin } 2654a4b0672dSNick Piggin if (buffer_new(bh) || !buffer_mapped(bh)) { 2655eebd2aa3SChristoph Lameter zero_user_segments(page, block_start, from, 2656eebd2aa3SChristoph Lameter to, block_end); 26571da177e4SLinus Torvalds continue; 26581da177e4SLinus Torvalds } 2659a4b0672dSNick Piggin if (buffer_uptodate(bh)) 26601da177e4SLinus Torvalds continue; /* reiserfs does this */ 26611da177e4SLinus Torvalds if (block_start < from || block_end > to) { 2662a4b0672dSNick Piggin lock_buffer(bh); 2663a4b0672dSNick Piggin bh->b_end_io = end_buffer_read_nobh; 26642a222ca9SMike Christie submit_bh(REQ_OP_READ, 0, bh); 2665a4b0672dSNick Piggin nr_reads++; 26661da177e4SLinus Torvalds } 26671da177e4SLinus Torvalds } 26681da177e4SLinus Torvalds 26691da177e4SLinus Torvalds if (nr_reads) { 26701da177e4SLinus Torvalds /* 26711da177e4SLinus Torvalds * The page is locked, so these buffers are protected from 26721da177e4SLinus Torvalds * any VM or truncate activity. Hence we don't need to care 26731da177e4SLinus Torvalds * for the buffer_head refcounts. 26741da177e4SLinus Torvalds */ 2675a4b0672dSNick Piggin for (bh = head; bh; bh = bh->b_this_page) { 26761da177e4SLinus Torvalds wait_on_buffer(bh); 26771da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 26781da177e4SLinus Torvalds ret = -EIO; 26791da177e4SLinus Torvalds } 26801da177e4SLinus Torvalds if (ret) 26811da177e4SLinus Torvalds goto failed; 26821da177e4SLinus Torvalds } 26831da177e4SLinus Torvalds 26841da177e4SLinus Torvalds if (is_mapped_to_disk) 26851da177e4SLinus Torvalds SetPageMappedToDisk(page); 26861da177e4SLinus Torvalds 268703158cd7SNick Piggin *fsdata = head; /* to be released by nobh_write_end */ 2688a4b0672dSNick Piggin 26891da177e4SLinus Torvalds return 0; 26901da177e4SLinus Torvalds 26911da177e4SLinus Torvalds failed: 269203158cd7SNick Piggin BUG_ON(!ret); 26931da177e4SLinus Torvalds /* 2694a4b0672dSNick Piggin * Error recovery is a bit difficult. We need to zero out blocks that 2695a4b0672dSNick Piggin * were newly allocated, and dirty them to ensure they get written out. 2696a4b0672dSNick Piggin * Buffers need to be attached to the page at this point, otherwise 2697a4b0672dSNick Piggin * the handling of potential IO errors during writeout would be hard 2698a4b0672dSNick Piggin * (could try doing synchronous writeout, but what if that fails too?) 
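 *
 * Hence the two calls below: attach_nobh_buffers() publishes the
 * buffer list on the page, and page_zero_new_buffers() zeroes the
 * newly allocated blocks and marks them dirty so writeback will
 * flush them out.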
26991da177e4SLinus Torvalds */ 270003158cd7SNick Piggin attach_nobh_buffers(page, head); 270103158cd7SNick Piggin page_zero_new_buffers(page, from, to); 2702a4b0672dSNick Piggin 270303158cd7SNick Piggin out_release: 270403158cd7SNick Piggin unlock_page(page); 270509cbfeafSKirill A. Shutemov put_page(page); 270603158cd7SNick Piggin *pagep = NULL; 2707a4b0672dSNick Piggin 27087bb46a67Snpiggin@suse.de return ret; 27097bb46a67Snpiggin@suse.de } 271003158cd7SNick Piggin EXPORT_SYMBOL(nobh_write_begin); 27111da177e4SLinus Torvalds 271203158cd7SNick Piggin int nobh_write_end(struct file *file, struct address_space *mapping, 271303158cd7SNick Piggin loff_t pos, unsigned len, unsigned copied, 271403158cd7SNick Piggin struct page *page, void *fsdata) 27151da177e4SLinus Torvalds { 27161da177e4SLinus Torvalds struct inode *inode = page->mapping->host; 2717efdc3131SNick Piggin struct buffer_head *head = fsdata; 271803158cd7SNick Piggin struct buffer_head *bh; 27195b41e74aSDmitri Monakhov BUG_ON(fsdata != NULL && page_has_buffers(page)); 27201da177e4SLinus Torvalds 2721d4cf109fSDave Kleikamp if (unlikely(copied < len) && head) 272203158cd7SNick Piggin attach_nobh_buffers(page, head); 2723a4b0672dSNick Piggin if (page_has_buffers(page)) 272403158cd7SNick Piggin return generic_write_end(file, mapping, pos, len, 272503158cd7SNick Piggin copied, page, fsdata); 2726a4b0672dSNick Piggin 272722c8ca78SNick Piggin SetPageUptodate(page); 27281da177e4SLinus Torvalds set_page_dirty(page); 272903158cd7SNick Piggin if (pos+copied > inode->i_size) { 273003158cd7SNick Piggin i_size_write(inode, pos+copied); 27311da177e4SLinus Torvalds mark_inode_dirty(inode); 27321da177e4SLinus Torvalds } 273303158cd7SNick Piggin 273403158cd7SNick Piggin unlock_page(page); 273509cbfeafSKirill A. Shutemov put_page(page); 273603158cd7SNick Piggin 273703158cd7SNick Piggin while (head) { 273803158cd7SNick Piggin bh = head; 273903158cd7SNick Piggin head = head->b_this_page; 274003158cd7SNick Piggin free_buffer_head(bh); 27411da177e4SLinus Torvalds } 274203158cd7SNick Piggin 274303158cd7SNick Piggin return copied; 274403158cd7SNick Piggin } 274503158cd7SNick Piggin EXPORT_SYMBOL(nobh_write_end); 27461da177e4SLinus Torvalds 27471da177e4SLinus Torvalds /* 27481da177e4SLinus Torvalds * nobh_writepage() - based on block_write_full_page() except 27491da177e4SLinus Torvalds * that it tries to operate without attaching bufferheads to 27501da177e4SLinus Torvalds * the page. 27511da177e4SLinus Torvalds */ 27521da177e4SLinus Torvalds int nobh_writepage(struct page *page, get_block_t *get_block, 27531da177e4SLinus Torvalds struct writeback_control *wbc) 27541da177e4SLinus Torvalds { 27551da177e4SLinus Torvalds struct inode * const inode = page->mapping->host; 27561da177e4SLinus Torvalds loff_t i_size = i_size_read(inode); 275709cbfeafSKirill A. Shutemov const pgoff_t end_index = i_size >> PAGE_SHIFT; 27581da177e4SLinus Torvalds unsigned offset; 27591da177e4SLinus Torvalds int ret; 27601da177e4SLinus Torvalds 27611da177e4SLinus Torvalds /* Is the page fully inside i_size? */ 27621da177e4SLinus Torvalds if (page->index < end_index) 27631da177e4SLinus Torvalds goto out; 27641da177e4SLinus Torvalds 27651da177e4SLinus Torvalds /* Is the page fully outside i_size? (truncate in progress) */ 276609cbfeafSKirill A.
Shutemov offset = i_size & (PAGE_SIZE-1); 27671da177e4SLinus Torvalds if (page->index >= end_index+1 || !offset) { 27681da177e4SLinus Torvalds unlock_page(page); 27691da177e4SLinus Torvalds return 0; /* don't care */ 27701da177e4SLinus Torvalds } 27711da177e4SLinus Torvalds 27721da177e4SLinus Torvalds /* 27731da177e4SLinus Torvalds * The page straddles i_size. It must be zeroed out on each and every 27741da177e4SLinus Torvalds * writepage invocation because it may be mmapped. "A file is mapped 27751da177e4SLinus Torvalds * in multiples of the page size. For a file that is not a multiple of 27761da177e4SLinus Torvalds * the page size, the remaining memory is zeroed when mapped, and 27771da177e4SLinus Torvalds * writes to that region are not written out to the file." 27781da177e4SLinus Torvalds */ 277909cbfeafSKirill A. Shutemov zero_user_segment(page, offset, PAGE_SIZE); 27801da177e4SLinus Torvalds out: 27811da177e4SLinus Torvalds ret = mpage_writepage(page, get_block, wbc); 27821da177e4SLinus Torvalds if (ret == -EAGAIN) 278335c80d5fSChris Mason ret = __block_write_full_page(inode, page, get_block, wbc, 278435c80d5fSChris Mason end_buffer_async_write); 27851da177e4SLinus Torvalds return ret; 27861da177e4SLinus Torvalds } 27871da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_writepage); 27881da177e4SLinus Torvalds 278903158cd7SNick Piggin int nobh_truncate_page(struct address_space *mapping, 279003158cd7SNick Piggin loff_t from, get_block_t *get_block) 27911da177e4SLinus Torvalds { 279209cbfeafSKirill A. Shutemov pgoff_t index = from >> PAGE_SHIFT; 279309cbfeafSKirill A. Shutemov unsigned offset = from & (PAGE_SIZE-1); 279403158cd7SNick Piggin unsigned blocksize; 279503158cd7SNick Piggin sector_t iblock; 279603158cd7SNick Piggin unsigned length, pos; 279703158cd7SNick Piggin struct inode *inode = mapping->host; 27981da177e4SLinus Torvalds struct page *page; 279903158cd7SNick Piggin struct buffer_head map_bh; 280003158cd7SNick Piggin int err; 28011da177e4SLinus Torvalds 280293407472SFabian Frederick blocksize = i_blocksize(inode); 280303158cd7SNick Piggin length = offset & (blocksize - 1); 28041da177e4SLinus Torvalds 280503158cd7SNick Piggin /* Block boundary? Nothing to do */ 280603158cd7SNick Piggin if (!length) 280703158cd7SNick Piggin return 0; 280803158cd7SNick Piggin 280903158cd7SNick Piggin length = blocksize - length; 281009cbfeafSKirill A. Shutemov iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits); 281103158cd7SNick Piggin 28121da177e4SLinus Torvalds page = grab_cache_page(mapping, index); 281303158cd7SNick Piggin err = -ENOMEM; 28141da177e4SLinus Torvalds if (!page) 28151da177e4SLinus Torvalds goto out; 28161da177e4SLinus Torvalds 281703158cd7SNick Piggin if (page_has_buffers(page)) { 281803158cd7SNick Piggin has_buffers: 281903158cd7SNick Piggin unlock_page(page); 282009cbfeafSKirill A. 
Shutemov put_page(page); 282103158cd7SNick Piggin return block_truncate_page(mapping, from, get_block); 28221da177e4SLinus Torvalds } 282303158cd7SNick Piggin 282403158cd7SNick Piggin /* Find the buffer that contains "offset" */ 282503158cd7SNick Piggin pos = blocksize; 282603158cd7SNick Piggin while (offset >= pos) { 282703158cd7SNick Piggin iblock++; 282803158cd7SNick Piggin pos += blocksize; 282903158cd7SNick Piggin } 283003158cd7SNick Piggin 2831460bcf57STheodore Ts'o map_bh.b_size = blocksize; 2832460bcf57STheodore Ts'o map_bh.b_state = 0; 283303158cd7SNick Piggin err = get_block(inode, iblock, &map_bh, 0); 283403158cd7SNick Piggin if (err) 283503158cd7SNick Piggin goto unlock; 283603158cd7SNick Piggin /* unmapped? It's a hole - nothing to do */ 283703158cd7SNick Piggin if (!buffer_mapped(&map_bh)) 283803158cd7SNick Piggin goto unlock; 283903158cd7SNick Piggin 284003158cd7SNick Piggin /* Ok, it's mapped. Make sure it's up-to-date */ 284103158cd7SNick Piggin if (!PageUptodate(page)) { 284203158cd7SNick Piggin err = mapping->a_ops->readpage(NULL, page); 284303158cd7SNick Piggin if (err) { 284409cbfeafSKirill A. Shutemov put_page(page); 284503158cd7SNick Piggin goto out; 284603158cd7SNick Piggin } 284703158cd7SNick Piggin lock_page(page); 284803158cd7SNick Piggin if (!PageUptodate(page)) { 284903158cd7SNick Piggin err = -EIO; 285003158cd7SNick Piggin goto unlock; 285103158cd7SNick Piggin } 285203158cd7SNick Piggin if (page_has_buffers(page)) 285303158cd7SNick Piggin goto has_buffers; 285403158cd7SNick Piggin } 2855eebd2aa3SChristoph Lameter zero_user(page, offset, length); 285603158cd7SNick Piggin set_page_dirty(page); 285703158cd7SNick Piggin err = 0; 285803158cd7SNick Piggin 285903158cd7SNick Piggin unlock: 28601da177e4SLinus Torvalds unlock_page(page); 286109cbfeafSKirill A. Shutemov put_page(page); 28621da177e4SLinus Torvalds out: 286303158cd7SNick Piggin return err; 28641da177e4SLinus Torvalds } 28651da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_truncate_page); 28661da177e4SLinus Torvalds 28671da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping, 28681da177e4SLinus Torvalds loff_t from, get_block_t *get_block) 28691da177e4SLinus Torvalds { 287009cbfeafSKirill A. Shutemov pgoff_t index = from >> PAGE_SHIFT; 287109cbfeafSKirill A. Shutemov unsigned offset = from & (PAGE_SIZE-1); 28721da177e4SLinus Torvalds unsigned blocksize; 287354b21a79SAndrew Morton sector_t iblock; 28741da177e4SLinus Torvalds unsigned length, pos; 28751da177e4SLinus Torvalds struct inode *inode = mapping->host; 28761da177e4SLinus Torvalds struct page *page; 28771da177e4SLinus Torvalds struct buffer_head *bh; 28781da177e4SLinus Torvalds int err; 28791da177e4SLinus Torvalds 288093407472SFabian Frederick blocksize = i_blocksize(inode); 28811da177e4SLinus Torvalds length = offset & (blocksize - 1); 28821da177e4SLinus Torvalds 28831da177e4SLinus Torvalds /* Block boundary? Nothing to do */ 28841da177e4SLinus Torvalds if (!length) 28851da177e4SLinus Torvalds return 0; 28861da177e4SLinus Torvalds 28871da177e4SLinus Torvalds length = blocksize - length; 288809cbfeafSKirill A. 
Shutemov iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits); 28891da177e4SLinus Torvalds 28901da177e4SLinus Torvalds page = grab_cache_page(mapping, index); 28911da177e4SLinus Torvalds err = -ENOMEM; 28921da177e4SLinus Torvalds if (!page) 28931da177e4SLinus Torvalds goto out; 28941da177e4SLinus Torvalds 28951da177e4SLinus Torvalds if (!page_has_buffers(page)) 28961da177e4SLinus Torvalds create_empty_buffers(page, blocksize, 0); 28971da177e4SLinus Torvalds 28981da177e4SLinus Torvalds /* Find the buffer that contains "offset" */ 28991da177e4SLinus Torvalds bh = page_buffers(page); 29001da177e4SLinus Torvalds pos = blocksize; 29011da177e4SLinus Torvalds while (offset >= pos) { 29021da177e4SLinus Torvalds bh = bh->b_this_page; 29031da177e4SLinus Torvalds iblock++; 29041da177e4SLinus Torvalds pos += blocksize; 29051da177e4SLinus Torvalds } 29061da177e4SLinus Torvalds 29071da177e4SLinus Torvalds err = 0; 29081da177e4SLinus Torvalds if (!buffer_mapped(bh)) { 2909b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize); 29101da177e4SLinus Torvalds err = get_block(inode, iblock, bh, 0); 29111da177e4SLinus Torvalds if (err) 29121da177e4SLinus Torvalds goto unlock; 29131da177e4SLinus Torvalds /* unmapped? It's a hole - nothing to do */ 29141da177e4SLinus Torvalds if (!buffer_mapped(bh)) 29151da177e4SLinus Torvalds goto unlock; 29161da177e4SLinus Torvalds } 29171da177e4SLinus Torvalds 29181da177e4SLinus Torvalds /* Ok, it's mapped. Make sure it's up-to-date */ 29191da177e4SLinus Torvalds if (PageUptodate(page)) 29201da177e4SLinus Torvalds set_buffer_uptodate(bh); 29211da177e4SLinus Torvalds 292233a266ddSDavid Chinner if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { 29231da177e4SLinus Torvalds err = -EIO; 2924dfec8a14SMike Christie ll_rw_block(REQ_OP_READ, 0, 1, &bh); 29251da177e4SLinus Torvalds wait_on_buffer(bh); 29261da177e4SLinus Torvalds /* Uhhuh. Read error. Complain and punt. */ 29271da177e4SLinus Torvalds if (!buffer_uptodate(bh)) 29281da177e4SLinus Torvalds goto unlock; 29291da177e4SLinus Torvalds } 29301da177e4SLinus Torvalds 2931eebd2aa3SChristoph Lameter zero_user(page, offset, length); 29321da177e4SLinus Torvalds mark_buffer_dirty(bh); 29331da177e4SLinus Torvalds err = 0; 29341da177e4SLinus Torvalds 29351da177e4SLinus Torvalds unlock: 29361da177e4SLinus Torvalds unlock_page(page); 293709cbfeafSKirill A. Shutemov put_page(page); 29381da177e4SLinus Torvalds out: 29391da177e4SLinus Torvalds return err; 29401da177e4SLinus Torvalds } 29411fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_truncate_page); 29421da177e4SLinus Torvalds 29431da177e4SLinus Torvalds /* 29441da177e4SLinus Torvalds * The generic ->writepage function for buffer-backed address_spaces 29451da177e4SLinus Torvalds */ 29461b938c08SMatthew Wilcox int block_write_full_page(struct page *page, get_block_t *get_block, 29471b938c08SMatthew Wilcox struct writeback_control *wbc) 29481da177e4SLinus Torvalds { 29491da177e4SLinus Torvalds struct inode * const inode = page->mapping->host; 29501da177e4SLinus Torvalds loff_t i_size = i_size_read(inode); 295109cbfeafSKirill A. Shutemov const pgoff_t end_index = i_size >> PAGE_SHIFT; 29521da177e4SLinus Torvalds unsigned offset; 29531da177e4SLinus Torvalds 29541da177e4SLinus Torvalds /* Is the page fully inside i_size? 
*/ 29551da177e4SLinus Torvalds if (page->index < end_index) 295635c80d5fSChris Mason return __block_write_full_page(inode, page, get_block, wbc, 29571b938c08SMatthew Wilcox end_buffer_async_write); 29581da177e4SLinus Torvalds 29591da177e4SLinus Torvalds /* Is the page fully outside i_size? (truncate in progress) */ 296009cbfeafSKirill A. Shutemov offset = i_size & (PAGE_SIZE-1); 29611da177e4SLinus Torvalds if (page->index >= end_index+1 || !offset) { 29621da177e4SLinus Torvalds unlock_page(page); 29631da177e4SLinus Torvalds return 0; /* don't care */ 29641da177e4SLinus Torvalds } 29651da177e4SLinus Torvalds 29661da177e4SLinus Torvalds /* 29671da177e4SLinus Torvalds * The page straddles i_size. It must be zeroed out on each and every 29682a61aa40SAdam Buchbinder * writepage invocation because it may be mmapped. "A file is mapped 29691da177e4SLinus Torvalds * in multiples of the page size. For a file that is not a multiple of 29701da177e4SLinus Torvalds * the page size, the remaining memory is zeroed when mapped, and 29711da177e4SLinus Torvalds * writes to that region are not written out to the file." 29721da177e4SLinus Torvalds */ 297309cbfeafSKirill A. Shutemov zero_user_segment(page, offset, PAGE_SIZE); 29741b938c08SMatthew Wilcox return __block_write_full_page(inode, page, get_block, wbc, 297535c80d5fSChris Mason end_buffer_async_write); 297635c80d5fSChris Mason } 29771fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_write_full_page); 297835c80d5fSChris Mason 29791da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block, 29801da177e4SLinus Torvalds get_block_t *get_block) 29811da177e4SLinus Torvalds { 29821da177e4SLinus Torvalds struct inode *inode = mapping->host; 29832a527d68SAlexander Potapenko struct buffer_head tmp = { 29842a527d68SAlexander Potapenko .b_size = i_blocksize(inode), 29852a527d68SAlexander Potapenko }; 29862a527d68SAlexander Potapenko 29871da177e4SLinus Torvalds get_block(inode, block, &tmp, 0); 29881da177e4SLinus Torvalds return tmp.b_blocknr; 29891da177e4SLinus Torvalds } 29901fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_block_bmap); 29911da177e4SLinus Torvalds 29924246a0b6SChristoph Hellwig static void end_bio_bh_io_sync(struct bio *bio) 29931da177e4SLinus Torvalds { 29941da177e4SLinus Torvalds struct buffer_head *bh = bio->bi_private; 29951da177e4SLinus Torvalds 2996b7c44ed9SJens Axboe if (unlikely(bio_flagged(bio, BIO_QUIET))) 299708bafc03SKeith Mannthey set_bit(BH_Quiet, &bh->b_state); 299808bafc03SKeith Mannthey 29994e4cbee9SChristoph Hellwig bh->b_end_io(bh, !bio->bi_status); 30001da177e4SLinus Torvalds bio_put(bio); 30011da177e4SLinus Torvalds } 30021da177e4SLinus Torvalds 30032a222ca9SMike Christie static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, 3004c75e707fSChristoph Hellwig struct writeback_control *wbc) 30051da177e4SLinus Torvalds { 30061da177e4SLinus Torvalds struct bio *bio; 30071da177e4SLinus Torvalds 30081da177e4SLinus Torvalds BUG_ON(!buffer_locked(bh)); 30091da177e4SLinus Torvalds BUG_ON(!buffer_mapped(bh)); 30101da177e4SLinus Torvalds BUG_ON(!bh->b_end_io); 30118fb0e342SAneesh Kumar K.V BUG_ON(buffer_delay(bh)); 30128fb0e342SAneesh Kumar K.V BUG_ON(buffer_unwritten(bh)); 30131da177e4SLinus Torvalds 301448fd4f93SJens Axboe /* 301548fd4f93SJens Axboe * Only clear out a write error when rewriting 30161da177e4SLinus Torvalds */ 30172a222ca9SMike Christie if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) 30181da177e4SLinus Torvalds clear_buffer_write_io_error(bh); 30191da177e4SLinus 
Torvalds 302007888c66SChristoph Hellwig if (buffer_meta(bh)) 302107888c66SChristoph Hellwig op_flags |= REQ_META; 302207888c66SChristoph Hellwig if (buffer_prio(bh)) 302307888c66SChristoph Hellwig op_flags |= REQ_PRIO; 302407888c66SChristoph Hellwig 302507888c66SChristoph Hellwig bio = bio_alloc(bh->b_bdev, 1, op | op_flags, GFP_NOIO); 30261da177e4SLinus Torvalds 30274f74d15fSEric Biggers fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); 30284f74d15fSEric Biggers 30294f024f37SKent Overstreet bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); 30301da177e4SLinus Torvalds 30316cf66b4cSKent Overstreet bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); 30326cf66b4cSKent Overstreet BUG_ON(bio->bi_iter.bi_size != bh->b_size); 30331da177e4SLinus Torvalds 30341da177e4SLinus Torvalds bio->bi_end_io = end_bio_bh_io_sync; 30351da177e4SLinus Torvalds bio->bi_private = bh; 30361da177e4SLinus Torvalds 303783c9c547SMing Lei /* Take care of bh's that straddle the end of the device */ 303883c9c547SMing Lei guard_bio_eod(bio); 303983c9c547SMing Lei 3040fd42df30SDennis Zhou if (wbc) { 3041fd42df30SDennis Zhou wbc_init_bio(wbc, bio); 304234e51a5eSTejun Heo wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size); 3043fd42df30SDennis Zhou } 3044fd42df30SDennis Zhou 30454e49ea4aSMike Christie submit_bio(bio); 3046f6454b04SJulia Lawall return 0; 30471da177e4SLinus Torvalds } 3048bafc0dbaSTejun Heo 30492a222ca9SMike Christie int submit_bh(int op, int op_flags, struct buffer_head *bh) 305071368511SDarrick J. Wong { 3051c75e707fSChristoph Hellwig return submit_bh_wbc(op, op_flags, bh, NULL); 305271368511SDarrick J. Wong } 30531fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(submit_bh); 30541da177e4SLinus Torvalds 30551da177e4SLinus Torvalds /** 30561da177e4SLinus Torvalds * ll_rw_block: low-level access to block devices (DEPRECATED) 3057dfec8a14SMike Christie * @op: whether to %REQ_OP_READ or %REQ_OP_WRITE 3058ef295ecfSChristoph Hellwig * @op_flags: req_flag_bits 30591da177e4SLinus Torvalds * @nr: number of &struct buffer_heads in the array 30601da177e4SLinus Torvalds * @bhs: array of pointers to &struct buffer_head 30611da177e4SLinus Torvalds * 3062a7662236SJan Kara * ll_rw_block() takes an array of pointers to &struct buffer_heads, and 306370246286SChristoph Hellwig * requests an I/O operation on them, either a %REQ_OP_READ or a %REQ_OP_WRITE. 306470246286SChristoph Hellwig * @op_flags contains flags modifying the detailed I/O behavior, most notably 306570246286SChristoph Hellwig * %REQ_RAHEAD. 30661da177e4SLinus Torvalds * 30671da177e4SLinus Torvalds * This function drops any buffer that it cannot get a lock on (with the 30689cb569d6SChristoph Hellwig * BH_Lock state bit), any buffer that appears to be clean when doing a write 30699cb569d6SChristoph Hellwig * request, and any buffer that appears to be up-to-date when doing a read 30709cb569d6SChristoph Hellwig * request. Further it marks as clean buffers that are processed for 30719cb569d6SChristoph Hellwig * writing (the buffer cache won't assume that they are actually clean 30729cb569d6SChristoph Hellwig * until the buffer gets unlocked). 30731da177e4SLinus Torvalds * 30741da177e4SLinus Torvalds * ll_rw_block sets b_end_io to a simple completion handler that marks 3075e227867fSMasanari Iida * the buffer up-to-date (if appropriate), unlocks the buffer and wakes 30761da177e4SLinus Torvalds * any waiters.
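 *
 * A minimal usage sketch (illustrative, not part of the original
 * documentation); the caller must re-check uptodateness afterwards,
 * because a buffer that was locked or already uptodate may have been
 * skipped entirely:
 *
 *	ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		err = -EIO;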
30771da177e4SLinus Torvalds * 30781da177e4SLinus Torvalds * All of the buffers must be for the same device, and their size must 30791da177e4SLinus Torvalds * also be a multiple of the current approved size for the device. 30801da177e4SLinus Torvalds */ 3081dfec8a14SMike Christie void ll_rw_block(int op, int op_flags, int nr, struct buffer_head *bhs[]) 30821da177e4SLinus Torvalds { 30831da177e4SLinus Torvalds int i; 30841da177e4SLinus Torvalds 30851da177e4SLinus Torvalds for (i = 0; i < nr; i++) { 30861da177e4SLinus Torvalds struct buffer_head *bh = bhs[i]; 30871da177e4SLinus Torvalds 30889cb569d6SChristoph Hellwig if (!trylock_buffer(bh)) 30891da177e4SLinus Torvalds continue; 3090dfec8a14SMike Christie if (op == WRITE) { 30911da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) { 309276c3073aSakpm@osdl.org bh->b_end_io = end_buffer_write_sync; 3093e60e5c50SOGAWA Hirofumi get_bh(bh); 3094dfec8a14SMike Christie submit_bh(op, op_flags, bh); 30951da177e4SLinus Torvalds continue; 30961da177e4SLinus Torvalds } 30971da177e4SLinus Torvalds } else { 30981da177e4SLinus Torvalds if (!buffer_uptodate(bh)) { 309976c3073aSakpm@osdl.org bh->b_end_io = end_buffer_read_sync; 3100e60e5c50SOGAWA Hirofumi get_bh(bh); 3101dfec8a14SMike Christie submit_bh(op, op_flags, bh); 31021da177e4SLinus Torvalds continue; 31031da177e4SLinus Torvalds } 31041da177e4SLinus Torvalds } 31051da177e4SLinus Torvalds unlock_buffer(bh); 31061da177e4SLinus Torvalds } 31071da177e4SLinus Torvalds } 31081fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(ll_rw_block); 31091da177e4SLinus Torvalds 31102a222ca9SMike Christie void write_dirty_buffer(struct buffer_head *bh, int op_flags) 31119cb569d6SChristoph Hellwig { 31129cb569d6SChristoph Hellwig lock_buffer(bh); 31139cb569d6SChristoph Hellwig if (!test_clear_buffer_dirty(bh)) { 31149cb569d6SChristoph Hellwig unlock_buffer(bh); 31159cb569d6SChristoph Hellwig return; 31169cb569d6SChristoph Hellwig } 31179cb569d6SChristoph Hellwig bh->b_end_io = end_buffer_write_sync; 31189cb569d6SChristoph Hellwig get_bh(bh); 31192a222ca9SMike Christie submit_bh(REQ_OP_WRITE, op_flags, bh); 31209cb569d6SChristoph Hellwig } 31219cb569d6SChristoph Hellwig EXPORT_SYMBOL(write_dirty_buffer); 31229cb569d6SChristoph Hellwig 31231da177e4SLinus Torvalds /* 31241da177e4SLinus Torvalds * For a data-integrity writeout, we need to wait upon any in-progress I/O 31251da177e4SLinus Torvalds * and then start new I/O and then wait upon it. The caller must have a ref on 31261da177e4SLinus Torvalds * the buffer_head. 31271da177e4SLinus Torvalds */ 31282a222ca9SMike Christie int __sync_dirty_buffer(struct buffer_head *bh, int op_flags) 31291da177e4SLinus Torvalds { 31301da177e4SLinus Torvalds int ret = 0; 31311da177e4SLinus Torvalds 31321da177e4SLinus Torvalds WARN_ON(atomic_read(&bh->b_count) < 1); 31331da177e4SLinus Torvalds lock_buffer(bh); 31341da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) { 3135377254b2SXianting Tian /* 3136377254b2SXianting Tian * The bh should be mapped, but it might not be if the 3137377254b2SXianting Tian * device was hot-removed. Not much we can do but fail the I/O.
3138377254b2SXianting Tian */ 3139377254b2SXianting Tian if (!buffer_mapped(bh)) { 3140377254b2SXianting Tian unlock_buffer(bh); 3141377254b2SXianting Tian return -EIO; 3142377254b2SXianting Tian } 3143377254b2SXianting Tian 31441da177e4SLinus Torvalds get_bh(bh); 31451da177e4SLinus Torvalds bh->b_end_io = end_buffer_write_sync; 31462a222ca9SMike Christie ret = submit_bh(REQ_OP_WRITE, op_flags, bh); 31471da177e4SLinus Torvalds wait_on_buffer(bh); 31481da177e4SLinus Torvalds if (!ret && !buffer_uptodate(bh)) 31491da177e4SLinus Torvalds ret = -EIO; 31501da177e4SLinus Torvalds } else { 31511da177e4SLinus Torvalds unlock_buffer(bh); 31521da177e4SLinus Torvalds } 31531da177e4SLinus Torvalds return ret; 31541da177e4SLinus Torvalds } 315587e99511SChristoph Hellwig EXPORT_SYMBOL(__sync_dirty_buffer); 315687e99511SChristoph Hellwig 315787e99511SChristoph Hellwig int sync_dirty_buffer(struct buffer_head *bh) 315887e99511SChristoph Hellwig { 315970fd7614SChristoph Hellwig return __sync_dirty_buffer(bh, REQ_SYNC); 316087e99511SChristoph Hellwig } 31611fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(sync_dirty_buffer); 31621da177e4SLinus Torvalds 31631da177e4SLinus Torvalds /* 31641da177e4SLinus Torvalds * try_to_free_buffers() checks if all the buffers on this particular page 31651da177e4SLinus Torvalds * are unused, and releases them if so. 31661da177e4SLinus Torvalds * 31671da177e4SLinus Torvalds * Exclusion against try_to_free_buffers may be obtained by either 31681da177e4SLinus Torvalds * locking the page or by holding its mapping's private_lock. 31691da177e4SLinus Torvalds * 31701da177e4SLinus Torvalds * If the page is dirty but all the buffers are clean then we need to 31711da177e4SLinus Torvalds * be sure to mark the page clean as well. This is because the page 31721da177e4SLinus Torvalds * may be against a block device, and a later reattachment of buffers 31731da177e4SLinus Torvalds * to a dirty page will set *all* buffers dirty. Which would corrupt 31741da177e4SLinus Torvalds * filesystem data on the same device. 31751da177e4SLinus Torvalds * 31761da177e4SLinus Torvalds * The same applies to regular filesystem pages: if all the buffers are 31771da177e4SLinus Torvalds * clean then we set the page clean and proceed. To do that, we require 3178e621900aSMatthew Wilcox (Oracle) * total exclusion from block_dirty_folio(). That is obtained with 31791da177e4SLinus Torvalds * private_lock. 31801da177e4SLinus Torvalds * 31811da177e4SLinus Torvalds * try_to_free_buffers() is non-blocking. 
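 *
 * A minimal caller sketch (illustrative; the page must be locked, as
 * the BUG_ON below enforces):
 *
 *	lock_page(page);
 *	ret = try_to_free_buffers(page);
 *	unlock_page(page);
 *
 * On success the buffer list is freed and page->private is cleared.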
31821da177e4SLinus Torvalds */ 31831da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh) 31841da177e4SLinus Torvalds { 31851da177e4SLinus Torvalds return atomic_read(&bh->b_count) | 31861da177e4SLinus Torvalds (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); 31871da177e4SLinus Torvalds } 31881da177e4SLinus Torvalds 31891da177e4SLinus Torvalds static int 31901da177e4SLinus Torvalds drop_buffers(struct page *page, struct buffer_head **buffers_to_free) 31911da177e4SLinus Torvalds { 31921da177e4SLinus Torvalds struct buffer_head *head = page_buffers(page); 31931da177e4SLinus Torvalds struct buffer_head *bh; 31941da177e4SLinus Torvalds 31951da177e4SLinus Torvalds bh = head; 31961da177e4SLinus Torvalds do { 31971da177e4SLinus Torvalds if (buffer_busy(bh)) 31981da177e4SLinus Torvalds goto failed; 31991da177e4SLinus Torvalds bh = bh->b_this_page; 32001da177e4SLinus Torvalds } while (bh != head); 32011da177e4SLinus Torvalds 32021da177e4SLinus Torvalds do { 32031da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 32041da177e4SLinus Torvalds 3205535ee2fbSJan Kara if (bh->b_assoc_map) 32061da177e4SLinus Torvalds __remove_assoc_queue(bh); 32071da177e4SLinus Torvalds bh = next; 32081da177e4SLinus Torvalds } while (bh != head); 32091da177e4SLinus Torvalds *buffers_to_free = head; 321045dcfc27SGuoqing Jiang detach_page_private(page); 32111da177e4SLinus Torvalds return 1; 32121da177e4SLinus Torvalds failed: 32131da177e4SLinus Torvalds return 0; 32141da177e4SLinus Torvalds } 32151da177e4SLinus Torvalds 32161da177e4SLinus Torvalds int try_to_free_buffers(struct page *page) 32171da177e4SLinus Torvalds { 32181da177e4SLinus Torvalds struct address_space * const mapping = page->mapping; 32191da177e4SLinus Torvalds struct buffer_head *buffers_to_free = NULL; 32201da177e4SLinus Torvalds int ret = 0; 32211da177e4SLinus Torvalds 32221da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 3223ecdfc978SLinus Torvalds if (PageWriteback(page)) 32241da177e4SLinus Torvalds return 0; 32251da177e4SLinus Torvalds 32261da177e4SLinus Torvalds if (mapping == NULL) { /* can this still happen? */ 32271da177e4SLinus Torvalds ret = drop_buffers(page, &buffers_to_free); 32281da177e4SLinus Torvalds goto out; 32291da177e4SLinus Torvalds } 32301da177e4SLinus Torvalds 32311da177e4SLinus Torvalds spin_lock(&mapping->private_lock); 32321da177e4SLinus Torvalds ret = drop_buffers(page, &buffers_to_free); 3233ecdfc978SLinus Torvalds 3234ecdfc978SLinus Torvalds /* 3235ecdfc978SLinus Torvalds * If the filesystem writes its buffers by hand (eg ext3) 3236ecdfc978SLinus Torvalds * then we can have clean buffers against a dirty page. We 3237ecdfc978SLinus Torvalds * clean the page here; otherwise the VM will never notice 3238ecdfc978SLinus Torvalds * that the filesystem did any IO at all. 3239ecdfc978SLinus Torvalds * 3240ecdfc978SLinus Torvalds * Also, during truncate, discard_buffer will have marked all 3241ecdfc978SLinus Torvalds * the page's buffers clean. We discover that here and clean 3242ecdfc978SLinus Torvalds * the page also. 324387df7241SNick Piggin * 324487df7241SNick Piggin * private_lock must be held over this entire operation in order 3245e621900aSMatthew Wilcox (Oracle) * to synchronise against block_dirty_folio and prevent the 324687df7241SNick Piggin * dirty bit from being lost. 
3247ecdfc978SLinus Torvalds */ 324811f81becSTejun Heo if (ret) 324911f81becSTejun Heo cancel_dirty_page(page); 325087df7241SNick Piggin spin_unlock(&mapping->private_lock); 32511da177e4SLinus Torvalds out: 32521da177e4SLinus Torvalds if (buffers_to_free) { 32531da177e4SLinus Torvalds struct buffer_head *bh = buffers_to_free; 32541da177e4SLinus Torvalds 32551da177e4SLinus Torvalds do { 32561da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page; 32571da177e4SLinus Torvalds free_buffer_head(bh); 32581da177e4SLinus Torvalds bh = next; 32591da177e4SLinus Torvalds } while (bh != buffers_to_free); 32601da177e4SLinus Torvalds } 32611da177e4SLinus Torvalds return ret; 32621da177e4SLinus Torvalds } 32631da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers); 32641da177e4SLinus Torvalds 32651da177e4SLinus Torvalds /* 32661da177e4SLinus Torvalds * Buffer-head allocation 32671da177e4SLinus Torvalds */ 3268a0a9b043SShai Fultheim static struct kmem_cache *bh_cachep __read_mostly; 32691da177e4SLinus Torvalds 32701da177e4SLinus Torvalds /* 32711da177e4SLinus Torvalds * Once the number of bh's in the machine exceeds this level, we start 32721da177e4SLinus Torvalds * stripping them in writeback. 32731da177e4SLinus Torvalds */ 327443be594aSZhang Yanfei static unsigned long max_buffer_heads; 32751da177e4SLinus Torvalds 32761da177e4SLinus Torvalds int buffer_heads_over_limit; 32771da177e4SLinus Torvalds 32781da177e4SLinus Torvalds struct bh_accounting { 32791da177e4SLinus Torvalds int nr; /* Number of live bh's */ 32801da177e4SLinus Torvalds int ratelimit; /* Limit cacheline bouncing */ 32811da177e4SLinus Torvalds }; 32821da177e4SLinus Torvalds 32831da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0}; 32841da177e4SLinus Torvalds 32851da177e4SLinus Torvalds static void recalc_bh_state(void) 32861da177e4SLinus Torvalds { 32871da177e4SLinus Torvalds int i; 32881da177e4SLinus Torvalds int tot = 0; 32891da177e4SLinus Torvalds 3290ee1be862SChristoph Lameter if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096) 32911da177e4SLinus Torvalds return; 3292c7b92516SChristoph Lameter __this_cpu_write(bh_accounting.ratelimit, 0); 32938a143426SEric Dumazet for_each_online_cpu(i) 32941da177e4SLinus Torvalds tot += per_cpu(bh_accounting, i).nr; 32951da177e4SLinus Torvalds buffer_heads_over_limit = (tot > max_buffer_heads); 32961da177e4SLinus Torvalds } 32971da177e4SLinus Torvalds 3298dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 32991da177e4SLinus Torvalds { 3300019b4d12SRichard Kennedy struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); 33011da177e4SLinus Torvalds if (ret) { 3302a35afb83SChristoph Lameter INIT_LIST_HEAD(&ret->b_assoc_buffers); 3303f1e67e35SThomas Gleixner spin_lock_init(&ret->b_uptodate_lock); 3304c7b92516SChristoph Lameter preempt_disable(); 3305c7b92516SChristoph Lameter __this_cpu_inc(bh_accounting.nr); 33061da177e4SLinus Torvalds recalc_bh_state(); 3307c7b92516SChristoph Lameter preempt_enable(); 33081da177e4SLinus Torvalds } 33091da177e4SLinus Torvalds return ret; 33101da177e4SLinus Torvalds } 33111da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head); 33121da177e4SLinus Torvalds 33131da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh) 33141da177e4SLinus Torvalds { 33151da177e4SLinus Torvalds BUG_ON(!list_empty(&bh->b_assoc_buffers)); 33161da177e4SLinus Torvalds kmem_cache_free(bh_cachep, bh); 3317c7b92516SChristoph Lameter preempt_disable(); 3318c7b92516SChristoph Lameter 
33131da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh)
33141da177e4SLinus Torvalds {
33151da177e4SLinus Torvalds 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
33161da177e4SLinus Torvalds 	kmem_cache_free(bh_cachep, bh);
3317c7b92516SChristoph Lameter 	preempt_disable();
3318c7b92516SChristoph Lameter 	__this_cpu_dec(bh_accounting.nr);
33191da177e4SLinus Torvalds 	recalc_bh_state();
3320c7b92516SChristoph Lameter 	preempt_enable();
33211da177e4SLinus Torvalds }
33221da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head);
33231da177e4SLinus Torvalds 
3324fc4d24c9SSebastian Andrzej Siewior static int buffer_exit_cpu_dead(unsigned int cpu)
33251da177e4SLinus Torvalds {
33261da177e4SLinus Torvalds 	int i;
33271da177e4SLinus Torvalds 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
33281da177e4SLinus Torvalds 
33291da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
33301da177e4SLinus Torvalds 		brelse(b->bhs[i]);
33311da177e4SLinus Torvalds 		b->bhs[i] = NULL;
33321da177e4SLinus Torvalds 	}
3333c7b92516SChristoph Lameter 	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
33348a143426SEric Dumazet 	per_cpu(bh_accounting, cpu).nr = 0;
3335fc4d24c9SSebastian Andrzej Siewior 	return 0;
33361da177e4SLinus Torvalds }
33371da177e4SLinus Torvalds 
3338389d1b08SAneesh Kumar K.V /**
3339a6b91919SRandy Dunlap  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3340389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head
3341389d1b08SAneesh Kumar K.V  *
3342389d1b08SAneesh Kumar K.V  * Return 1 if the buffer is up-to-date; otherwise return 0 with
3343389d1b08SAneesh Kumar K.V  * the buffer locked.
3344389d1b08SAneesh Kumar K.V  */
3345389d1b08SAneesh Kumar K.V int bh_uptodate_or_lock(struct buffer_head *bh)
3346389d1b08SAneesh Kumar K.V {
3347389d1b08SAneesh Kumar K.V 	if (!buffer_uptodate(bh)) {
3348389d1b08SAneesh Kumar K.V 		lock_buffer(bh);
3349389d1b08SAneesh Kumar K.V 		if (!buffer_uptodate(bh))
3350389d1b08SAneesh Kumar K.V 			return 0;
3351389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3352389d1b08SAneesh Kumar K.V 	}
3353389d1b08SAneesh Kumar K.V 	return 1;
3354389d1b08SAneesh Kumar K.V }
3355389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_uptodate_or_lock);
3356389d1b08SAneesh Kumar K.V 
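/*
 * Editor's example (not part of the original file): the usual caller
 * idiom, seen in e.g. ext4's metadata reads - skip the I/O entirely
 * when the buffer is already uptodate; otherwise bh_uptodate_or_lock()
 * returns with the buffer locked, and it can be handed straight to
 * bh_submit_read() (below).  The wrapper name example_read_bh is
 * hypothetical.
 */
static int example_read_bh(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already uptodate, no I/O needed */

	/* The buffer is locked here, as bh_submit_read() requires. */
	return bh_submit_read(bh);	/* 0 on success, -EIO on error */
}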
3357389d1b08SAneesh Kumar K.V /**
3358a6b91919SRandy Dunlap  * bh_submit_read - Submit a locked buffer for reading
3359389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head
3360389d1b08SAneesh Kumar K.V  *
3361389d1b08SAneesh Kumar K.V  * Returns zero on success and -EIO on error.
3362389d1b08SAneesh Kumar K.V  */
3363389d1b08SAneesh Kumar K.V int bh_submit_read(struct buffer_head *bh)
3364389d1b08SAneesh Kumar K.V {
3365389d1b08SAneesh Kumar K.V 	BUG_ON(!buffer_locked(bh));
3366389d1b08SAneesh Kumar K.V 
3367389d1b08SAneesh Kumar K.V 	if (buffer_uptodate(bh)) {
3368389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3369389d1b08SAneesh Kumar K.V 		return 0;
3370389d1b08SAneesh Kumar K.V 	}
3371389d1b08SAneesh Kumar K.V 
3372389d1b08SAneesh Kumar K.V 	get_bh(bh);
3373389d1b08SAneesh Kumar K.V 	bh->b_end_io = end_buffer_read_sync;
33742a222ca9SMike Christie 	submit_bh(REQ_OP_READ, 0, bh);
3375389d1b08SAneesh Kumar K.V 	wait_on_buffer(bh);
3376389d1b08SAneesh Kumar K.V 	if (buffer_uptodate(bh))
3377389d1b08SAneesh Kumar K.V 		return 0;
3378389d1b08SAneesh Kumar K.V 	return -EIO;
3379389d1b08SAneesh Kumar K.V }
3380389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_submit_read);
3381389d1b08SAneesh Kumar K.V 
33821da177e4SLinus Torvalds void __init buffer_init(void)
33831da177e4SLinus Torvalds {
338443be594aSZhang Yanfei 	unsigned long nrpages;
3385fc4d24c9SSebastian Andrzej Siewior 	int ret;
33861da177e4SLinus Torvalds 
3387b98938c3SChristoph Lameter 	bh_cachep = kmem_cache_create("buffer_head",
3388b98938c3SChristoph Lameter 			sizeof(struct buffer_head), 0,
3389b98938c3SChristoph Lameter 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3390b98938c3SChristoph Lameter 				SLAB_MEM_SPREAD),
3391019b4d12SRichard Kennedy 				NULL);
33921da177e4SLinus Torvalds 
33931da177e4SLinus Torvalds 	/*
33941da177e4SLinus Torvalds 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
33951da177e4SLinus Torvalds 	 */
33961da177e4SLinus Torvalds 	nrpages = (nr_free_buffer_pages() * 10) / 100;
33971da177e4SLinus Torvalds 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3398fc4d24c9SSebastian Andrzej Siewior 	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3399fc4d24c9SSebastian Andrzej Siewior 					NULL, buffer_exit_cpu_dead);
3400fc4d24c9SSebastian Andrzej Siewior 	WARN_ON(ret < 0);
34011da177e4SLinus Torvalds }
3402
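/*
 * Editor's example (not part of the original file): a complete
 * synchronous metadata read built from the helpers above, in the style
 * of __bread() but going through bh_uptodate_or_lock() and
 * bh_submit_read().  The function name example_bread is hypothetical;
 * __getblk() and brelse() are the real interfaces from
 * <linux/buffer_head.h>.
 */
static struct buffer_head *example_bread(struct block_device *bdev,
					 sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (!bh)
		return NULL;
	if (bh_uptodate_or_lock(bh))
		return bh;		/* already cached and uptodate */
	if (bh_submit_read(bh) < 0) {	/* I/O failed: drop our ref */
		brelse(bh);
		return NULL;
	}
	return bh;			/* uptodate; caller must brelse() */
}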