1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds * linux/fs/buffer.c
41da177e4SLinus Torvalds *
51da177e4SLinus Torvalds * Copyright (C) 1991, 1992, 2002 Linus Torvalds
61da177e4SLinus Torvalds */
71da177e4SLinus Torvalds
81da177e4SLinus Torvalds /*
91da177e4SLinus Torvalds * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
101da177e4SLinus Torvalds *
111da177e4SLinus Torvalds * Removed a lot of unnecessary code and simplified things now that
121da177e4SLinus Torvalds * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
131da177e4SLinus Torvalds *
141da177e4SLinus Torvalds * Speed up hash, lru, and free list operations. Use gfp() for allocating
151da177e4SLinus Torvalds * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
161da177e4SLinus Torvalds *
171da177e4SLinus Torvalds * Added 32k buffer block sizes - these are required older ARM systems. - RMK
181da177e4SLinus Torvalds *
191da177e4SLinus Torvalds * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
201da177e4SLinus Torvalds */
211da177e4SLinus Torvalds
221da177e4SLinus Torvalds #include <linux/kernel.h>
23f361bf4aSIngo Molnar #include <linux/sched/signal.h>
241da177e4SLinus Torvalds #include <linux/syscalls.h>
251da177e4SLinus Torvalds #include <linux/fs.h>
26ae259a9cSChristoph Hellwig #include <linux/iomap.h>
271da177e4SLinus Torvalds #include <linux/mm.h>
281da177e4SLinus Torvalds #include <linux/percpu.h>
291da177e4SLinus Torvalds #include <linux/slab.h>
3016f7e0feSRandy Dunlap #include <linux/capability.h>
311da177e4SLinus Torvalds #include <linux/blkdev.h>
321da177e4SLinus Torvalds #include <linux/file.h>
331da177e4SLinus Torvalds #include <linux/quotaops.h>
341da177e4SLinus Torvalds #include <linux/highmem.h>
35630d9c47SPaul Gortmaker #include <linux/export.h>
36bafc0dbaSTejun Heo #include <linux/backing-dev.h>
371da177e4SLinus Torvalds #include <linux/writeback.h>
381da177e4SLinus Torvalds #include <linux/hash.h>
391da177e4SLinus Torvalds #include <linux/suspend.h>
401da177e4SLinus Torvalds #include <linux/buffer_head.h>
4155e829afSAndrew Morton #include <linux/task_io_accounting_ops.h>
421da177e4SLinus Torvalds #include <linux/bio.h>
431da177e4SLinus Torvalds #include <linux/cpu.h>
441da177e4SLinus Torvalds #include <linux/bitops.h>
451da177e4SLinus Torvalds #include <linux/mpage.h>
46fb1c8f93SIngo Molnar #include <linux/bit_spinlock.h>
4729f3ad7dSJan Kara #include <linux/pagevec.h>
48f745c6f5SShakeel Butt #include <linux/sched/mm.h>
495305cb83STejun Heo #include <trace/events/block.h>
5031fb992cSEric Biggers #include <linux/fscrypt.h>
514fa512ceSEric Biggers #include <linux/fsverity.h>
528a237adfSMarcelo Tosatti #include <linux/sched/isolation.h>
531da177e4SLinus Torvalds
542b211dc0SBen Dooks #include "internal.h"
552b211dc0SBen Dooks
561da177e4SLinus Torvalds static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
575bdf402aSRitesh Harjani (IBM) static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
5844981351SBart Van Assche enum rw_hint hint, struct writeback_control *wbc);
591da177e4SLinus Torvalds
601da177e4SLinus Torvalds #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
611da177e4SLinus Torvalds
touch_buffer(struct buffer_head * bh)62f0059afdSTejun Heo inline void touch_buffer(struct buffer_head *bh)
63f0059afdSTejun Heo {
645305cb83STejun Heo trace_block_touch_buffer(bh);
6503c5f331SMatthew Wilcox (Oracle) folio_mark_accessed(bh->b_folio);
66f0059afdSTejun Heo }
67f0059afdSTejun Heo EXPORT_SYMBOL(touch_buffer);
68f0059afdSTejun Heo
__lock_buffer(struct buffer_head * bh)69fc9b52cdSHarvey Harrison void __lock_buffer(struct buffer_head *bh)
701da177e4SLinus Torvalds {
7174316201SNeilBrown wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
721da177e4SLinus Torvalds }
731da177e4SLinus Torvalds EXPORT_SYMBOL(__lock_buffer);
741da177e4SLinus Torvalds
unlock_buffer(struct buffer_head * bh)75fc9b52cdSHarvey Harrison void unlock_buffer(struct buffer_head *bh)
761da177e4SLinus Torvalds {
7751b07fc3SNick Piggin clear_bit_unlock(BH_Lock, &bh->b_state);
784e857c58SPeter Zijlstra smp_mb__after_atomic();
791da177e4SLinus Torvalds wake_up_bit(&bh->b_state, BH_Lock);
801da177e4SLinus Torvalds }
811fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(unlock_buffer);
821da177e4SLinus Torvalds
831da177e4SLinus Torvalds /*
84520f301cSMatthew Wilcox (Oracle) * Returns if the folio has dirty or writeback buffers. If all the buffers
85520f301cSMatthew Wilcox (Oracle) * are unlocked and clean then the folio_test_dirty information is stale. If
86520f301cSMatthew Wilcox (Oracle) * any of the buffers are locked, it is assumed they are locked for IO.
87b4597226SMel Gorman */
buffer_check_dirty_writeback(struct folio * folio,bool * dirty,bool * writeback)88520f301cSMatthew Wilcox (Oracle) void buffer_check_dirty_writeback(struct folio *folio,
89b4597226SMel Gorman bool *dirty, bool *writeback)
90b4597226SMel Gorman {
91b4597226SMel Gorman struct buffer_head *head, *bh;
92b4597226SMel Gorman *dirty = false;
93b4597226SMel Gorman *writeback = false;
94b4597226SMel Gorman
95520f301cSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio));
96b4597226SMel Gorman
97520f301cSMatthew Wilcox (Oracle) head = folio_buffers(folio);
98520f301cSMatthew Wilcox (Oracle) if (!head)
99b4597226SMel Gorman return;
100b4597226SMel Gorman
101520f301cSMatthew Wilcox (Oracle) if (folio_test_writeback(folio))
102b4597226SMel Gorman *writeback = true;
103b4597226SMel Gorman
104b4597226SMel Gorman bh = head;
105b4597226SMel Gorman do {
106b4597226SMel Gorman if (buffer_locked(bh))
107b4597226SMel Gorman *writeback = true;
108b4597226SMel Gorman
109b4597226SMel Gorman if (buffer_dirty(bh))
110b4597226SMel Gorman *dirty = true;
111b4597226SMel Gorman
112b4597226SMel Gorman bh = bh->b_this_page;
113b4597226SMel Gorman } while (bh != head);
114b4597226SMel Gorman }
115b4597226SMel Gorman
116b4597226SMel Gorman /*
1171da177e4SLinus Torvalds * Block until a buffer comes unlocked. This doesn't stop it
1181da177e4SLinus Torvalds * from becoming locked again - you have to lock it yourself
1191da177e4SLinus Torvalds * if you want to preserve its state.
1201da177e4SLinus Torvalds */
__wait_on_buffer(struct buffer_head * bh)1211da177e4SLinus Torvalds void __wait_on_buffer(struct buffer_head * bh)
1221da177e4SLinus Torvalds {
12374316201SNeilBrown wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
1241da177e4SLinus Torvalds }
1251fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__wait_on_buffer);
1261da177e4SLinus Torvalds
buffer_io_error(struct buffer_head * bh,char * msg)127b744c2acSRobert Elliott static void buffer_io_error(struct buffer_head *bh, char *msg)
1281da177e4SLinus Torvalds {
129432f16e6SRobert Elliott if (!test_bit(BH_Quiet, &bh->b_state))
130432f16e6SRobert Elliott printk_ratelimited(KERN_ERR
131a1c6f057SDmitry Monakhov "Buffer I/O error on dev %pg, logical block %llu%s\n",
132a1c6f057SDmitry Monakhov bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
1331da177e4SLinus Torvalds }
1341da177e4SLinus Torvalds
1351da177e4SLinus Torvalds /*
13668671f35SDmitry Monakhov * End-of-IO handler helper function which does not touch the bh after
13768671f35SDmitry Monakhov * unlocking it.
13868671f35SDmitry Monakhov * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
13968671f35SDmitry Monakhov * a race there is benign: unlock_buffer() only use the bh's address for
14068671f35SDmitry Monakhov * hashing after unlocking the buffer, so it doesn't actually touch the bh
14168671f35SDmitry Monakhov * itself.
1421da177e4SLinus Torvalds */
__end_buffer_read_notouch(struct buffer_head * bh,int uptodate)14368671f35SDmitry Monakhov static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
1441da177e4SLinus Torvalds {
1451da177e4SLinus Torvalds if (uptodate) {
1461da177e4SLinus Torvalds set_buffer_uptodate(bh);
1471da177e4SLinus Torvalds } else {
14870246286SChristoph Hellwig /* This happens, due to failed read-ahead attempts. */
1491da177e4SLinus Torvalds clear_buffer_uptodate(bh);
1501da177e4SLinus Torvalds }
1511da177e4SLinus Torvalds unlock_buffer(bh);
15268671f35SDmitry Monakhov }
15368671f35SDmitry Monakhov
15468671f35SDmitry Monakhov /*
15568671f35SDmitry Monakhov * Default synchronous end-of-IO handler.. Just mark it up-to-date and
15679f59784SZhang Yi * unlock the buffer.
15768671f35SDmitry Monakhov */
end_buffer_read_sync(struct buffer_head * bh,int uptodate)15868671f35SDmitry Monakhov void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
15968671f35SDmitry Monakhov {
16068671f35SDmitry Monakhov __end_buffer_read_notouch(bh, uptodate);
1611da177e4SLinus Torvalds put_bh(bh);
1621da177e4SLinus Torvalds }
1631fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(end_buffer_read_sync);
1641da177e4SLinus Torvalds
end_buffer_write_sync(struct buffer_head * bh,int uptodate)1651da177e4SLinus Torvalds void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
1661da177e4SLinus Torvalds {
1671da177e4SLinus Torvalds if (uptodate) {
1681da177e4SLinus Torvalds set_buffer_uptodate(bh);
1691da177e4SLinus Torvalds } else {
170b744c2acSRobert Elliott buffer_io_error(bh, ", lost sync page write");
17187354e5dSJeff Layton mark_buffer_write_io_error(bh);
1721da177e4SLinus Torvalds clear_buffer_uptodate(bh);
1731da177e4SLinus Torvalds }
1741da177e4SLinus Torvalds unlock_buffer(bh);
1751da177e4SLinus Torvalds put_bh(bh);
1761da177e4SLinus Torvalds }
1771fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(end_buffer_write_sync);
1781da177e4SLinus Torvalds
1791da177e4SLinus Torvalds /*
1801da177e4SLinus Torvalds * Various filesystems appear to want __find_get_block to be non-blocking.
1811da177e4SLinus Torvalds * But it's the page lock which protects the buffers. To get around this,
1821da177e4SLinus Torvalds * we get exclusion from try_to_free_buffers with the blockdev mapping's
183600f111eSMatthew Wilcox (Oracle) * i_private_lock.
1841da177e4SLinus Torvalds *
185600f111eSMatthew Wilcox (Oracle) * Hack idea: for the blockdev mapping, i_private_lock contention
1861da177e4SLinus Torvalds * may be quite high. This code could TryLock the page, and if that
187600f111eSMatthew Wilcox (Oracle) * succeeds, there is no need to take i_private_lock.
1881da177e4SLinus Torvalds */
1891da177e4SLinus Torvalds static struct buffer_head *
__find_get_block_slow(struct block_device * bdev,sector_t block)190385fd4c5SCoywolf Qi Hunt __find_get_block_slow(struct block_device *bdev, sector_t block)
1911da177e4SLinus Torvalds {
19253cd4cd3SAl Viro struct address_space *bd_mapping = bdev->bd_mapping;
19353cd4cd3SAl Viro const int blkbits = bd_mapping->host->i_blkbits;
1941da177e4SLinus Torvalds struct buffer_head *ret = NULL;
1951da177e4SLinus Torvalds pgoff_t index;
1961da177e4SLinus Torvalds struct buffer_head *bh;
1971da177e4SLinus Torvalds struct buffer_head *head;
198eee25182SMatthew Wilcox (Oracle) struct folio *folio;
1991da177e4SLinus Torvalds int all_mapped = 1;
20043636c80STetsuo Handa static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
2011da177e4SLinus Torvalds
20253cd4cd3SAl Viro index = ((loff_t)block << blkbits) / PAGE_SIZE;
203eee25182SMatthew Wilcox (Oracle) folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
204eee25182SMatthew Wilcox (Oracle) if (IS_ERR(folio))
2051da177e4SLinus Torvalds goto out;
2061da177e4SLinus Torvalds
207600f111eSMatthew Wilcox (Oracle) spin_lock(&bd_mapping->i_private_lock);
208eee25182SMatthew Wilcox (Oracle) head = folio_buffers(folio);
209eee25182SMatthew Wilcox (Oracle) if (!head)
2101da177e4SLinus Torvalds goto out_unlock;
2111da177e4SLinus Torvalds bh = head;
2121da177e4SLinus Torvalds do {
21397f76d3dSNikanth Karthikesan if (!buffer_mapped(bh))
21497f76d3dSNikanth Karthikesan all_mapped = 0;
21597f76d3dSNikanth Karthikesan else if (bh->b_blocknr == block) {
2161da177e4SLinus Torvalds ret = bh;
2171da177e4SLinus Torvalds get_bh(bh);
2181da177e4SLinus Torvalds goto out_unlock;
2191da177e4SLinus Torvalds }
2201da177e4SLinus Torvalds bh = bh->b_this_page;
2211da177e4SLinus Torvalds } while (bh != head);
2221da177e4SLinus Torvalds
2231da177e4SLinus Torvalds /* we might be here because some of the buffers on this page are
2241da177e4SLinus Torvalds * not mapped. This is due to various races between
2251da177e4SLinus Torvalds * file io on the block device and getblk. It gets dealt with
2261da177e4SLinus Torvalds * elsewhere, don't buffer_error if we had some unmapped buffers
2271da177e4SLinus Torvalds */
22843636c80STetsuo Handa ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
22943636c80STetsuo Handa if (all_mapped && __ratelimit(&last_warned)) {
23043636c80STetsuo Handa printk("__find_get_block_slow() failed. block=%llu, "
23143636c80STetsuo Handa "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
23243636c80STetsuo Handa "device %pg blocksize: %d\n",
233205f87f6SBadari Pulavarty (unsigned long long)block,
23443636c80STetsuo Handa (unsigned long long)bh->b_blocknr,
23543636c80STetsuo Handa bh->b_state, bh->b_size, bdev,
23653cd4cd3SAl Viro 1 << blkbits);
2371da177e4SLinus Torvalds }
2381da177e4SLinus Torvalds out_unlock:
239600f111eSMatthew Wilcox (Oracle) spin_unlock(&bd_mapping->i_private_lock);
240eee25182SMatthew Wilcox (Oracle) folio_put(folio);
2411da177e4SLinus Torvalds out:
2421da177e4SLinus Torvalds return ret;
2431da177e4SLinus Torvalds }
2441da177e4SLinus Torvalds
end_buffer_async_read(struct buffer_head * bh,int uptodate)2451da177e4SLinus Torvalds static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
2461da177e4SLinus Torvalds {
2471da177e4SLinus Torvalds unsigned long flags;
248a3972203SNick Piggin struct buffer_head *first;
2491da177e4SLinus Torvalds struct buffer_head *tmp;
2502e2dba15SMatthew Wilcox (Oracle) struct folio *folio;
2512e2dba15SMatthew Wilcox (Oracle) int folio_uptodate = 1;
2521da177e4SLinus Torvalds
2531da177e4SLinus Torvalds BUG_ON(!buffer_async_read(bh));
2541da177e4SLinus Torvalds
2552e2dba15SMatthew Wilcox (Oracle) folio = bh->b_folio;
2561da177e4SLinus Torvalds if (uptodate) {
2571da177e4SLinus Torvalds set_buffer_uptodate(bh);
2581da177e4SLinus Torvalds } else {
2591da177e4SLinus Torvalds clear_buffer_uptodate(bh);
260b744c2acSRobert Elliott buffer_io_error(bh, ", async page read");
2611da177e4SLinus Torvalds }
2621da177e4SLinus Torvalds
2631da177e4SLinus Torvalds /*
2641da177e4SLinus Torvalds * Be _very_ careful from here on. Bad things can happen if
2651da177e4SLinus Torvalds * two buffer heads end IO at almost the same time and both
2661da177e4SLinus Torvalds * decide that the page is now completely done.
2671da177e4SLinus Torvalds */
2682e2dba15SMatthew Wilcox (Oracle) first = folio_buffers(folio);
269f1e67e35SThomas Gleixner spin_lock_irqsave(&first->b_uptodate_lock, flags);
2701da177e4SLinus Torvalds clear_buffer_async_read(bh);
2711da177e4SLinus Torvalds unlock_buffer(bh);
2721da177e4SLinus Torvalds tmp = bh;
2731da177e4SLinus Torvalds do {
2741da177e4SLinus Torvalds if (!buffer_uptodate(tmp))
2752e2dba15SMatthew Wilcox (Oracle) folio_uptodate = 0;
2761da177e4SLinus Torvalds if (buffer_async_read(tmp)) {
2771da177e4SLinus Torvalds BUG_ON(!buffer_locked(tmp));
2781da177e4SLinus Torvalds goto still_busy;
2791da177e4SLinus Torvalds }
2801da177e4SLinus Torvalds tmp = tmp->b_this_page;
2811da177e4SLinus Torvalds } while (tmp != bh);
282f1e67e35SThomas Gleixner spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
2831da177e4SLinus Torvalds
2846ba924d3SMatthew Wilcox (Oracle) folio_end_read(folio, folio_uptodate);
2851da177e4SLinus Torvalds return;
2861da177e4SLinus Torvalds
2871da177e4SLinus Torvalds still_busy:
288f1e67e35SThomas Gleixner spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
2891da177e4SLinus Torvalds return;
2901da177e4SLinus Torvalds }
2911da177e4SLinus Torvalds
2924fa512ceSEric Biggers struct postprocess_bh_ctx {
29331fb992cSEric Biggers struct work_struct work;
29431fb992cSEric Biggers struct buffer_head *bh;
29531fb992cSEric Biggers };
29631fb992cSEric Biggers
verify_bh(struct work_struct * work)2974fa512ceSEric Biggers static void verify_bh(struct work_struct *work)
2984fa512ceSEric Biggers {
2994fa512ceSEric Biggers struct postprocess_bh_ctx *ctx =
3004fa512ceSEric Biggers container_of(work, struct postprocess_bh_ctx, work);
3014fa512ceSEric Biggers struct buffer_head *bh = ctx->bh;
3024fa512ceSEric Biggers bool valid;
3034fa512ceSEric Biggers
3048b7d3fe9SEric Biggers valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
3054fa512ceSEric Biggers end_buffer_async_read(bh, valid);
3064fa512ceSEric Biggers kfree(ctx);
3074fa512ceSEric Biggers }
3084fa512ceSEric Biggers
need_fsverity(struct buffer_head * bh)3094fa512ceSEric Biggers static bool need_fsverity(struct buffer_head *bh)
3104fa512ceSEric Biggers {
3118b7d3fe9SEric Biggers struct folio *folio = bh->b_folio;
3128b7d3fe9SEric Biggers struct inode *inode = folio->mapping->host;
3134fa512ceSEric Biggers
3144fa512ceSEric Biggers return fsverity_active(inode) &&
3154fa512ceSEric Biggers /* needed by ext4 */
3168b7d3fe9SEric Biggers folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
3174fa512ceSEric Biggers }
3184fa512ceSEric Biggers
decrypt_bh(struct work_struct * work)31931fb992cSEric Biggers static void decrypt_bh(struct work_struct *work)
32031fb992cSEric Biggers {
3214fa512ceSEric Biggers struct postprocess_bh_ctx *ctx =
3224fa512ceSEric Biggers container_of(work, struct postprocess_bh_ctx, work);
32331fb992cSEric Biggers struct buffer_head *bh = ctx->bh;
32431fb992cSEric Biggers int err;
32531fb992cSEric Biggers
3269c7fb7f7SEric Biggers err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
3279c7fb7f7SEric Biggers bh_offset(bh));
3284fa512ceSEric Biggers if (err == 0 && need_fsverity(bh)) {
3294fa512ceSEric Biggers /*
3304fa512ceSEric Biggers * We use different work queues for decryption and for verity
3314fa512ceSEric Biggers * because verity may require reading metadata pages that need
3324fa512ceSEric Biggers * decryption, and we shouldn't recurse to the same workqueue.
3334fa512ceSEric Biggers */
3344fa512ceSEric Biggers INIT_WORK(&ctx->work, verify_bh);
3354fa512ceSEric Biggers fsverity_enqueue_verify_work(&ctx->work);
3364fa512ceSEric Biggers return;
3374fa512ceSEric Biggers }
33831fb992cSEric Biggers end_buffer_async_read(bh, err == 0);
33931fb992cSEric Biggers kfree(ctx);
34031fb992cSEric Biggers }
34131fb992cSEric Biggers
34231fb992cSEric Biggers /*
3432c69e205SMatthew Wilcox (Oracle) * I/O completion handler for block_read_full_folio() - pages
34431fb992cSEric Biggers * which come unlocked at the end of I/O.
34531fb992cSEric Biggers */
end_buffer_async_read_io(struct buffer_head * bh,int uptodate)34631fb992cSEric Biggers static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
34731fb992cSEric Biggers {
3483822a7c4SLinus Torvalds struct inode *inode = bh->b_folio->mapping->host;
3494fa512ceSEric Biggers bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
3504fa512ceSEric Biggers bool verify = need_fsverity(bh);
3514fa512ceSEric Biggers
3524fa512ceSEric Biggers /* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
3534fa512ceSEric Biggers if (uptodate && (decrypt || verify)) {
3544fa512ceSEric Biggers struct postprocess_bh_ctx *ctx =
3554fa512ceSEric Biggers kmalloc(sizeof(*ctx), GFP_ATOMIC);
35631fb992cSEric Biggers
35731fb992cSEric Biggers if (ctx) {
35831fb992cSEric Biggers ctx->bh = bh;
3594fa512ceSEric Biggers if (decrypt) {
3604fa512ceSEric Biggers INIT_WORK(&ctx->work, decrypt_bh);
36131fb992cSEric Biggers fscrypt_enqueue_decrypt_work(&ctx->work);
3624fa512ceSEric Biggers } else {
3634fa512ceSEric Biggers INIT_WORK(&ctx->work, verify_bh);
3644fa512ceSEric Biggers fsverity_enqueue_verify_work(&ctx->work);
3654fa512ceSEric Biggers }
36631fb992cSEric Biggers return;
36731fb992cSEric Biggers }
36831fb992cSEric Biggers uptodate = 0;
36931fb992cSEric Biggers }
37031fb992cSEric Biggers end_buffer_async_read(bh, uptodate);
37131fb992cSEric Biggers }
37231fb992cSEric Biggers
3731da177e4SLinus Torvalds /*
37414059f66SMatthew Wilcox (Oracle) * Completion handler for block_write_full_folio() - folios which are unlocked
37514059f66SMatthew Wilcox (Oracle) * during I/O, and which have the writeback flag cleared upon I/O completion.
3761da177e4SLinus Torvalds */
end_buffer_async_write(struct buffer_head * bh,int uptodate)37714059f66SMatthew Wilcox (Oracle) static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
3781da177e4SLinus Torvalds {
3791da177e4SLinus Torvalds unsigned long flags;
380a3972203SNick Piggin struct buffer_head *first;
3811da177e4SLinus Torvalds struct buffer_head *tmp;
382743ed81eSMatthew Wilcox (Oracle) struct folio *folio;
3831da177e4SLinus Torvalds
3841da177e4SLinus Torvalds BUG_ON(!buffer_async_write(bh));
3851da177e4SLinus Torvalds
386743ed81eSMatthew Wilcox (Oracle) folio = bh->b_folio;
3871da177e4SLinus Torvalds if (uptodate) {
3881da177e4SLinus Torvalds set_buffer_uptodate(bh);
3891da177e4SLinus Torvalds } else {
390b744c2acSRobert Elliott buffer_io_error(bh, ", lost async page write");
39187354e5dSJeff Layton mark_buffer_write_io_error(bh);
3921da177e4SLinus Torvalds clear_buffer_uptodate(bh);
3931da177e4SLinus Torvalds }
3941da177e4SLinus Torvalds
395743ed81eSMatthew Wilcox (Oracle) first = folio_buffers(folio);
396f1e67e35SThomas Gleixner spin_lock_irqsave(&first->b_uptodate_lock, flags);
397a3972203SNick Piggin
3981da177e4SLinus Torvalds clear_buffer_async_write(bh);
3991da177e4SLinus Torvalds unlock_buffer(bh);
4001da177e4SLinus Torvalds tmp = bh->b_this_page;
4011da177e4SLinus Torvalds while (tmp != bh) {
4021da177e4SLinus Torvalds if (buffer_async_write(tmp)) {
4031da177e4SLinus Torvalds BUG_ON(!buffer_locked(tmp));
4041da177e4SLinus Torvalds goto still_busy;
4051da177e4SLinus Torvalds }
4061da177e4SLinus Torvalds tmp = tmp->b_this_page;
4071da177e4SLinus Torvalds }
408f1e67e35SThomas Gleixner spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
409743ed81eSMatthew Wilcox (Oracle) folio_end_writeback(folio);
4101da177e4SLinus Torvalds return;
4111da177e4SLinus Torvalds
4121da177e4SLinus Torvalds still_busy:
413f1e67e35SThomas Gleixner spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
4141da177e4SLinus Torvalds return;
4151da177e4SLinus Torvalds }
4161da177e4SLinus Torvalds
4171da177e4SLinus Torvalds /*
4181da177e4SLinus Torvalds * If a page's buffers are under async readin (end_buffer_async_read
4191da177e4SLinus Torvalds * completion) then there is a possibility that another thread of
4201da177e4SLinus Torvalds * control could lock one of the buffers after it has completed
4211da177e4SLinus Torvalds * but while some of the other buffers have not completed. This
4221da177e4SLinus Torvalds * locked buffer would confuse end_buffer_async_read() into not unlocking
4231da177e4SLinus Torvalds * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
4241da177e4SLinus Torvalds * that this buffer is not under async I/O.
4251da177e4SLinus Torvalds *
4261da177e4SLinus Torvalds * The page comes unlocked when it has no locked buffer_async buffers
4271da177e4SLinus Torvalds * left.
4281da177e4SLinus Torvalds *
4291da177e4SLinus Torvalds * PageLocked prevents anyone starting new async I/O reads any of
4301da177e4SLinus Torvalds * the buffers.
4311da177e4SLinus Torvalds *
4321da177e4SLinus Torvalds * PageWriteback is used to prevent simultaneous writeout of the same
4331da177e4SLinus Torvalds * page.
4341da177e4SLinus Torvalds *
4351da177e4SLinus Torvalds * PageLocked prevents anyone from starting writeback of a page which is
4361da177e4SLinus Torvalds * under read I/O (PageWriteback is only ever set against a locked page).
4371da177e4SLinus Torvalds */
mark_buffer_async_read(struct buffer_head * bh)4381da177e4SLinus Torvalds static void mark_buffer_async_read(struct buffer_head *bh)
4391da177e4SLinus Torvalds {
44031fb992cSEric Biggers bh->b_end_io = end_buffer_async_read_io;
4411da177e4SLinus Torvalds set_buffer_async_read(bh);
4421da177e4SLinus Torvalds }
4431da177e4SLinus Torvalds
mark_buffer_async_write_endio(struct buffer_head * bh,bh_end_io_t * handler)4441fe72eaaSH Hartley Sweeten static void mark_buffer_async_write_endio(struct buffer_head *bh,
44535c80d5fSChris Mason bh_end_io_t *handler)
44635c80d5fSChris Mason {
44735c80d5fSChris Mason bh->b_end_io = handler;
44835c80d5fSChris Mason set_buffer_async_write(bh);
44935c80d5fSChris Mason }
45035c80d5fSChris Mason
mark_buffer_async_write(struct buffer_head * bh)4511da177e4SLinus Torvalds void mark_buffer_async_write(struct buffer_head *bh)
4521da177e4SLinus Torvalds {
45335c80d5fSChris Mason mark_buffer_async_write_endio(bh, end_buffer_async_write);
4541da177e4SLinus Torvalds }
4551da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_async_write);
4561da177e4SLinus Torvalds
4571da177e4SLinus Torvalds
4581da177e4SLinus Torvalds /*
4591da177e4SLinus Torvalds * fs/buffer.c contains helper functions for buffer-backed address space's
4601da177e4SLinus Torvalds * fsync functions. A common requirement for buffer-based filesystems is
4611da177e4SLinus Torvalds * that certain data from the backing blockdev needs to be written out for
4621da177e4SLinus Torvalds * a successful fsync(). For example, ext2 indirect blocks need to be
4631da177e4SLinus Torvalds * written back and waited upon before fsync() returns.
4641da177e4SLinus Torvalds *
46573f65b8bSAndreas Gruenbacher * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
4661da177e4SLinus Torvalds * inode_has_buffers() and invalidate_inode_buffers() are provided for the
467600f111eSMatthew Wilcox (Oracle) * management of a list of dependent buffers at ->i_mapping->i_private_list.
4681da177e4SLinus Torvalds *
4691da177e4SLinus Torvalds * Locking is a little subtle: try_to_free_buffers() will remove buffers
4701da177e4SLinus Torvalds * from their controlling inode's queue when they are being freed. But
4711da177e4SLinus Torvalds * try_to_free_buffers() will be operating against the *blockdev* mapping
4721da177e4SLinus Torvalds * at the time, not against the S_ISREG file which depends on those buffers.
473600f111eSMatthew Wilcox (Oracle) * So the locking for i_private_list is via the i_private_lock in the address_space
4741da177e4SLinus Torvalds * which backs the buffers. Which is different from the address_space
4751da177e4SLinus Torvalds * against which the buffers are listed. So for a particular address_space,
476600f111eSMatthew Wilcox (Oracle) * mapping->i_private_lock does *not* protect mapping->i_private_list! In fact,
477600f111eSMatthew Wilcox (Oracle) * mapping->i_private_list will always be protected by the backing blockdev's
478600f111eSMatthew Wilcox (Oracle) * ->i_private_lock.
4791da177e4SLinus Torvalds *
4801da177e4SLinus Torvalds * Which introduces a requirement: all buffers on an address_space's
481600f111eSMatthew Wilcox (Oracle) * ->i_private_list must be from the same address_space: the blockdev's.
4821da177e4SLinus Torvalds *
483600f111eSMatthew Wilcox (Oracle) * address_spaces which do not place buffers at ->i_private_list via these
484600f111eSMatthew Wilcox (Oracle) * utility functions are free to use i_private_lock and i_private_list for
485600f111eSMatthew Wilcox (Oracle) * whatever they want. The only requirement is that list_empty(i_private_list)
4861da177e4SLinus Torvalds * be true at clear_inode() time.
4871da177e4SLinus Torvalds *
4881da177e4SLinus Torvalds * FIXME: clear_inode should not call invalidate_inode_buffers(). The
4891da177e4SLinus Torvalds * filesystems should do that. invalidate_inode_buffers() should just go
4901da177e4SLinus Torvalds * BUG_ON(!list_empty).
4911da177e4SLinus Torvalds *
4921da177e4SLinus Torvalds * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
4931da177e4SLinus Torvalds * take an address_space, not an inode. And it should be called
4941da177e4SLinus Torvalds * mark_buffer_dirty_fsync() to clearly define why those buffers are being
4951da177e4SLinus Torvalds * queued up.
4961da177e4SLinus Torvalds *
4971da177e4SLinus Torvalds * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
4981da177e4SLinus Torvalds * list if it is already on a list. Because if the buffer is on a list,
4991da177e4SLinus Torvalds * it *must* already be on the right one. If not, the filesystem is being
5001da177e4SLinus Torvalds * silly. This will save a ton of locking. But first we have to ensure
5011da177e4SLinus Torvalds * that buffers are taken *off* the old inode's list when they are freed
5021da177e4SLinus Torvalds * (presumably in truncate). That requires careful auditing of all
5031da177e4SLinus Torvalds * filesystems (do it inside bforget()). It could also be done by bringing
5041da177e4SLinus Torvalds * b_inode back.
5051da177e4SLinus Torvalds */
5061da177e4SLinus Torvalds
5071da177e4SLinus Torvalds /*
508600f111eSMatthew Wilcox (Oracle) * The buffer's backing address_space's i_private_lock must be held
5091da177e4SLinus Torvalds */
__remove_assoc_queue(struct buffer_head * bh)510dbacefc9SThomas Petazzoni static void __remove_assoc_queue(struct buffer_head *bh)
5111da177e4SLinus Torvalds {
5121da177e4SLinus Torvalds list_del_init(&bh->b_assoc_buffers);
51358ff407bSJan Kara WARN_ON(!bh->b_assoc_map);
51458ff407bSJan Kara bh->b_assoc_map = NULL;
5151da177e4SLinus Torvalds }
5161da177e4SLinus Torvalds
inode_has_buffers(struct inode * inode)5171da177e4SLinus Torvalds int inode_has_buffers(struct inode *inode)
5181da177e4SLinus Torvalds {
519600f111eSMatthew Wilcox (Oracle) return !list_empty(&inode->i_data.i_private_list);
5201da177e4SLinus Torvalds }
5211da177e4SLinus Torvalds
5221da177e4SLinus Torvalds /*
5231da177e4SLinus Torvalds * osync is designed to support O_SYNC io. It waits synchronously for
5241da177e4SLinus Torvalds * all already-submitted IO to complete, but does not queue any new
5251da177e4SLinus Torvalds * writes to the disk.
5261da177e4SLinus Torvalds *
52779f59784SZhang Yi * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
52879f59784SZhang Yi * as you dirty the buffers, and then use osync_inode_buffers to wait for
5291da177e4SLinus Torvalds * completion. Any other dirty buffers which are not yet queued for
5301da177e4SLinus Torvalds * write will not be flushed to disk by the osync.
5311da177e4SLinus Torvalds */
osync_buffers_list(spinlock_t * lock,struct list_head * list)5321da177e4SLinus Torvalds static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
5331da177e4SLinus Torvalds {
5341da177e4SLinus Torvalds struct buffer_head *bh;
5351da177e4SLinus Torvalds struct list_head *p;
5361da177e4SLinus Torvalds int err = 0;
5371da177e4SLinus Torvalds
5381da177e4SLinus Torvalds spin_lock(lock);
5391da177e4SLinus Torvalds repeat:
5401da177e4SLinus Torvalds list_for_each_prev(p, list) {
5411da177e4SLinus Torvalds bh = BH_ENTRY(p);
5421da177e4SLinus Torvalds if (buffer_locked(bh)) {
5431da177e4SLinus Torvalds get_bh(bh);
5441da177e4SLinus Torvalds spin_unlock(lock);
5451da177e4SLinus Torvalds wait_on_buffer(bh);
5461da177e4SLinus Torvalds if (!buffer_uptodate(bh))
5471da177e4SLinus Torvalds err = -EIO;
5481da177e4SLinus Torvalds brelse(bh);
5491da177e4SLinus Torvalds spin_lock(lock);
5501da177e4SLinus Torvalds goto repeat;
5511da177e4SLinus Torvalds }
5521da177e4SLinus Torvalds }
5531da177e4SLinus Torvalds spin_unlock(lock);
5541da177e4SLinus Torvalds return err;
5551da177e4SLinus Torvalds }
5561da177e4SLinus Torvalds
5571da177e4SLinus Torvalds /**
55878a4a50aSRandy Dunlap * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
55967be2dd1SMartin Waitz * @mapping: the mapping which wants those buffers written
5601da177e4SLinus Torvalds *
561600f111eSMatthew Wilcox (Oracle) * Starts I/O against the buffers at mapping->i_private_list, and waits upon
5621da177e4SLinus Torvalds * that I/O.
5631da177e4SLinus Torvalds *
56467be2dd1SMartin Waitz * Basically, this is a convenience function for fsync().
56567be2dd1SMartin Waitz * @mapping is a file or directory which needs those buffers to be written for
56667be2dd1SMartin Waitz * a successful fsync().
5671da177e4SLinus Torvalds */
sync_mapping_buffers(struct address_space * mapping)5681da177e4SLinus Torvalds int sync_mapping_buffers(struct address_space *mapping)
5691da177e4SLinus Torvalds {
570600f111eSMatthew Wilcox (Oracle) struct address_space *buffer_mapping = mapping->i_private_data;
5711da177e4SLinus Torvalds
572600f111eSMatthew Wilcox (Oracle) if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
5731da177e4SLinus Torvalds return 0;
5741da177e4SLinus Torvalds
575600f111eSMatthew Wilcox (Oracle) return fsync_buffers_list(&buffer_mapping->i_private_lock,
576600f111eSMatthew Wilcox (Oracle) &mapping->i_private_list);
5771da177e4SLinus Torvalds }
5781da177e4SLinus Torvalds EXPORT_SYMBOL(sync_mapping_buffers);
5791da177e4SLinus Torvalds
58031b2ebc0SRitesh Harjani (IBM) /**
58131b2ebc0SRitesh Harjani (IBM) * generic_buffers_fsync_noflush - generic buffer fsync implementation
58231b2ebc0SRitesh Harjani (IBM) * for simple filesystems with no inode lock
58331b2ebc0SRitesh Harjani (IBM) *
58431b2ebc0SRitesh Harjani (IBM) * @file: file to synchronize
58531b2ebc0SRitesh Harjani (IBM) * @start: start offset in bytes
58631b2ebc0SRitesh Harjani (IBM) * @end: end offset in bytes (inclusive)
58731b2ebc0SRitesh Harjani (IBM) * @datasync: only synchronize essential metadata if true
58831b2ebc0SRitesh Harjani (IBM) *
58931b2ebc0SRitesh Harjani (IBM) * This is a generic implementation of the fsync method for simple
59031b2ebc0SRitesh Harjani (IBM) * filesystems which track all non-inode metadata in the buffers list
59131b2ebc0SRitesh Harjani (IBM) * hanging off the address_space structure.
59231b2ebc0SRitesh Harjani (IBM) */
generic_buffers_fsync_noflush(struct file * file,loff_t start,loff_t end,bool datasync)59331b2ebc0SRitesh Harjani (IBM) int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
59431b2ebc0SRitesh Harjani (IBM) bool datasync)
59531b2ebc0SRitesh Harjani (IBM) {
59631b2ebc0SRitesh Harjani (IBM) struct inode *inode = file->f_mapping->host;
59731b2ebc0SRitesh Harjani (IBM) int err;
59831b2ebc0SRitesh Harjani (IBM) int ret;
59931b2ebc0SRitesh Harjani (IBM)
60031b2ebc0SRitesh Harjani (IBM) err = file_write_and_wait_range(file, start, end);
60131b2ebc0SRitesh Harjani (IBM) if (err)
60231b2ebc0SRitesh Harjani (IBM) return err;
60331b2ebc0SRitesh Harjani (IBM)
60431b2ebc0SRitesh Harjani (IBM) ret = sync_mapping_buffers(inode->i_mapping);
60531b2ebc0SRitesh Harjani (IBM) if (!(inode->i_state & I_DIRTY_ALL))
60631b2ebc0SRitesh Harjani (IBM) goto out;
60731b2ebc0SRitesh Harjani (IBM) if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
60831b2ebc0SRitesh Harjani (IBM) goto out;
60931b2ebc0SRitesh Harjani (IBM)
61031b2ebc0SRitesh Harjani (IBM) err = sync_inode_metadata(inode, 1);
61131b2ebc0SRitesh Harjani (IBM) if (ret == 0)
61231b2ebc0SRitesh Harjani (IBM) ret = err;
61331b2ebc0SRitesh Harjani (IBM)
61431b2ebc0SRitesh Harjani (IBM) out:
61531b2ebc0SRitesh Harjani (IBM) /* check and advance again to catch errors after syncing out buffers */
61631b2ebc0SRitesh Harjani (IBM) err = file_check_and_advance_wb_err(file);
61731b2ebc0SRitesh Harjani (IBM) if (ret == 0)
61831b2ebc0SRitesh Harjani (IBM) ret = err;
61931b2ebc0SRitesh Harjani (IBM) return ret;
62031b2ebc0SRitesh Harjani (IBM) }
62131b2ebc0SRitesh Harjani (IBM) EXPORT_SYMBOL(generic_buffers_fsync_noflush);
62231b2ebc0SRitesh Harjani (IBM)
62331b2ebc0SRitesh Harjani (IBM) /**
62431b2ebc0SRitesh Harjani (IBM) * generic_buffers_fsync - generic buffer fsync implementation
62531b2ebc0SRitesh Harjani (IBM) * for simple filesystems with no inode lock
62631b2ebc0SRitesh Harjani (IBM) *
62731b2ebc0SRitesh Harjani (IBM) * @file: file to synchronize
62831b2ebc0SRitesh Harjani (IBM) * @start: start offset in bytes
62931b2ebc0SRitesh Harjani (IBM) * @end: end offset in bytes (inclusive)
63031b2ebc0SRitesh Harjani (IBM) * @datasync: only synchronize essential metadata if true
63131b2ebc0SRitesh Harjani (IBM) *
63231b2ebc0SRitesh Harjani (IBM) * This is a generic implementation of the fsync method for simple
63331b2ebc0SRitesh Harjani (IBM) * filesystems which track all non-inode metadata in the buffers list
63431b2ebc0SRitesh Harjani (IBM) * hanging off the address_space structure. This also makes sure that
63531b2ebc0SRitesh Harjani (IBM) * a device cache flush operation is called at the end.
63631b2ebc0SRitesh Harjani (IBM) */
generic_buffers_fsync(struct file * file,loff_t start,loff_t end,bool datasync)63731b2ebc0SRitesh Harjani (IBM) int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
63831b2ebc0SRitesh Harjani (IBM) bool datasync)
63931b2ebc0SRitesh Harjani (IBM) {
64031b2ebc0SRitesh Harjani (IBM) struct inode *inode = file->f_mapping->host;
64131b2ebc0SRitesh Harjani (IBM) int ret;
64231b2ebc0SRitesh Harjani (IBM)
64331b2ebc0SRitesh Harjani (IBM) ret = generic_buffers_fsync_noflush(file, start, end, datasync);
64431b2ebc0SRitesh Harjani (IBM) if (!ret)
64531b2ebc0SRitesh Harjani (IBM) ret = blkdev_issue_flush(inode->i_sb->s_bdev);
64631b2ebc0SRitesh Harjani (IBM) return ret;
64731b2ebc0SRitesh Harjani (IBM) }
64831b2ebc0SRitesh Harjani (IBM) EXPORT_SYMBOL(generic_buffers_fsync);
64931b2ebc0SRitesh Harjani (IBM)
6501da177e4SLinus Torvalds /*
6511da177e4SLinus Torvalds * Called when we've recently written block `bblock', and it is known that
6521da177e4SLinus Torvalds * `bblock' was for a buffer_boundary() buffer. This means that the block at
6531da177e4SLinus Torvalds * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
6541da177e4SLinus Torvalds * dirty, schedule it for IO. So that indirects merge nicely with their data.
6551da177e4SLinus Torvalds */
write_boundary_block(struct block_device * bdev,sector_t bblock,unsigned blocksize)6561da177e4SLinus Torvalds void write_boundary_block(struct block_device *bdev,
6571da177e4SLinus Torvalds sector_t bblock, unsigned blocksize)
6581da177e4SLinus Torvalds {
6591da177e4SLinus Torvalds struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
6601da177e4SLinus Torvalds if (bh) {
6611da177e4SLinus Torvalds if (buffer_dirty(bh))
662e7ea1129SZhang Yi write_dirty_buffer(bh, 0);
6631da177e4SLinus Torvalds put_bh(bh);
6641da177e4SLinus Torvalds }
6651da177e4SLinus Torvalds }
6661da177e4SLinus Torvalds
mark_buffer_dirty_inode(struct buffer_head * bh,struct inode * inode)6671da177e4SLinus Torvalds void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
6681da177e4SLinus Torvalds {
6691da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping;
670abc8a8a2SMatthew Wilcox (Oracle) struct address_space *buffer_mapping = bh->b_folio->mapping;
6711da177e4SLinus Torvalds
6721da177e4SLinus Torvalds mark_buffer_dirty(bh);
673600f111eSMatthew Wilcox (Oracle) if (!mapping->i_private_data) {
674600f111eSMatthew Wilcox (Oracle) mapping->i_private_data = buffer_mapping;
6751da177e4SLinus Torvalds } else {
676600f111eSMatthew Wilcox (Oracle) BUG_ON(mapping->i_private_data != buffer_mapping);
6771da177e4SLinus Torvalds }
678535ee2fbSJan Kara if (!bh->b_assoc_map) {
679600f111eSMatthew Wilcox (Oracle) spin_lock(&buffer_mapping->i_private_lock);
6801da177e4SLinus Torvalds list_move_tail(&bh->b_assoc_buffers,
681600f111eSMatthew Wilcox (Oracle) &mapping->i_private_list);
68258ff407bSJan Kara bh->b_assoc_map = mapping;
683600f111eSMatthew Wilcox (Oracle) spin_unlock(&buffer_mapping->i_private_lock);
6841da177e4SLinus Torvalds }
6851da177e4SLinus Torvalds }
6861da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_dirty_inode);
6871da177e4SLinus Torvalds
6883814ec89SMatthew Wilcox (Oracle) /**
6893814ec89SMatthew Wilcox (Oracle) * block_dirty_folio - Mark a folio as dirty.
6903814ec89SMatthew Wilcox (Oracle) * @mapping: The address space containing this folio.
6913814ec89SMatthew Wilcox (Oracle) * @folio: The folio to mark dirty.
6921da177e4SLinus Torvalds *
6933814ec89SMatthew Wilcox (Oracle) * Filesystems which use buffer_heads can use this function as their
6943814ec89SMatthew Wilcox (Oracle) * ->dirty_folio implementation. Some filesystems need to do a little
6953814ec89SMatthew Wilcox (Oracle) * work before calling this function. Filesystems which do not use
6963814ec89SMatthew Wilcox (Oracle) * buffer_heads should call filemap_dirty_folio() instead.
6971da177e4SLinus Torvalds *
6983814ec89SMatthew Wilcox (Oracle) * If the folio has buffers, the uptodate buffers are set dirty, to
6993814ec89SMatthew Wilcox (Oracle) * preserve dirty-state coherency between the folio and the buffers.
7003814ec89SMatthew Wilcox (Oracle) * Buffers added to a dirty folio are created dirty.
7011da177e4SLinus Torvalds *
7023814ec89SMatthew Wilcox (Oracle) * The buffers are dirtied before the folio is dirtied. There's a small
7033814ec89SMatthew Wilcox (Oracle) * race window in which writeback may see the folio cleanness but not the
7043814ec89SMatthew Wilcox (Oracle) * buffer dirtiness. That's fine. If this code were to set the folio
7053814ec89SMatthew Wilcox (Oracle) * dirty before the buffers, writeback could clear the folio dirty flag,
7063814ec89SMatthew Wilcox (Oracle) * see a bunch of clean buffers and we'd end up with dirty buffers/clean
7073814ec89SMatthew Wilcox (Oracle) * folio on the dirty folio list.
7081da177e4SLinus Torvalds *
7093814ec89SMatthew Wilcox (Oracle) * We use i_private_lock to lock against try_to_free_buffers() while
7103814ec89SMatthew Wilcox (Oracle) * using the folio's buffer list. This also prevents clean buffers
7113814ec89SMatthew Wilcox (Oracle) * being added to the folio after it was set dirty.
7121da177e4SLinus Torvalds *
7133814ec89SMatthew Wilcox (Oracle) * Context: May only be called from process context. Does not sleep.
7143814ec89SMatthew Wilcox (Oracle) * Caller must ensure that @folio cannot be truncated during this call,
7153814ec89SMatthew Wilcox (Oracle) * typically by holding the folio lock or having a page in the folio
7163814ec89SMatthew Wilcox (Oracle) * mapped and holding the page table lock.
7173814ec89SMatthew Wilcox (Oracle) *
7183814ec89SMatthew Wilcox (Oracle) * Return: True if the folio was dirtied; false if it was already dirtied.
7191da177e4SLinus Torvalds */
block_dirty_folio(struct address_space * mapping,struct folio * folio)720e621900aSMatthew Wilcox (Oracle) bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
7211da177e4SLinus Torvalds {
722e621900aSMatthew Wilcox (Oracle) struct buffer_head *head;
723e621900aSMatthew Wilcox (Oracle) bool newly_dirty;
7241da177e4SLinus Torvalds
725600f111eSMatthew Wilcox (Oracle) spin_lock(&mapping->i_private_lock);
726e621900aSMatthew Wilcox (Oracle) head = folio_buffers(folio);
727e621900aSMatthew Wilcox (Oracle) if (head) {
7281da177e4SLinus Torvalds struct buffer_head *bh = head;
7291da177e4SLinus Torvalds
7301da177e4SLinus Torvalds do {
7311da177e4SLinus Torvalds set_buffer_dirty(bh);
7321da177e4SLinus Torvalds bh = bh->b_this_page;
7331da177e4SLinus Torvalds } while (bh != head);
7341da177e4SLinus Torvalds }
735c4843a75SGreg Thelen /*
736bcfe06bfSRoman Gushchin * Lock out page's memcg migration to keep PageDirty
73781f8c3a4SJohannes Weiner * synchronized with per-memcg dirty page counters.
738c4843a75SGreg Thelen */
739e621900aSMatthew Wilcox (Oracle) folio_memcg_lock(folio);
740e621900aSMatthew Wilcox (Oracle) newly_dirty = !folio_test_set_dirty(folio);
741600f111eSMatthew Wilcox (Oracle) spin_unlock(&mapping->i_private_lock);
7421da177e4SLinus Torvalds
743a8e7d49aSLinus Torvalds if (newly_dirty)
744e621900aSMatthew Wilcox (Oracle) __folio_mark_dirty(folio, mapping, 1);
745c4843a75SGreg Thelen
746e621900aSMatthew Wilcox (Oracle) folio_memcg_unlock(folio);
747c4843a75SGreg Thelen
748c4843a75SGreg Thelen if (newly_dirty)
749c4843a75SGreg Thelen __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
750c4843a75SGreg Thelen
751a8e7d49aSLinus Torvalds return newly_dirty;
7521da177e4SLinus Torvalds }
753e621900aSMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_dirty_folio);
7541da177e4SLinus Torvalds
7551da177e4SLinus Torvalds /*
7561da177e4SLinus Torvalds * Write out and wait upon a list of buffers.
7571da177e4SLinus Torvalds *
7581da177e4SLinus Torvalds * We have conflicting pressures: we want to make sure that all
7591da177e4SLinus Torvalds * initially dirty buffers get waited on, but that any subsequently
7601da177e4SLinus Torvalds * dirtied buffers don't. After all, we don't want fsync to last
7611da177e4SLinus Torvalds * forever if somebody is actively writing to the file.
7621da177e4SLinus Torvalds *
7631da177e4SLinus Torvalds * Do this in two main stages: first we copy dirty buffers to a
7641da177e4SLinus Torvalds * temporary inode list, queueing the writes as we go. Then we clean
7651da177e4SLinus Torvalds * up, waiting for those writes to complete.
7661da177e4SLinus Torvalds *
7671da177e4SLinus Torvalds * During this second stage, any subsequent updates to the file may end
7681da177e4SLinus Torvalds * up refiling the buffer on the original inode's dirty list again, so
7691da177e4SLinus Torvalds * there is a chance we will end up with a buffer queued for write but
7701da177e4SLinus Torvalds * not yet completed on that list. So, as a final cleanup we go through
7711da177e4SLinus Torvalds * the osync code to catch these locked, dirty buffers without requeuing
7721da177e4SLinus Torvalds * any newly dirty buffers for write.
7731da177e4SLinus Torvalds */
fsync_buffers_list(spinlock_t * lock,struct list_head * list)7741da177e4SLinus Torvalds static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
7751da177e4SLinus Torvalds {
7761da177e4SLinus Torvalds struct buffer_head *bh;
7777eaceaccSJens Axboe struct address_space *mapping;
7781da177e4SLinus Torvalds int err = 0, err2;
7794ee2491eSJens Axboe struct blk_plug plug;
78073ce1c9fSHongbo Li LIST_HEAD(tmp);
7811da177e4SLinus Torvalds
7824ee2491eSJens Axboe blk_start_plug(&plug);
7831da177e4SLinus Torvalds
7841da177e4SLinus Torvalds spin_lock(lock);
7851da177e4SLinus Torvalds while (!list_empty(list)) {
7861da177e4SLinus Torvalds bh = BH_ENTRY(list->next);
787535ee2fbSJan Kara mapping = bh->b_assoc_map;
78858ff407bSJan Kara __remove_assoc_queue(bh);
789535ee2fbSJan Kara /* Avoid race with mark_buffer_dirty_inode() which does
790535ee2fbSJan Kara * a lockless check and we rely on seeing the dirty bit */
791535ee2fbSJan Kara smp_mb();
7921da177e4SLinus Torvalds if (buffer_dirty(bh) || buffer_locked(bh)) {
7931da177e4SLinus Torvalds list_add(&bh->b_assoc_buffers, &tmp);
794535ee2fbSJan Kara bh->b_assoc_map = mapping;
7951da177e4SLinus Torvalds if (buffer_dirty(bh)) {
7961da177e4SLinus Torvalds get_bh(bh);
7971da177e4SLinus Torvalds spin_unlock(lock);
7981da177e4SLinus Torvalds /*
7991da177e4SLinus Torvalds * Ensure any pending I/O completes so that
8009cb569d6SChristoph Hellwig * write_dirty_buffer() actually writes the
8019cb569d6SChristoph Hellwig * current contents - it is a noop if I/O is
8029cb569d6SChristoph Hellwig * still in flight on potentially older
8039cb569d6SChristoph Hellwig * contents.
8041da177e4SLinus Torvalds */
80570fd7614SChristoph Hellwig write_dirty_buffer(bh, REQ_SYNC);
8069cf6b720SJens Axboe
8079cf6b720SJens Axboe /*
8089cf6b720SJens Axboe * Kick off IO for the previous mapping. Note
8099cf6b720SJens Axboe * that we will not run the very last mapping,
8109cf6b720SJens Axboe * wait_on_buffer() will do that for us
8119cf6b720SJens Axboe * through sync_buffer().
8129cf6b720SJens Axboe */
8131da177e4SLinus Torvalds brelse(bh);
8141da177e4SLinus Torvalds spin_lock(lock);
8151da177e4SLinus Torvalds }
8161da177e4SLinus Torvalds }
8171da177e4SLinus Torvalds }
8181da177e4SLinus Torvalds
8194ee2491eSJens Axboe spin_unlock(lock);
8204ee2491eSJens Axboe blk_finish_plug(&plug);
8214ee2491eSJens Axboe spin_lock(lock);
8224ee2491eSJens Axboe
8231da177e4SLinus Torvalds while (!list_empty(&tmp)) {
8241da177e4SLinus Torvalds bh = BH_ENTRY(tmp.prev);
8251da177e4SLinus Torvalds get_bh(bh);
826535ee2fbSJan Kara mapping = bh->b_assoc_map;
827535ee2fbSJan Kara __remove_assoc_queue(bh);
828535ee2fbSJan Kara /* Avoid race with mark_buffer_dirty_inode() which does
829535ee2fbSJan Kara * a lockless check and we rely on seeing the dirty bit */
830535ee2fbSJan Kara smp_mb();
831535ee2fbSJan Kara if (buffer_dirty(bh)) {
832535ee2fbSJan Kara list_add(&bh->b_assoc_buffers,
833600f111eSMatthew Wilcox (Oracle) &mapping->i_private_list);
834535ee2fbSJan Kara bh->b_assoc_map = mapping;
835535ee2fbSJan Kara }
8361da177e4SLinus Torvalds spin_unlock(lock);
8371da177e4SLinus Torvalds wait_on_buffer(bh);
8381da177e4SLinus Torvalds if (!buffer_uptodate(bh))
8391da177e4SLinus Torvalds err = -EIO;
8401da177e4SLinus Torvalds brelse(bh);
8411da177e4SLinus Torvalds spin_lock(lock);
8421da177e4SLinus Torvalds }
8431da177e4SLinus Torvalds
8441da177e4SLinus Torvalds spin_unlock(lock);
8451da177e4SLinus Torvalds err2 = osync_buffers_list(lock, list);
8461da177e4SLinus Torvalds if (err)
8471da177e4SLinus Torvalds return err;
8481da177e4SLinus Torvalds else
8491da177e4SLinus Torvalds return err2;
8501da177e4SLinus Torvalds }
8511da177e4SLinus Torvalds
8521da177e4SLinus Torvalds /*
8531da177e4SLinus Torvalds * Invalidate any and all dirty buffers on a given inode. We are
8541da177e4SLinus Torvalds * probably unmounting the fs, but that doesn't mean we have already
8551da177e4SLinus Torvalds * done a sync(). Just drop the buffers from the inode list.
8561da177e4SLinus Torvalds *
857600f111eSMatthew Wilcox (Oracle) * NOTE: we take the inode's blockdev's mapping's i_private_lock. Which
8581da177e4SLinus Torvalds * assumes that all the buffers are against the blockdev. Not true
8591da177e4SLinus Torvalds * for reiserfs.
8601da177e4SLinus Torvalds */
invalidate_inode_buffers(struct inode * inode)8611da177e4SLinus Torvalds void invalidate_inode_buffers(struct inode *inode)
8621da177e4SLinus Torvalds {
8631da177e4SLinus Torvalds if (inode_has_buffers(inode)) {
8641da177e4SLinus Torvalds struct address_space *mapping = &inode->i_data;
865600f111eSMatthew Wilcox (Oracle) struct list_head *list = &mapping->i_private_list;
866600f111eSMatthew Wilcox (Oracle) struct address_space *buffer_mapping = mapping->i_private_data;
8671da177e4SLinus Torvalds
868600f111eSMatthew Wilcox (Oracle) spin_lock(&buffer_mapping->i_private_lock);
8691da177e4SLinus Torvalds while (!list_empty(list))
8701da177e4SLinus Torvalds __remove_assoc_queue(BH_ENTRY(list->next));
871600f111eSMatthew Wilcox (Oracle) spin_unlock(&buffer_mapping->i_private_lock);
8721da177e4SLinus Torvalds }
8731da177e4SLinus Torvalds }
87452b19ac9SJan Kara EXPORT_SYMBOL(invalidate_inode_buffers);
8751da177e4SLinus Torvalds
8761da177e4SLinus Torvalds /*
8771da177e4SLinus Torvalds * Remove any clean buffers from the inode's buffer list. This is called
8781da177e4SLinus Torvalds * when we're trying to free the inode itself. Those buffers can pin it.
8791da177e4SLinus Torvalds *
8801da177e4SLinus Torvalds * Returns true if all buffers were removed.
8811da177e4SLinus Torvalds */
remove_inode_buffers(struct inode * inode)8821da177e4SLinus Torvalds int remove_inode_buffers(struct inode *inode)
8831da177e4SLinus Torvalds {
8841da177e4SLinus Torvalds int ret = 1;
8851da177e4SLinus Torvalds
8861da177e4SLinus Torvalds if (inode_has_buffers(inode)) {
8871da177e4SLinus Torvalds struct address_space *mapping = &inode->i_data;
888600f111eSMatthew Wilcox (Oracle) struct list_head *list = &mapping->i_private_list;
889600f111eSMatthew Wilcox (Oracle) struct address_space *buffer_mapping = mapping->i_private_data;
8901da177e4SLinus Torvalds
891600f111eSMatthew Wilcox (Oracle) spin_lock(&buffer_mapping->i_private_lock);
8921da177e4SLinus Torvalds while (!list_empty(list)) {
8931da177e4SLinus Torvalds struct buffer_head *bh = BH_ENTRY(list->next);
8941da177e4SLinus Torvalds if (buffer_dirty(bh)) {
8951da177e4SLinus Torvalds ret = 0;
8961da177e4SLinus Torvalds break;
8971da177e4SLinus Torvalds }
8981da177e4SLinus Torvalds __remove_assoc_queue(bh);
8991da177e4SLinus Torvalds }
900600f111eSMatthew Wilcox (Oracle) spin_unlock(&buffer_mapping->i_private_lock);
9011da177e4SLinus Torvalds }
9021da177e4SLinus Torvalds return ret;
9031da177e4SLinus Torvalds }
9041da177e4SLinus Torvalds
9051da177e4SLinus Torvalds /*
906c71124a8SPankaj Raghav * Create the appropriate buffers when given a folio for data area and
9071da177e4SLinus Torvalds * the size of each buffer.. Use the bh->b_this_page linked list to
9081da177e4SLinus Torvalds * follow the buffers created. Return NULL if unable to create more
9091da177e4SLinus Torvalds * buffers.
9101da177e4SLinus Torvalds *
9111da177e4SLinus Torvalds * The retry flag is used to differentiate async IO (paging, swapping)
9121da177e4SLinus Torvalds * which may not fail from ordinary buffer allocations.
9131da177e4SLinus Torvalds */
folio_alloc_buffers(struct folio * folio,unsigned long size,gfp_t gfp)914c71124a8SPankaj Raghav struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
9152a418157SMatthew Wilcox (Oracle) gfp_t gfp)
9161da177e4SLinus Torvalds {
9171da177e4SLinus Torvalds struct buffer_head *bh, *head;
9181da177e4SLinus Torvalds long offset;
919b87d8cefSRoman Gushchin struct mem_cgroup *memcg, *old_memcg;
9201da177e4SLinus Torvalds
921c71124a8SPankaj Raghav /* The folio lock pins the memcg */
922c71124a8SPankaj Raghav memcg = folio_memcg(folio);
923b87d8cefSRoman Gushchin old_memcg = set_active_memcg(memcg);
924f745c6f5SShakeel Butt
9251da177e4SLinus Torvalds head = NULL;
926c71124a8SPankaj Raghav offset = folio_size(folio);
9271da177e4SLinus Torvalds while ((offset -= size) >= 0) {
928640ab98fSJens Axboe bh = alloc_buffer_head(gfp);
9291da177e4SLinus Torvalds if (!bh)
9301da177e4SLinus Torvalds goto no_grow;
9311da177e4SLinus Torvalds
9321da177e4SLinus Torvalds bh->b_this_page = head;
9331da177e4SLinus Torvalds bh->b_blocknr = -1;
9341da177e4SLinus Torvalds head = bh;
9351da177e4SLinus Torvalds
9361da177e4SLinus Torvalds bh->b_size = size;
9371da177e4SLinus Torvalds
938c71124a8SPankaj Raghav /* Link the buffer to its folio */
939c71124a8SPankaj Raghav folio_set_bh(bh, folio, offset);
9401da177e4SLinus Torvalds }
941f745c6f5SShakeel Butt out:
942b87d8cefSRoman Gushchin set_active_memcg(old_memcg);
9431da177e4SLinus Torvalds return head;
9441da177e4SLinus Torvalds /*
9451da177e4SLinus Torvalds * In case anything failed, we just free everything we got.
9461da177e4SLinus Torvalds */
9471da177e4SLinus Torvalds no_grow:
9481da177e4SLinus Torvalds if (head) {
9491da177e4SLinus Torvalds do {
9501da177e4SLinus Torvalds bh = head;
9511da177e4SLinus Torvalds head = head->b_this_page;
9521da177e4SLinus Torvalds free_buffer_head(bh);
9531da177e4SLinus Torvalds } while (head);
9541da177e4SLinus Torvalds }
9551da177e4SLinus Torvalds
956f745c6f5SShakeel Butt goto out;
9571da177e4SLinus Torvalds }
958c71124a8SPankaj Raghav EXPORT_SYMBOL_GPL(folio_alloc_buffers);
959c71124a8SPankaj Raghav
alloc_page_buffers(struct page * page,unsigned long size)960*5c40e050SMichal Hocko struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size)
961c71124a8SPankaj Raghav {
9622a418157SMatthew Wilcox (Oracle) gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
9632a418157SMatthew Wilcox (Oracle)
9642a418157SMatthew Wilcox (Oracle) return folio_alloc_buffers(page_folio(page), size, gfp);
965c71124a8SPankaj Raghav }
9661da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(alloc_page_buffers);
9671da177e4SLinus Torvalds
link_dev_buffers(struct folio * folio,struct buffer_head * head)96808d84addSMatthew Wilcox (Oracle) static inline void link_dev_buffers(struct folio *folio,
96908d84addSMatthew Wilcox (Oracle) struct buffer_head *head)
9701da177e4SLinus Torvalds {
9711da177e4SLinus Torvalds struct buffer_head *bh, *tail;
9721da177e4SLinus Torvalds
9731da177e4SLinus Torvalds bh = head;
9741da177e4SLinus Torvalds do {
9751da177e4SLinus Torvalds tail = bh;
9761da177e4SLinus Torvalds bh = bh->b_this_page;
9771da177e4SLinus Torvalds } while (bh);
9781da177e4SLinus Torvalds tail->b_this_page = head;
97908d84addSMatthew Wilcox (Oracle) folio_attach_private(folio, head);
9801da177e4SLinus Torvalds }
9811da177e4SLinus Torvalds
blkdev_max_block(struct block_device * bdev,unsigned int size)982bbec0270SLinus Torvalds static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
983bbec0270SLinus Torvalds {
984bbec0270SLinus Torvalds sector_t retval = ~((sector_t)0);
985b86058f9SChristoph Hellwig loff_t sz = bdev_nr_bytes(bdev);
986bbec0270SLinus Torvalds
987bbec0270SLinus Torvalds if (sz) {
988bbec0270SLinus Torvalds unsigned int sizebits = blksize_bits(size);
989bbec0270SLinus Torvalds retval = (sz >> sizebits);
990bbec0270SLinus Torvalds }
991bbec0270SLinus Torvalds return retval;
992bbec0270SLinus Torvalds }
993bbec0270SLinus Torvalds
9941da177e4SLinus Torvalds /*
9956f24ce6bSMatthew Wilcox (Oracle) * Initialise the state of a blockdev folio's buffers.
9961da177e4SLinus Torvalds */
folio_init_buffers(struct folio * folio,struct block_device * bdev,unsigned size)9976f24ce6bSMatthew Wilcox (Oracle) static sector_t folio_init_buffers(struct folio *folio,
998382497adSMatthew Wilcox (Oracle) struct block_device *bdev, unsigned size)
9991da177e4SLinus Torvalds {
10006f24ce6bSMatthew Wilcox (Oracle) struct buffer_head *head = folio_buffers(folio);
10011da177e4SLinus Torvalds struct buffer_head *bh = head;
10026f24ce6bSMatthew Wilcox (Oracle) bool uptodate = folio_test_uptodate(folio);
1003382497adSMatthew Wilcox (Oracle) sector_t block = div_u64(folio_pos(folio), size);
1004bcd1d063SChristoph Hellwig sector_t end_block = blkdev_max_block(bdev, size);
10051da177e4SLinus Torvalds
10061da177e4SLinus Torvalds do {
10071da177e4SLinus Torvalds if (!buffer_mapped(bh)) {
100801950a34SEric Biggers bh->b_end_io = NULL;
100901950a34SEric Biggers bh->b_private = NULL;
10101da177e4SLinus Torvalds bh->b_bdev = bdev;
10111da177e4SLinus Torvalds bh->b_blocknr = block;
10121da177e4SLinus Torvalds if (uptodate)
10131da177e4SLinus Torvalds set_buffer_uptodate(bh);
1014080399aaSJeff Moyer if (block < end_block)
10151da177e4SLinus Torvalds set_buffer_mapped(bh);
10161da177e4SLinus Torvalds }
10171da177e4SLinus Torvalds block++;
10181da177e4SLinus Torvalds bh = bh->b_this_page;
10191da177e4SLinus Torvalds } while (bh != head);
1020676ce6d5SHugh Dickins
1021676ce6d5SHugh Dickins /*
1022676ce6d5SHugh Dickins * Caller needs to validate requested block against end of device.
1023676ce6d5SHugh Dickins */
1024676ce6d5SHugh Dickins return end_block;
10251da177e4SLinus Torvalds }
10261da177e4SLinus Torvalds
10271da177e4SLinus Torvalds /*
10286d840a18SMatthew Wilcox (Oracle) * Create the page-cache folio that contains the requested block.
10291da177e4SLinus Torvalds *
1030676ce6d5SHugh Dickins * This is used purely for blockdev mappings.
10316d840a18SMatthew Wilcox (Oracle) *
1032bcd30d4cSMatthew Wilcox (Oracle) * Returns false if we have a failure which cannot be cured by retrying
1033bcd30d4cSMatthew Wilcox (Oracle) * without sleeping. Returns true if we succeeded, or the caller should retry.
10341da177e4SLinus Torvalds */
grow_dev_folio(struct block_device * bdev,sector_t block,pgoff_t index,unsigned size,gfp_t gfp)10356d840a18SMatthew Wilcox (Oracle) static bool grow_dev_folio(struct block_device *bdev, sector_t block,
1036382497adSMatthew Wilcox (Oracle) pgoff_t index, unsigned size, gfp_t gfp)
10371da177e4SLinus Torvalds {
103822f89a4fSAl Viro struct address_space *mapping = bdev->bd_mapping;
10393c98a41cSMatthew Wilcox (Oracle) struct folio *folio;
10401da177e4SLinus Torvalds struct buffer_head *bh;
10416d840a18SMatthew Wilcox (Oracle) sector_t end_block = 0;
104284235de3SJohannes Weiner
104322f89a4fSAl Viro folio = __filemap_get_folio(mapping, index,
10443ed65f04SMatthew Wilcox (Oracle) FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
10453ed65f04SMatthew Wilcox (Oracle) if (IS_ERR(folio))
10466d840a18SMatthew Wilcox (Oracle) return false;
10471da177e4SLinus Torvalds
10483c98a41cSMatthew Wilcox (Oracle) bh = folio_buffers(folio);
10493c98a41cSMatthew Wilcox (Oracle) if (bh) {
10501da177e4SLinus Torvalds if (bh->b_size == size) {
1051382497adSMatthew Wilcox (Oracle) end_block = folio_init_buffers(folio, bdev, size);
10526d840a18SMatthew Wilcox (Oracle) goto unlock;
10531da177e4SLinus Torvalds }
10541da177e4SLinus Torvalds
1055bcd30d4cSMatthew Wilcox (Oracle) /*
1056bcd30d4cSMatthew Wilcox (Oracle) * Retrying may succeed; for example the folio may finish
1057bcd30d4cSMatthew Wilcox (Oracle) * writeback, or buffers may be cleaned. This should not
1058bcd30d4cSMatthew Wilcox (Oracle) * happen very often; maybe we have old buffers attached to
1059bcd30d4cSMatthew Wilcox (Oracle) * this blockdev's page cache and we're trying to change
1060bcd30d4cSMatthew Wilcox (Oracle) * the block size?
1061bcd30d4cSMatthew Wilcox (Oracle) */
1062bcd30d4cSMatthew Wilcox (Oracle) if (!try_to_free_buffers(folio)) {
10636d840a18SMatthew Wilcox (Oracle) end_block = ~0ULL;
10646d840a18SMatthew Wilcox (Oracle) goto unlock;
10656d840a18SMatthew Wilcox (Oracle) }
1066bcd30d4cSMatthew Wilcox (Oracle) }
10676d840a18SMatthew Wilcox (Oracle)
10683ed65f04SMatthew Wilcox (Oracle) bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
10693ed65f04SMatthew Wilcox (Oracle) if (!bh)
10706d840a18SMatthew Wilcox (Oracle) goto unlock;
10711da177e4SLinus Torvalds
10721da177e4SLinus Torvalds /*
10733c98a41cSMatthew Wilcox (Oracle) * Link the folio to the buffers and initialise them. Take the
10741da177e4SLinus Torvalds * lock to be atomic wrt __find_get_block(), which does not
10753c98a41cSMatthew Wilcox (Oracle) * run under the folio lock.
10761da177e4SLinus Torvalds */
107722f89a4fSAl Viro spin_lock(&mapping->i_private_lock);
107808d84addSMatthew Wilcox (Oracle) link_dev_buffers(folio, bh);
1079382497adSMatthew Wilcox (Oracle) end_block = folio_init_buffers(folio, bdev, size);
108022f89a4fSAl Viro spin_unlock(&mapping->i_private_lock);
10816d840a18SMatthew Wilcox (Oracle) unlock:
10823c98a41cSMatthew Wilcox (Oracle) folio_unlock(folio);
10833c98a41cSMatthew Wilcox (Oracle) folio_put(folio);
10846d840a18SMatthew Wilcox (Oracle) return block < end_block;
10851da177e4SLinus Torvalds }
10861da177e4SLinus Torvalds
10871da177e4SLinus Torvalds /*
10886d840a18SMatthew Wilcox (Oracle) * Create buffers for the specified block device block's folio. If
10896d840a18SMatthew Wilcox (Oracle) * that folio was dirty, the buffers are set dirty also. Returns false
10906d840a18SMatthew Wilcox (Oracle) * if we've hit a permanent error.
10911da177e4SLinus Torvalds */
grow_buffers(struct block_device * bdev,sector_t block,unsigned size,gfp_t gfp)10926d840a18SMatthew Wilcox (Oracle) static bool grow_buffers(struct block_device *bdev, sector_t block,
10936d840a18SMatthew Wilcox (Oracle) unsigned size, gfp_t gfp)
10941da177e4SLinus Torvalds {
10955f3bd90dSMatthew Wilcox (Oracle) loff_t pos;
10961da177e4SLinus Torvalds
1097e5657933SAndrew Morton /*
10985f3bd90dSMatthew Wilcox (Oracle) * Check for a block which lies outside our maximum possible
10995f3bd90dSMatthew Wilcox (Oracle) * pagecache index.
1100e5657933SAndrew Morton */
11015f3bd90dSMatthew Wilcox (Oracle) if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
11025f3bd90dSMatthew Wilcox (Oracle) printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
11038e24eea7SHarvey Harrison __func__, (unsigned long long)block,
1104a1c6f057SDmitry Monakhov bdev);
11056d840a18SMatthew Wilcox (Oracle) return false;
1106e5657933SAndrew Morton }
1107676ce6d5SHugh Dickins
11086d840a18SMatthew Wilcox (Oracle) /* Create a folio with the proper size buffers */
11095f3bd90dSMatthew Wilcox (Oracle) return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
11101da177e4SLinus Torvalds }
11111da177e4SLinus Torvalds
11120026ba40SEric Biggers static struct buffer_head *
__getblk_slow(struct block_device * bdev,sector_t block,unsigned size,gfp_t gfp)11133b5e6454SGioh Kim __getblk_slow(struct block_device *bdev, sector_t block,
11143b5e6454SGioh Kim unsigned size, gfp_t gfp)
11151da177e4SLinus Torvalds {
11161da177e4SLinus Torvalds /* Size must be multiple of hard sectorsize */
1117e1defc4fSMartin K. Petersen if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
11181da177e4SLinus Torvalds (size < 512 || size > PAGE_SIZE))) {
11191da177e4SLinus Torvalds printk(KERN_ERR "getblk(): invalid block size %d requested\n",
11201da177e4SLinus Torvalds size);
1121e1defc4fSMartin K. Petersen printk(KERN_ERR "logical block size: %d\n",
1122e1defc4fSMartin K. Petersen bdev_logical_block_size(bdev));
11231da177e4SLinus Torvalds
11241da177e4SLinus Torvalds dump_stack();
11251da177e4SLinus Torvalds return NULL;
11261da177e4SLinus Torvalds }
11271da177e4SLinus Torvalds
1128676ce6d5SHugh Dickins for (;;) {
1129676ce6d5SHugh Dickins struct buffer_head *bh;
1130676ce6d5SHugh Dickins
11311da177e4SLinus Torvalds bh = __find_get_block(bdev, block, size);
11321da177e4SLinus Torvalds if (bh)
11331da177e4SLinus Torvalds return bh;
11341da177e4SLinus Torvalds
11356d840a18SMatthew Wilcox (Oracle) if (!grow_buffers(bdev, block, size, gfp))
113691f68c89SJeff Moyer return NULL;
1137676ce6d5SHugh Dickins }
11381da177e4SLinus Torvalds }
11391da177e4SLinus Torvalds
11401da177e4SLinus Torvalds /*
11411da177e4SLinus Torvalds * The relationship between dirty buffers and dirty pages:
11421da177e4SLinus Torvalds *
11431da177e4SLinus Torvalds * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1144ec82e1c1SMatthew Wilcox * the page is tagged dirty in the page cache.
11451da177e4SLinus Torvalds *
11461da177e4SLinus Torvalds * At all times, the dirtiness of the buffers represents the dirtiness of
11471da177e4SLinus Torvalds * subsections of the page. If the page has buffers, the page dirty bit is
11481da177e4SLinus Torvalds * merely a hint about the true dirty state.
11491da177e4SLinus Torvalds *
11501da177e4SLinus Torvalds * When a page is set dirty in its entirety, all its buffers are marked dirty
11511da177e4SLinus Torvalds * (if the page has buffers).
11521da177e4SLinus Torvalds *
11531da177e4SLinus Torvalds * When a buffer is marked dirty, its page is dirtied, but the page's other
11541da177e4SLinus Torvalds * buffers are not.
11551da177e4SLinus Torvalds *
11561da177e4SLinus Torvalds * Also. When blockdev buffers are explicitly read with bread(), they
11571da177e4SLinus Torvalds * individually become uptodate. But their backing page remains not
11581da177e4SLinus Torvalds * uptodate - even if all of its buffers are uptodate. A subsequent
11592c69e205SMatthew Wilcox (Oracle) * block_read_full_folio() against that folio will discover all the uptodate
11602c69e205SMatthew Wilcox (Oracle) * buffers, will set the folio uptodate and will perform no I/O.
11611da177e4SLinus Torvalds */
11621da177e4SLinus Torvalds
11631da177e4SLinus Torvalds /**
11641da177e4SLinus Torvalds * mark_buffer_dirty - mark a buffer_head as needing writeout
116567be2dd1SMartin Waitz * @bh: the buffer_head to mark dirty
11661da177e4SLinus Torvalds *
1167ec82e1c1SMatthew Wilcox * mark_buffer_dirty() will set the dirty bit against the buffer, then set
1168ec82e1c1SMatthew Wilcox * its backing page dirty, then tag the page as dirty in the page cache
1169ec82e1c1SMatthew Wilcox * and then attach the address_space's inode to its superblock's dirty
11701da177e4SLinus Torvalds * inode list.
11711da177e4SLinus Torvalds *
1172600f111eSMatthew Wilcox (Oracle) * mark_buffer_dirty() is atomic. It takes bh->b_folio->mapping->i_private_lock,
1173b93b0163SMatthew Wilcox * i_pages lock and mapping->host->i_lock.
11741da177e4SLinus Torvalds */
mark_buffer_dirty(struct buffer_head * bh)1175fc9b52cdSHarvey Harrison void mark_buffer_dirty(struct buffer_head *bh)
11761da177e4SLinus Torvalds {
1177787d2214SNick Piggin WARN_ON_ONCE(!buffer_uptodate(bh));
11781be62dc1SLinus Torvalds
11795305cb83STejun Heo trace_block_dirty_buffer(bh);
11805305cb83STejun Heo
11811be62dc1SLinus Torvalds /*
11821be62dc1SLinus Torvalds * Very *carefully* optimize the it-is-already-dirty case.
11831be62dc1SLinus Torvalds *
11841be62dc1SLinus Torvalds * Don't let the final "is it dirty" escape to before we
11851be62dc1SLinus Torvalds * perhaps modified the buffer.
11861be62dc1SLinus Torvalds */
11871be62dc1SLinus Torvalds if (buffer_dirty(bh)) {
11881be62dc1SLinus Torvalds smp_mb();
11891be62dc1SLinus Torvalds if (buffer_dirty(bh))
11901be62dc1SLinus Torvalds return;
11911be62dc1SLinus Torvalds }
11921be62dc1SLinus Torvalds
1193a8e7d49aSLinus Torvalds if (!test_set_buffer_dirty(bh)) {
1194cf1d3417SMatthew Wilcox (Oracle) struct folio *folio = bh->b_folio;
1195c4843a75SGreg Thelen struct address_space *mapping = NULL;
1196c4843a75SGreg Thelen
1197cf1d3417SMatthew Wilcox (Oracle) folio_memcg_lock(folio);
1198cf1d3417SMatthew Wilcox (Oracle) if (!folio_test_set_dirty(folio)) {
1199cf1d3417SMatthew Wilcox (Oracle) mapping = folio->mapping;
12008e9d78edSLinus Torvalds if (mapping)
1201cf1d3417SMatthew Wilcox (Oracle) __folio_mark_dirty(folio, mapping, 0);
12028e9d78edSLinus Torvalds }
1203cf1d3417SMatthew Wilcox (Oracle) folio_memcg_unlock(folio);
1204c4843a75SGreg Thelen if (mapping)
1205c4843a75SGreg Thelen __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1206a8e7d49aSLinus Torvalds }
12071da177e4SLinus Torvalds }
12081fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(mark_buffer_dirty);
12091da177e4SLinus Torvalds
mark_buffer_write_io_error(struct buffer_head * bh)121087354e5dSJeff Layton void mark_buffer_write_io_error(struct buffer_head *bh)
121187354e5dSJeff Layton {
121287354e5dSJeff Layton set_buffer_write_io_error(bh);
121387354e5dSJeff Layton /* FIXME: do we need to set this in both places? */
1214abc8a8a2SMatthew Wilcox (Oracle) if (bh->b_folio && bh->b_folio->mapping)
1215abc8a8a2SMatthew Wilcox (Oracle) mapping_set_error(bh->b_folio->mapping, -EIO);
12164b2201daSChristoph Hellwig if (bh->b_assoc_map) {
121787354e5dSJeff Layton mapping_set_error(bh->b_assoc_map, -EIO);
12184b2201daSChristoph Hellwig errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
12194b2201daSChristoph Hellwig }
122087354e5dSJeff Layton }
122187354e5dSJeff Layton EXPORT_SYMBOL(mark_buffer_write_io_error);
122287354e5dSJeff Layton
122366924fdaSMatthew Wilcox (Oracle) /**
122466924fdaSMatthew Wilcox (Oracle) * __brelse - Release a buffer.
122566924fdaSMatthew Wilcox (Oracle) * @bh: The buffer to release.
122666924fdaSMatthew Wilcox (Oracle) *
122766924fdaSMatthew Wilcox (Oracle) * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
12281da177e4SLinus Torvalds */
__brelse(struct buffer_head * bh)122966924fdaSMatthew Wilcox (Oracle) void __brelse(struct buffer_head *bh)
12301da177e4SLinus Torvalds {
123166924fdaSMatthew Wilcox (Oracle) if (atomic_read(&bh->b_count)) {
123266924fdaSMatthew Wilcox (Oracle) put_bh(bh);
12331da177e4SLinus Torvalds return;
12341da177e4SLinus Torvalds }
12355c752ad9SArjan van de Ven WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
12361da177e4SLinus Torvalds }
12371fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__brelse);
12381da177e4SLinus Torvalds
1239b73a936fSMatthew Wilcox (Oracle) /**
1240b73a936fSMatthew Wilcox (Oracle) * __bforget - Discard any dirty data in a buffer.
1241b73a936fSMatthew Wilcox (Oracle) * @bh: The buffer to forget.
1242b73a936fSMatthew Wilcox (Oracle) *
1243b73a936fSMatthew Wilcox (Oracle) * This variant of bforget() can be called if @bh is guaranteed to not
1244b73a936fSMatthew Wilcox (Oracle) * be NULL.
12451da177e4SLinus Torvalds */
__bforget(struct buffer_head * bh)12461da177e4SLinus Torvalds void __bforget(struct buffer_head *bh)
12471da177e4SLinus Torvalds {
12481da177e4SLinus Torvalds clear_buffer_dirty(bh);
1249535ee2fbSJan Kara if (bh->b_assoc_map) {
1250abc8a8a2SMatthew Wilcox (Oracle) struct address_space *buffer_mapping = bh->b_folio->mapping;
12511da177e4SLinus Torvalds
1252600f111eSMatthew Wilcox (Oracle) spin_lock(&buffer_mapping->i_private_lock);
12531da177e4SLinus Torvalds list_del_init(&bh->b_assoc_buffers);
125458ff407bSJan Kara bh->b_assoc_map = NULL;
1255600f111eSMatthew Wilcox (Oracle) spin_unlock(&buffer_mapping->i_private_lock);
12561da177e4SLinus Torvalds }
12571da177e4SLinus Torvalds __brelse(bh);
12581da177e4SLinus Torvalds }
12591fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__bforget);
12601da177e4SLinus Torvalds
__bread_slow(struct buffer_head * bh)12611da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh)
12621da177e4SLinus Torvalds {
12631da177e4SLinus Torvalds lock_buffer(bh);
12641da177e4SLinus Torvalds if (buffer_uptodate(bh)) {
12651da177e4SLinus Torvalds unlock_buffer(bh);
12661da177e4SLinus Torvalds return bh;
12671da177e4SLinus Torvalds } else {
12681da177e4SLinus Torvalds get_bh(bh);
12691da177e4SLinus Torvalds bh->b_end_io = end_buffer_read_sync;
12701420c4a5SBart Van Assche submit_bh(REQ_OP_READ, bh);
12711da177e4SLinus Torvalds wait_on_buffer(bh);
12721da177e4SLinus Torvalds if (buffer_uptodate(bh))
12731da177e4SLinus Torvalds return bh;
12741da177e4SLinus Torvalds }
12751da177e4SLinus Torvalds brelse(bh);
12761da177e4SLinus Torvalds return NULL;
12771da177e4SLinus Torvalds }
12781da177e4SLinus Torvalds
12791da177e4SLinus Torvalds /*
12801da177e4SLinus Torvalds * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
12811da177e4SLinus Torvalds * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
12821da177e4SLinus Torvalds * refcount elevated by one when they're in an LRU. A buffer can only appear
12831da177e4SLinus Torvalds * once in a particular CPU's LRU. A single buffer can be present in multiple
12841da177e4SLinus Torvalds * CPU's LRUs at the same time.
12851da177e4SLinus Torvalds *
12861da177e4SLinus Torvalds * This is a transparent caching front-end to sb_bread(), sb_getblk() and
12871da177e4SLinus Torvalds * sb_find_get_block().
12881da177e4SLinus Torvalds *
12891da177e4SLinus Torvalds * The LRUs themselves only need locking against invalidate_bh_lrus. We use
12901da177e4SLinus Torvalds * a local interrupt disable for that.
12911da177e4SLinus Torvalds */
12921da177e4SLinus Torvalds
129386cf78d7SSebastien Buisson #define BH_LRU_SIZE 16
12941da177e4SLinus Torvalds
12951da177e4SLinus Torvalds struct bh_lru {
12961da177e4SLinus Torvalds struct buffer_head *bhs[BH_LRU_SIZE];
12971da177e4SLinus Torvalds };
12981da177e4SLinus Torvalds
12991da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
13001da177e4SLinus Torvalds
13011da177e4SLinus Torvalds #ifdef CONFIG_SMP
13021da177e4SLinus Torvalds #define bh_lru_lock() local_irq_disable()
13031da177e4SLinus Torvalds #define bh_lru_unlock() local_irq_enable()
13041da177e4SLinus Torvalds #else
13051da177e4SLinus Torvalds #define bh_lru_lock() preempt_disable()
13061da177e4SLinus Torvalds #define bh_lru_unlock() preempt_enable()
13071da177e4SLinus Torvalds #endif
13081da177e4SLinus Torvalds
check_irqs_on(void)13091da177e4SLinus Torvalds static inline void check_irqs_on(void)
13101da177e4SLinus Torvalds {
13111da177e4SLinus Torvalds #ifdef irqs_disabled
13121da177e4SLinus Torvalds BUG_ON(irqs_disabled());
13131da177e4SLinus Torvalds #endif
13141da177e4SLinus Torvalds }
13151da177e4SLinus Torvalds
13161da177e4SLinus Torvalds /*
1317241f01fbSEric Biggers * Install a buffer_head into this cpu's LRU. If not already in the LRU, it is
1318241f01fbSEric Biggers * inserted at the front, and the buffer_head at the back if any is evicted.
1319241f01fbSEric Biggers * Or, if already in the LRU it is moved to the front.
13201da177e4SLinus Torvalds */
bh_lru_install(struct buffer_head * bh)13211da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh)
13221da177e4SLinus Torvalds {
1323241f01fbSEric Biggers struct buffer_head *evictee = bh;
1324241f01fbSEric Biggers struct bh_lru *b;
1325241f01fbSEric Biggers int i;
13261da177e4SLinus Torvalds
13271da177e4SLinus Torvalds check_irqs_on();
1328c0226eb8SMinchan Kim bh_lru_lock();
1329c0226eb8SMinchan Kim
13308cc621d2SMinchan Kim /*
13318cc621d2SMinchan Kim * the refcount of buffer_head in bh_lru prevents dropping the
13328cc621d2SMinchan Kim * attached page(i.e., try_to_free_buffers) so it could cause
13338cc621d2SMinchan Kim * failing page migration.
13348cc621d2SMinchan Kim * Skip putting upcoming bh into bh_lru until migration is done.
13358cc621d2SMinchan Kim */
13368a237adfSMarcelo Tosatti if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
1337c0226eb8SMinchan Kim bh_lru_unlock();
13388cc621d2SMinchan Kim return;
1339c0226eb8SMinchan Kim }
1340241f01fbSEric Biggers
1341241f01fbSEric Biggers b = this_cpu_ptr(&bh_lrus);
1342241f01fbSEric Biggers for (i = 0; i < BH_LRU_SIZE; i++) {
1343241f01fbSEric Biggers swap(evictee, b->bhs[i]);
1344241f01fbSEric Biggers if (evictee == bh) {
1345241f01fbSEric Biggers bh_lru_unlock();
1346241f01fbSEric Biggers return;
1347241f01fbSEric Biggers }
1348241f01fbSEric Biggers }
13491da177e4SLinus Torvalds
13501da177e4SLinus Torvalds get_bh(bh);
13511da177e4SLinus Torvalds bh_lru_unlock();
1352241f01fbSEric Biggers brelse(evictee);
13531da177e4SLinus Torvalds }
13541da177e4SLinus Torvalds
13551da177e4SLinus Torvalds /*
13561da177e4SLinus Torvalds * Look up the bh in this cpu's LRU. If it's there, move it to the head.
13571da177e4SLinus Torvalds */
1358858119e1SArjan van de Ven static struct buffer_head *
lookup_bh_lru(struct block_device * bdev,sector_t block,unsigned size)13593991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
13601da177e4SLinus Torvalds {
13611da177e4SLinus Torvalds struct buffer_head *ret = NULL;
13623991d3bdSTomasz Kvarsin unsigned int i;
13631da177e4SLinus Torvalds
13641da177e4SLinus Torvalds check_irqs_on();
13651da177e4SLinus Torvalds bh_lru_lock();
13668a237adfSMarcelo Tosatti if (cpu_is_isolated(smp_processor_id())) {
13678a237adfSMarcelo Tosatti bh_lru_unlock();
13688a237adfSMarcelo Tosatti return NULL;
13698a237adfSMarcelo Tosatti }
13701da177e4SLinus Torvalds for (i = 0; i < BH_LRU_SIZE; i++) {
1371c7b92516SChristoph Lameter struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
13721da177e4SLinus Torvalds
13739470dd5dSZach Brown if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
13749470dd5dSZach Brown bh->b_size == size) {
13751da177e4SLinus Torvalds if (i) {
13761da177e4SLinus Torvalds while (i) {
1377c7b92516SChristoph Lameter __this_cpu_write(bh_lrus.bhs[i],
1378c7b92516SChristoph Lameter __this_cpu_read(bh_lrus.bhs[i - 1]));
13791da177e4SLinus Torvalds i--;
13801da177e4SLinus Torvalds }
1381c7b92516SChristoph Lameter __this_cpu_write(bh_lrus.bhs[0], bh);
13821da177e4SLinus Torvalds }
13831da177e4SLinus Torvalds get_bh(bh);
13841da177e4SLinus Torvalds ret = bh;
13851da177e4SLinus Torvalds break;
13861da177e4SLinus Torvalds }
13871da177e4SLinus Torvalds }
13881da177e4SLinus Torvalds bh_lru_unlock();
13891da177e4SLinus Torvalds return ret;
13901da177e4SLinus Torvalds }
13911da177e4SLinus Torvalds
13921da177e4SLinus Torvalds /*
13931da177e4SLinus Torvalds * Perform a pagecache lookup for the matching buffer. If it's there, refresh
13941da177e4SLinus Torvalds * it in the LRU and mark it as accessed. If it is not present then return
13951da177e4SLinus Torvalds * NULL
13961da177e4SLinus Torvalds */
13971da177e4SLinus Torvalds struct buffer_head *
__find_get_block(struct block_device * bdev,sector_t block,unsigned size)13983991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
13991da177e4SLinus Torvalds {
14001da177e4SLinus Torvalds struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
14011da177e4SLinus Torvalds
14021da177e4SLinus Torvalds if (bh == NULL) {
14032457aec6SMel Gorman /* __find_get_block_slow will mark the page accessed */
1404385fd4c5SCoywolf Qi Hunt bh = __find_get_block_slow(bdev, block);
14051da177e4SLinus Torvalds if (bh)
14061da177e4SLinus Torvalds bh_lru_install(bh);
14072457aec6SMel Gorman } else
14081da177e4SLinus Torvalds touch_buffer(bh);
14092457aec6SMel Gorman
14101da177e4SLinus Torvalds return bh;
14111da177e4SLinus Torvalds }
14121da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block);
14131da177e4SLinus Torvalds
14143ed65f04SMatthew Wilcox (Oracle) /**
14153ed65f04SMatthew Wilcox (Oracle) * bdev_getblk - Get a buffer_head in a block device's buffer cache.
14163ed65f04SMatthew Wilcox (Oracle) * @bdev: The block device.
14173ed65f04SMatthew Wilcox (Oracle) * @block: The block number.
14183ed65f04SMatthew Wilcox (Oracle) * @size: The size of buffer_heads for this @bdev.
14193ed65f04SMatthew Wilcox (Oracle) * @gfp: The memory allocation flags to use.
14203ed65f04SMatthew Wilcox (Oracle) *
14210b116ff4SMatthew Wilcox (Oracle) * The returned buffer head has its reference count incremented, but is
14220b116ff4SMatthew Wilcox (Oracle) * not locked. The caller should call brelse() when it has finished
14230b116ff4SMatthew Wilcox (Oracle) * with the buffer. The buffer may not be uptodate. If needed, the
14240b116ff4SMatthew Wilcox (Oracle) * caller can bring it uptodate either by reading it or overwriting it.
14250b116ff4SMatthew Wilcox (Oracle) *
14263ed65f04SMatthew Wilcox (Oracle) * Return: The buffer head, or NULL if memory could not be allocated.
14273ed65f04SMatthew Wilcox (Oracle) */
bdev_getblk(struct block_device * bdev,sector_t block,unsigned size,gfp_t gfp)14283ed65f04SMatthew Wilcox (Oracle) struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
14293ed65f04SMatthew Wilcox (Oracle) unsigned size, gfp_t gfp)
14303ed65f04SMatthew Wilcox (Oracle) {
14313ed65f04SMatthew Wilcox (Oracle) struct buffer_head *bh = __find_get_block(bdev, block, size);
14323ed65f04SMatthew Wilcox (Oracle)
14333ed65f04SMatthew Wilcox (Oracle) might_alloc(gfp);
14343ed65f04SMatthew Wilcox (Oracle) if (bh)
14353ed65f04SMatthew Wilcox (Oracle) return bh;
14363ed65f04SMatthew Wilcox (Oracle)
14373ed65f04SMatthew Wilcox (Oracle) return __getblk_slow(bdev, block, size, gfp);
14383ed65f04SMatthew Wilcox (Oracle) }
14393ed65f04SMatthew Wilcox (Oracle) EXPORT_SYMBOL(bdev_getblk);
14403ed65f04SMatthew Wilcox (Oracle)
14411da177e4SLinus Torvalds /*
14421da177e4SLinus Torvalds * Do async read-ahead on a buffer..
14431da177e4SLinus Torvalds */
__breadahead(struct block_device * bdev,sector_t block,unsigned size)14443991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
14451da177e4SLinus Torvalds {
1446775d9b10SMatthew Wilcox (Oracle) struct buffer_head *bh = bdev_getblk(bdev, block, size,
1447775d9b10SMatthew Wilcox (Oracle) GFP_NOWAIT | __GFP_MOVABLE);
1448775d9b10SMatthew Wilcox (Oracle)
1449a3e713b5SAndrew Morton if (likely(bh)) {
1450e7ea1129SZhang Yi bh_readahead(bh, REQ_RAHEAD);
14511da177e4SLinus Torvalds brelse(bh);
14521da177e4SLinus Torvalds }
1453a3e713b5SAndrew Morton }
14541da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead);
14551da177e4SLinus Torvalds
14561da177e4SLinus Torvalds /**
1457324ecaeeSMatthew Wilcox (Oracle) * __bread_gfp() - Read a block.
1458324ecaeeSMatthew Wilcox (Oracle) * @bdev: The block device to read from.
1459324ecaeeSMatthew Wilcox (Oracle) * @block: Block number in units of block size.
1460324ecaeeSMatthew Wilcox (Oracle) * @size: The block size of this device in bytes.
1461324ecaeeSMatthew Wilcox (Oracle) * @gfp: Not page allocation flags; see below.
14621da177e4SLinus Torvalds *
1463324ecaeeSMatthew Wilcox (Oracle) * You are not expected to call this function. You should use one of
1464324ecaeeSMatthew Wilcox (Oracle) * sb_bread(), sb_bread_unmovable() or __bread().
1465324ecaeeSMatthew Wilcox (Oracle) *
1466324ecaeeSMatthew Wilcox (Oracle) * Read a specified block, and return the buffer head that refers to it.
1467324ecaeeSMatthew Wilcox (Oracle) * If @gfp is 0, the memory will be allocated using the block device's
1468324ecaeeSMatthew Wilcox (Oracle) * default GFP flags. If @gfp is __GFP_MOVABLE, the memory may be
1469324ecaeeSMatthew Wilcox (Oracle) * allocated from a movable area. Do not pass in a complete set of
1470324ecaeeSMatthew Wilcox (Oracle) * GFP flags.
1471324ecaeeSMatthew Wilcox (Oracle) *
1472324ecaeeSMatthew Wilcox (Oracle) * The returned buffer head has its refcount increased. The caller should
1473324ecaeeSMatthew Wilcox (Oracle) * call brelse() when it has finished with the buffer.
1474324ecaeeSMatthew Wilcox (Oracle) *
1475324ecaeeSMatthew Wilcox (Oracle) * Context: May sleep waiting for I/O.
1476324ecaeeSMatthew Wilcox (Oracle) * Return: NULL if the block was unreadable.
14771da177e4SLinus Torvalds */
__bread_gfp(struct block_device * bdev,sector_t block,unsigned size,gfp_t gfp)1478324ecaeeSMatthew Wilcox (Oracle) struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
14793b5e6454SGioh Kim unsigned size, gfp_t gfp)
14801da177e4SLinus Torvalds {
148193b13ecaSMatthew Wilcox (Oracle) struct buffer_head *bh;
148293b13ecaSMatthew Wilcox (Oracle)
1483224941e8SAl Viro gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
148493b13ecaSMatthew Wilcox (Oracle)
148593b13ecaSMatthew Wilcox (Oracle) /*
148693b13ecaSMatthew Wilcox (Oracle) * Prefer looping in the allocator rather than here, at least that
148793b13ecaSMatthew Wilcox (Oracle) * code knows what it's doing.
148893b13ecaSMatthew Wilcox (Oracle) */
148993b13ecaSMatthew Wilcox (Oracle) gfp |= __GFP_NOFAIL;
149093b13ecaSMatthew Wilcox (Oracle)
149193b13ecaSMatthew Wilcox (Oracle) bh = bdev_getblk(bdev, block, size, gfp);
14921da177e4SLinus Torvalds
1493a3e713b5SAndrew Morton if (likely(bh) && !buffer_uptodate(bh))
14941da177e4SLinus Torvalds bh = __bread_slow(bh);
14951da177e4SLinus Torvalds return bh;
14961da177e4SLinus Torvalds }
14973b5e6454SGioh Kim EXPORT_SYMBOL(__bread_gfp);
14981da177e4SLinus Torvalds
__invalidate_bh_lrus(struct bh_lru * b)14998cc621d2SMinchan Kim static void __invalidate_bh_lrus(struct bh_lru *b)
15008cc621d2SMinchan Kim {
15018cc621d2SMinchan Kim int i;
15028cc621d2SMinchan Kim
15038cc621d2SMinchan Kim for (i = 0; i < BH_LRU_SIZE; i++) {
15048cc621d2SMinchan Kim brelse(b->bhs[i]);
15058cc621d2SMinchan Kim b->bhs[i] = NULL;
15068cc621d2SMinchan Kim }
15078cc621d2SMinchan Kim }
15081da177e4SLinus Torvalds /*
15091da177e4SLinus Torvalds * invalidate_bh_lrus() is called rarely - but not only at unmount.
15101da177e4SLinus Torvalds * This doesn't race because it runs in each cpu either in irq
15111da177e4SLinus Torvalds * or with preempt disabled.
15121da177e4SLinus Torvalds */
invalidate_bh_lru(void * arg)15131da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg)
15141da177e4SLinus Torvalds {
15151da177e4SLinus Torvalds struct bh_lru *b = &get_cpu_var(bh_lrus);
15161da177e4SLinus Torvalds
15178cc621d2SMinchan Kim __invalidate_bh_lrus(b);
15181da177e4SLinus Torvalds put_cpu_var(bh_lrus);
15191da177e4SLinus Torvalds }
15201da177e4SLinus Torvalds
has_bh_in_lru(int cpu,void * dummy)15218cc621d2SMinchan Kim bool has_bh_in_lru(int cpu, void *dummy)
152242be35d0SGilad Ben-Yossef {
152342be35d0SGilad Ben-Yossef struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
152442be35d0SGilad Ben-Yossef int i;
152542be35d0SGilad Ben-Yossef
152642be35d0SGilad Ben-Yossef for (i = 0; i < BH_LRU_SIZE; i++) {
152742be35d0SGilad Ben-Yossef if (b->bhs[i])
15281d706679SSaurav Girepunje return true;
152942be35d0SGilad Ben-Yossef }
153042be35d0SGilad Ben-Yossef
15311d706679SSaurav Girepunje return false;
153242be35d0SGilad Ben-Yossef }
153342be35d0SGilad Ben-Yossef
invalidate_bh_lrus(void)1534f9a14399SPeter Zijlstra void invalidate_bh_lrus(void)
15351da177e4SLinus Torvalds {
1536cb923159SSebastian Andrzej Siewior on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
15371da177e4SLinus Torvalds }
15389db5579bSNick Piggin EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
15391da177e4SLinus Torvalds
1540243418e3SMinchan Kim /*
1541243418e3SMinchan Kim * It's called from workqueue context so we need a bh_lru_lock to close
1542243418e3SMinchan Kim * the race with preemption/irq.
1543243418e3SMinchan Kim */
invalidate_bh_lrus_cpu(void)1544243418e3SMinchan Kim void invalidate_bh_lrus_cpu(void)
15458cc621d2SMinchan Kim {
15468cc621d2SMinchan Kim struct bh_lru *b;
15478cc621d2SMinchan Kim
15488cc621d2SMinchan Kim bh_lru_lock();
1549243418e3SMinchan Kim b = this_cpu_ptr(&bh_lrus);
15508cc621d2SMinchan Kim __invalidate_bh_lrus(b);
15518cc621d2SMinchan Kim bh_lru_unlock();
15528cc621d2SMinchan Kim }
15538cc621d2SMinchan Kim
folio_set_bh(struct buffer_head * bh,struct folio * folio,unsigned long offset)1554465e5e6aSPankaj Raghav void folio_set_bh(struct buffer_head *bh, struct folio *folio,
1555465e5e6aSPankaj Raghav unsigned long offset)
1556465e5e6aSPankaj Raghav {
1557465e5e6aSPankaj Raghav bh->b_folio = folio;
1558465e5e6aSPankaj Raghav BUG_ON(offset >= folio_size(folio));
1559465e5e6aSPankaj Raghav if (folio_test_highmem(folio))
1560465e5e6aSPankaj Raghav /*
1561465e5e6aSPankaj Raghav * This catches illegal uses and preserves the offset:
1562465e5e6aSPankaj Raghav */
1563465e5e6aSPankaj Raghav bh->b_data = (char *)(0 + offset);
1564465e5e6aSPankaj Raghav else
1565465e5e6aSPankaj Raghav bh->b_data = folio_address(folio) + offset;
1566465e5e6aSPankaj Raghav }
1567465e5e6aSPankaj Raghav EXPORT_SYMBOL(folio_set_bh);
1568465e5e6aSPankaj Raghav
15691da177e4SLinus Torvalds /*
15701da177e4SLinus Torvalds * Called when truncating a buffer on a page completely.
15711da177e4SLinus Torvalds */
1572e7470ee8SMel Gorman
1573e7470ee8SMel Gorman /* Bits that are cleared during an invalidate */
1574e7470ee8SMel Gorman #define BUFFER_FLAGS_DISCARD \
1575e7470ee8SMel Gorman (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
1576e7470ee8SMel Gorman 1 << BH_Delay | 1 << BH_Unwritten)
1577e7470ee8SMel Gorman
discard_buffer(struct buffer_head * bh)1578858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh)
15791da177e4SLinus Torvalds {
1580b0192296SUros Bizjak unsigned long b_state;
1581e7470ee8SMel Gorman
15821da177e4SLinus Torvalds lock_buffer(bh);
15831da177e4SLinus Torvalds clear_buffer_dirty(bh);
15841da177e4SLinus Torvalds bh->b_bdev = NULL;
1585b0192296SUros Bizjak b_state = READ_ONCE(bh->b_state);
1586b0192296SUros Bizjak do {
1587b0192296SUros Bizjak } while (!try_cmpxchg(&bh->b_state, &b_state,
1588b0192296SUros Bizjak b_state & ~BUFFER_FLAGS_DISCARD));
15891da177e4SLinus Torvalds unlock_buffer(bh);
15901da177e4SLinus Torvalds }
15911da177e4SLinus Torvalds
15921da177e4SLinus Torvalds /**
15937ba13abbSMatthew Wilcox (Oracle) * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
15947ba13abbSMatthew Wilcox (Oracle) * @folio: The folio which is affected.
1595d47992f8SLukas Czerner * @offset: start of the range to invalidate
1596d47992f8SLukas Czerner * @length: length of the range to invalidate
15971da177e4SLinus Torvalds *
15987ba13abbSMatthew Wilcox (Oracle) * block_invalidate_folio() is called when all or part of the folio has been
15991da177e4SLinus Torvalds * invalidated by a truncate operation.
16001da177e4SLinus Torvalds *
16017ba13abbSMatthew Wilcox (Oracle) * block_invalidate_folio() does not have to release all buffers, but it must
16021da177e4SLinus Torvalds * ensure that no dirty buffer is left outside @offset and that no I/O
16031da177e4SLinus Torvalds * is underway against any of the blocks which are outside the truncation
16041da177e4SLinus Torvalds * point. Because the caller is about to free (and possibly reuse) those
16051da177e4SLinus Torvalds * blocks on-disk.
16061da177e4SLinus Torvalds */
block_invalidate_folio(struct folio * folio,size_t offset,size_t length)16077ba13abbSMatthew Wilcox (Oracle) void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
16081da177e4SLinus Torvalds {
16091da177e4SLinus Torvalds struct buffer_head *head, *bh, *next;
16107ba13abbSMatthew Wilcox (Oracle) size_t curr_off = 0;
16117ba13abbSMatthew Wilcox (Oracle) size_t stop = length + offset;
16121da177e4SLinus Torvalds
16137ba13abbSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio));
16141da177e4SLinus Torvalds
1615d47992f8SLukas Czerner /*
1616d47992f8SLukas Czerner * Check for overflow
1617d47992f8SLukas Czerner */
16187ba13abbSMatthew Wilcox (Oracle) BUG_ON(stop > folio_size(folio) || stop < length);
1619d47992f8SLukas Czerner
16207ba13abbSMatthew Wilcox (Oracle) head = folio_buffers(folio);
16217ba13abbSMatthew Wilcox (Oracle) if (!head)
16227ba13abbSMatthew Wilcox (Oracle) return;
16237ba13abbSMatthew Wilcox (Oracle)
16241da177e4SLinus Torvalds bh = head;
16251da177e4SLinus Torvalds do {
16267ba13abbSMatthew Wilcox (Oracle) size_t next_off = curr_off + bh->b_size;
16271da177e4SLinus Torvalds next = bh->b_this_page;
16281da177e4SLinus Torvalds
16291da177e4SLinus Torvalds /*
1630d47992f8SLukas Czerner * Are we still fully in range ?
1631d47992f8SLukas Czerner */
1632d47992f8SLukas Czerner if (next_off > stop)
1633d47992f8SLukas Czerner goto out;
1634d47992f8SLukas Czerner
1635d47992f8SLukas Czerner /*
16361da177e4SLinus Torvalds * is this block fully invalidated?
16371da177e4SLinus Torvalds */
16381da177e4SLinus Torvalds if (offset <= curr_off)
16391da177e4SLinus Torvalds discard_buffer(bh);
16401da177e4SLinus Torvalds curr_off = next_off;
16411da177e4SLinus Torvalds bh = next;
16421da177e4SLinus Torvalds } while (bh != head);
16431da177e4SLinus Torvalds
16441da177e4SLinus Torvalds /*
16457ba13abbSMatthew Wilcox (Oracle) * We release buffers only if the entire folio is being invalidated.
16461da177e4SLinus Torvalds * The get_block cached value has been unconditionally invalidated,
16471da177e4SLinus Torvalds * so real IO is not possible anymore.
16481da177e4SLinus Torvalds */
16497ba13abbSMatthew Wilcox (Oracle) if (length == folio_size(folio))
16507ba13abbSMatthew Wilcox (Oracle) filemap_release_folio(folio, 0);
16511da177e4SLinus Torvalds out:
16522ff28e22SNeilBrown return;
16531da177e4SLinus Torvalds }
16547ba13abbSMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_invalidate_folio);
16551da177e4SLinus Torvalds
16561da177e4SLinus Torvalds /*
16571da177e4SLinus Torvalds * We attach and possibly dirty the buffers atomically wrt
1658600f111eSMatthew Wilcox (Oracle) * block_dirty_folio() via i_private_lock. try_to_free_buffers
16598e2e1756SPankaj Raghav * is already excluded via the folio lock.
16601da177e4SLinus Torvalds */
create_empty_buffers(struct folio * folio,unsigned long blocksize,unsigned long b_state)16610a88810dSMatthew Wilcox (Oracle) struct buffer_head *create_empty_buffers(struct folio *folio,
16623decb856SMatthew Wilcox (Oracle) unsigned long blocksize, unsigned long b_state)
16631da177e4SLinus Torvalds {
16641da177e4SLinus Torvalds struct buffer_head *bh, *head, *tail;
16652a418157SMatthew Wilcox (Oracle) gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;
16661da177e4SLinus Torvalds
16672a418157SMatthew Wilcox (Oracle) head = folio_alloc_buffers(folio, blocksize, gfp);
16681da177e4SLinus Torvalds bh = head;
16691da177e4SLinus Torvalds do {
16701da177e4SLinus Torvalds bh->b_state |= b_state;
16711da177e4SLinus Torvalds tail = bh;
16721da177e4SLinus Torvalds bh = bh->b_this_page;
16731da177e4SLinus Torvalds } while (bh);
16741da177e4SLinus Torvalds tail->b_this_page = head;
16751da177e4SLinus Torvalds
1676600f111eSMatthew Wilcox (Oracle) spin_lock(&folio->mapping->i_private_lock);
16778e2e1756SPankaj Raghav if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
16781da177e4SLinus Torvalds bh = head;
16791da177e4SLinus Torvalds do {
16808e2e1756SPankaj Raghav if (folio_test_dirty(folio))
16811da177e4SLinus Torvalds set_buffer_dirty(bh);
16828e2e1756SPankaj Raghav if (folio_test_uptodate(folio))
16831da177e4SLinus Torvalds set_buffer_uptodate(bh);
16841da177e4SLinus Torvalds bh = bh->b_this_page;
16851da177e4SLinus Torvalds } while (bh != head);
16861da177e4SLinus Torvalds }
16878e2e1756SPankaj Raghav folio_attach_private(folio, head);
1688600f111eSMatthew Wilcox (Oracle) spin_unlock(&folio->mapping->i_private_lock);
16893decb856SMatthew Wilcox (Oracle)
16903decb856SMatthew Wilcox (Oracle) return head;
16918e2e1756SPankaj Raghav }
16921da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers);
16931da177e4SLinus Torvalds
169429f3ad7dSJan Kara /**
169529f3ad7dSJan Kara * clean_bdev_aliases: clean a range of buffers in block device
169629f3ad7dSJan Kara * @bdev: Block device to clean buffers in
169729f3ad7dSJan Kara * @block: Start of a range of blocks to clean
169829f3ad7dSJan Kara * @len: Number of blocks to clean
16991da177e4SLinus Torvalds *
170029f3ad7dSJan Kara * We are taking a range of blocks for data and we don't want writeback of any
170129f3ad7dSJan Kara * buffer-cache aliases starting from return from this function and until the
170229f3ad7dSJan Kara * moment when something will explicitly mark the buffer dirty (hopefully that
170329f3ad7dSJan Kara * will not happen until we will free that block ;-) We don't even need to mark
170429f3ad7dSJan Kara * it not-uptodate - nobody can expect anything from a newly allocated buffer
170529f3ad7dSJan Kara * anyway. We used to use unmap_buffer() for such invalidation, but that was
170629f3ad7dSJan Kara * wrong. We definitely don't want to mark the alias unmapped, for example - it
170729f3ad7dSJan Kara * would confuse anyone who might pick it with bread() afterwards...
170829f3ad7dSJan Kara *
170929f3ad7dSJan Kara * Also.. Note that bforget() doesn't lock the buffer. So there can be
171029f3ad7dSJan Kara * writeout I/O going on against recently-freed buffers. We don't wait on that
171129f3ad7dSJan Kara * I/O in bforget() - it's more efficient to wait on the I/O only if we really
171229f3ad7dSJan Kara * need to. That happens here.
17131da177e4SLinus Torvalds */
clean_bdev_aliases(struct block_device * bdev,sector_t block,sector_t len)171429f3ad7dSJan Kara void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
17151da177e4SLinus Torvalds {
171653cd4cd3SAl Viro struct address_space *bd_mapping = bdev->bd_mapping;
171753cd4cd3SAl Viro const int blkbits = bd_mapping->host->i_blkbits;
17189e0b6f31SMatthew Wilcox (Oracle) struct folio_batch fbatch;
171953cd4cd3SAl Viro pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE;
172029f3ad7dSJan Kara pgoff_t end;
1721c10f778dSJan Kara int i, count;
172229f3ad7dSJan Kara struct buffer_head *bh;
172329f3ad7dSJan Kara struct buffer_head *head;
17241da177e4SLinus Torvalds
172553cd4cd3SAl Viro end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE;
17269e0b6f31SMatthew Wilcox (Oracle) folio_batch_init(&fbatch);
17279e0b6f31SMatthew Wilcox (Oracle) while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
17289e0b6f31SMatthew Wilcox (Oracle) count = folio_batch_count(&fbatch);
1729c10f778dSJan Kara for (i = 0; i < count; i++) {
17309e0b6f31SMatthew Wilcox (Oracle) struct folio *folio = fbatch.folios[i];
17311da177e4SLinus Torvalds
17329e0b6f31SMatthew Wilcox (Oracle) if (!folio_buffers(folio))
173329f3ad7dSJan Kara continue;
173429f3ad7dSJan Kara /*
1735600f111eSMatthew Wilcox (Oracle) * We use folio lock instead of bd_mapping->i_private_lock
173629f3ad7dSJan Kara * to pin buffers here since we can afford to sleep and
173729f3ad7dSJan Kara * it scales better than a global spinlock lock.
173829f3ad7dSJan Kara */
17399e0b6f31SMatthew Wilcox (Oracle) folio_lock(folio);
17409e0b6f31SMatthew Wilcox (Oracle) /* Recheck when the folio is locked which pins bhs */
17419e0b6f31SMatthew Wilcox (Oracle) head = folio_buffers(folio);
17429e0b6f31SMatthew Wilcox (Oracle) if (!head)
174329f3ad7dSJan Kara goto unlock_page;
174429f3ad7dSJan Kara bh = head;
174529f3ad7dSJan Kara do {
17466c006a9dSChandan Rajendra if (!buffer_mapped(bh) || (bh->b_blocknr < block))
174729f3ad7dSJan Kara goto next;
174829f3ad7dSJan Kara if (bh->b_blocknr >= block + len)
174929f3ad7dSJan Kara break;
175029f3ad7dSJan Kara clear_buffer_dirty(bh);
175129f3ad7dSJan Kara wait_on_buffer(bh);
175229f3ad7dSJan Kara clear_buffer_req(bh);
175329f3ad7dSJan Kara next:
175429f3ad7dSJan Kara bh = bh->b_this_page;
175529f3ad7dSJan Kara } while (bh != head);
175629f3ad7dSJan Kara unlock_page:
17579e0b6f31SMatthew Wilcox (Oracle) folio_unlock(folio);
175829f3ad7dSJan Kara }
17599e0b6f31SMatthew Wilcox (Oracle) folio_batch_release(&fbatch);
176029f3ad7dSJan Kara cond_resched();
1761c10f778dSJan Kara /* End of range already reached? */
1762c10f778dSJan Kara if (index > end || !index)
1763c10f778dSJan Kara break;
17641da177e4SLinus Torvalds }
17651da177e4SLinus Torvalds }
176629f3ad7dSJan Kara EXPORT_SYMBOL(clean_bdev_aliases);
17671da177e4SLinus Torvalds
folio_create_buffers(struct folio * folio,struct inode * inode,unsigned int b_state)1768c6c8c3e7SPankaj Raghav static struct buffer_head *folio_create_buffers(struct folio *folio,
1769c6c8c3e7SPankaj Raghav struct inode *inode,
1770c6c8c3e7SPankaj Raghav unsigned int b_state)
177145bce8f3SLinus Torvalds {
17723decb856SMatthew Wilcox (Oracle) struct buffer_head *bh;
17733decb856SMatthew Wilcox (Oracle)
1774c6c8c3e7SPankaj Raghav BUG_ON(!folio_test_locked(folio));
177545bce8f3SLinus Torvalds
17763decb856SMatthew Wilcox (Oracle) bh = folio_buffers(folio);
17773decb856SMatthew Wilcox (Oracle) if (!bh)
17780a88810dSMatthew Wilcox (Oracle) bh = create_empty_buffers(folio,
17793decb856SMatthew Wilcox (Oracle) 1 << READ_ONCE(inode->i_blkbits), b_state);
17803decb856SMatthew Wilcox (Oracle) return bh;
178145bce8f3SLinus Torvalds }
178245bce8f3SLinus Torvalds
178345bce8f3SLinus Torvalds /*
17841da177e4SLinus Torvalds * NOTE! All mapped/uptodate combinations are valid:
17851da177e4SLinus Torvalds *
17861da177e4SLinus Torvalds * Mapped Uptodate Meaning
17871da177e4SLinus Torvalds *
17881da177e4SLinus Torvalds * No No "unknown" - must do get_block()
17891da177e4SLinus Torvalds * No Yes "hole" - zero-filled
17901da177e4SLinus Torvalds * Yes No "allocated" - allocated on disk, not read in
17911da177e4SLinus Torvalds * Yes Yes "valid" - allocated and up-to-date in memory.
17921da177e4SLinus Torvalds *
17931da177e4SLinus Torvalds * "Dirty" is valid only with the last case (mapped+uptodate).
17941da177e4SLinus Torvalds */
17951da177e4SLinus Torvalds
17961da177e4SLinus Torvalds /*
179717bf23a9SMatthew Wilcox (Oracle) * While block_write_full_folio is writing back the dirty buffers under
17981da177e4SLinus Torvalds * the page lock, whoever dirtied the buffers may decide to clean them
17991da177e4SLinus Torvalds * again at any time. We handle that by only looking at the buffer
18001da177e4SLinus Torvalds * state inside lock_buffer().
18011da177e4SLinus Torvalds *
180217bf23a9SMatthew Wilcox (Oracle) * If block_write_full_folio() is called for regular writeback
18031da177e4SLinus Torvalds * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
18041da177e4SLinus Torvalds * locked buffer. This only can happen if someone has written the buffer
18051da177e4SLinus Torvalds * directly, with submit_bh(). At the address_space level PageWriteback
18061da177e4SLinus Torvalds * prevents this contention from occurring.
18076e34eeddSTheodore Ts'o *
180817bf23a9SMatthew Wilcox (Oracle) * If block_write_full_folio() is called with wbc->sync_mode ==
180970fd7614SChristoph Hellwig * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1810721a9602SJens Axboe * causes the writes to be flagged as synchronous writes.
18111da177e4SLinus Torvalds */
__block_write_full_folio(struct inode * inode,struct folio * folio,get_block_t * get_block,struct writeback_control * wbc)181253418a18SMatthew Wilcox (Oracle) int __block_write_full_folio(struct inode *inode, struct folio *folio,
181314059f66SMatthew Wilcox (Oracle) get_block_t *get_block, struct writeback_control *wbc)
18141da177e4SLinus Torvalds {
18151da177e4SLinus Torvalds int err;
18161da177e4SLinus Torvalds sector_t block;
18171da177e4SLinus Torvalds sector_t last_block;
1818f0fbd5fcSAndrew Morton struct buffer_head *bh, *head;
1819fa399c31SMatthew Wilcox (Oracle) size_t blocksize;
18201da177e4SLinus Torvalds int nr_underway = 0;
18213ae72869SBart Van Assche blk_opf_t write_flags = wbc_to_write_flags(wbc);
18221da177e4SLinus Torvalds
182353418a18SMatthew Wilcox (Oracle) head = folio_create_buffers(folio, inode,
18241da177e4SLinus Torvalds (1 << BH_Dirty) | (1 << BH_Uptodate));
18251da177e4SLinus Torvalds
18261da177e4SLinus Torvalds /*
1827e621900aSMatthew Wilcox (Oracle) * Be very careful. We have no exclusion from block_dirty_folio
18281da177e4SLinus Torvalds * here, and the (potentially unmapped) buffers may become dirty at
18291da177e4SLinus Torvalds * any time. If a buffer becomes dirty here after we've inspected it
183053418a18SMatthew Wilcox (Oracle) * then we just miss that fact, and the folio stays dirty.
18311da177e4SLinus Torvalds *
1832e621900aSMatthew Wilcox (Oracle) * Buffers outside i_size may be dirtied by block_dirty_folio;
18331da177e4SLinus Torvalds * handle that here by just cleaning them.
18341da177e4SLinus Torvalds */
18351da177e4SLinus Torvalds
18361da177e4SLinus Torvalds bh = head;
183745bce8f3SLinus Torvalds blocksize = bh->b_size;
183845bce8f3SLinus Torvalds
1839fa399c31SMatthew Wilcox (Oracle) block = div_u64(folio_pos(folio), blocksize);
1840fa399c31SMatthew Wilcox (Oracle) last_block = div_u64(i_size_read(inode) - 1, blocksize);
18411da177e4SLinus Torvalds
18421da177e4SLinus Torvalds /*
18431da177e4SLinus Torvalds * Get all the dirty buffers mapped to disk addresses and
18441da177e4SLinus Torvalds * handle any aliases from the underlying blockdev's mapping.
18451da177e4SLinus Torvalds */
18461da177e4SLinus Torvalds do {
18471da177e4SLinus Torvalds if (block > last_block) {
18481da177e4SLinus Torvalds /*
18491da177e4SLinus Torvalds * mapped buffers outside i_size will occur, because
185053418a18SMatthew Wilcox (Oracle) * this folio can be outside i_size when there is a
18511da177e4SLinus Torvalds * truncate in progress.
18521da177e4SLinus Torvalds */
18531da177e4SLinus Torvalds /*
185417bf23a9SMatthew Wilcox (Oracle) * The buffer was zeroed by block_write_full_folio()
18551da177e4SLinus Torvalds */
18561da177e4SLinus Torvalds clear_buffer_dirty(bh);
18571da177e4SLinus Torvalds set_buffer_uptodate(bh);
185829a814d2SAlex Tomas } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
185929a814d2SAlex Tomas buffer_dirty(bh)) {
1860b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize);
18611da177e4SLinus Torvalds err = get_block(inode, block, bh, 1);
18621da177e4SLinus Torvalds if (err)
18631da177e4SLinus Torvalds goto recover;
186429a814d2SAlex Tomas clear_buffer_delay(bh);
18651da177e4SLinus Torvalds if (buffer_new(bh)) {
18661da177e4SLinus Torvalds /* blockdev mappings never come here */
18671da177e4SLinus Torvalds clear_buffer_new(bh);
1868e64855c6SJan Kara clean_bdev_bh_alias(bh);
18691da177e4SLinus Torvalds }
18701da177e4SLinus Torvalds }
18711da177e4SLinus Torvalds bh = bh->b_this_page;
18721da177e4SLinus Torvalds block++;
18731da177e4SLinus Torvalds } while (bh != head);
18741da177e4SLinus Torvalds
18751da177e4SLinus Torvalds do {
18761da177e4SLinus Torvalds if (!buffer_mapped(bh))
18771da177e4SLinus Torvalds continue;
18781da177e4SLinus Torvalds /*
18791da177e4SLinus Torvalds * If it's a fully non-blocking write attempt and we cannot
188053418a18SMatthew Wilcox (Oracle) * lock the buffer then redirty the folio. Note that this can
18815b0830cbSJens Axboe * potentially cause a busy-wait loop from writeback threads
18825b0830cbSJens Axboe * and kswapd activity, but those code paths have their own
18835b0830cbSJens Axboe * higher-level throttling.
18841da177e4SLinus Torvalds */
18851b430beeSWu Fengguang if (wbc->sync_mode != WB_SYNC_NONE) {
18861da177e4SLinus Torvalds lock_buffer(bh);
1887ca5de404SNick Piggin } else if (!trylock_buffer(bh)) {
188853418a18SMatthew Wilcox (Oracle) folio_redirty_for_writepage(wbc, folio);
18891da177e4SLinus Torvalds continue;
18901da177e4SLinus Torvalds }
18911da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) {
189214059f66SMatthew Wilcox (Oracle) mark_buffer_async_write_endio(bh,
189314059f66SMatthew Wilcox (Oracle) end_buffer_async_write);
18941da177e4SLinus Torvalds } else {
18951da177e4SLinus Torvalds unlock_buffer(bh);
18961da177e4SLinus Torvalds }
18971da177e4SLinus Torvalds } while ((bh = bh->b_this_page) != head);
18981da177e4SLinus Torvalds
18991da177e4SLinus Torvalds /*
190053418a18SMatthew Wilcox (Oracle) * The folio and its buffers are protected by the writeback flag,
190153418a18SMatthew Wilcox (Oracle) * so we can drop the bh refcounts early.
19021da177e4SLinus Torvalds */
190353418a18SMatthew Wilcox (Oracle) BUG_ON(folio_test_writeback(folio));
190453418a18SMatthew Wilcox (Oracle) folio_start_writeback(folio);
19051da177e4SLinus Torvalds
19061da177e4SLinus Torvalds do {
19071da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page;
19081da177e4SLinus Torvalds if (buffer_async_write(bh)) {
190944981351SBart Van Assche submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
191044981351SBart Van Assche inode->i_write_hint, wbc);
19111da177e4SLinus Torvalds nr_underway++;
1912ad576e63SNick Piggin }
19131da177e4SLinus Torvalds bh = next;
19141da177e4SLinus Torvalds } while (bh != head);
191553418a18SMatthew Wilcox (Oracle) folio_unlock(folio);
19161da177e4SLinus Torvalds
19171da177e4SLinus Torvalds err = 0;
19181da177e4SLinus Torvalds done:
19191da177e4SLinus Torvalds if (nr_underway == 0) {
19201da177e4SLinus Torvalds /*
192153418a18SMatthew Wilcox (Oracle) * The folio was marked dirty, but the buffers were
19221da177e4SLinus Torvalds * clean. Someone wrote them back by hand with
192379f59784SZhang Yi * write_dirty_buffer/submit_bh. A rare case.
19241da177e4SLinus Torvalds */
192553418a18SMatthew Wilcox (Oracle) folio_end_writeback(folio);
19263d67f2d7SNick Piggin
19271da177e4SLinus Torvalds /*
192853418a18SMatthew Wilcox (Oracle) * The folio and buffer_heads can be released at any time from
19291da177e4SLinus Torvalds * here on.
19301da177e4SLinus Torvalds */
19311da177e4SLinus Torvalds }
19321da177e4SLinus Torvalds return err;
19331da177e4SLinus Torvalds
19341da177e4SLinus Torvalds recover:
19351da177e4SLinus Torvalds /*
19361da177e4SLinus Torvalds * ENOSPC, or some other error. We may already have added some
19371da177e4SLinus Torvalds * blocks to the file, so we need to write these out to avoid
19381da177e4SLinus Torvalds * exposing stale data.
193953418a18SMatthew Wilcox (Oracle) * The folio is currently locked and not marked for writeback
19401da177e4SLinus Torvalds */
19411da177e4SLinus Torvalds bh = head;
19421da177e4SLinus Torvalds /* Recovery: lock and submit the mapped buffers */
19431da177e4SLinus Torvalds do {
194429a814d2SAlex Tomas if (buffer_mapped(bh) && buffer_dirty(bh) &&
194529a814d2SAlex Tomas !buffer_delay(bh)) {
19461da177e4SLinus Torvalds lock_buffer(bh);
194714059f66SMatthew Wilcox (Oracle) mark_buffer_async_write_endio(bh,
194814059f66SMatthew Wilcox (Oracle) end_buffer_async_write);
19491da177e4SLinus Torvalds } else {
19501da177e4SLinus Torvalds /*
19511da177e4SLinus Torvalds * The buffer may have been set dirty during
195253418a18SMatthew Wilcox (Oracle) * attachment to a dirty folio.
19531da177e4SLinus Torvalds */
19541da177e4SLinus Torvalds clear_buffer_dirty(bh);
19551da177e4SLinus Torvalds }
19561da177e4SLinus Torvalds } while ((bh = bh->b_this_page) != head);
195753418a18SMatthew Wilcox (Oracle) BUG_ON(folio_test_writeback(folio));
195853418a18SMatthew Wilcox (Oracle) mapping_set_error(folio->mapping, err);
195953418a18SMatthew Wilcox (Oracle) folio_start_writeback(folio);
19601da177e4SLinus Torvalds do {
19611da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page;
19621da177e4SLinus Torvalds if (buffer_async_write(bh)) {
19631da177e4SLinus Torvalds clear_buffer_dirty(bh);
196444981351SBart Van Assche submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
196544981351SBart Van Assche inode->i_write_hint, wbc);
19661da177e4SLinus Torvalds nr_underway++;
1967ad576e63SNick Piggin }
19681da177e4SLinus Torvalds bh = next;
19691da177e4SLinus Torvalds } while (bh != head);
197053418a18SMatthew Wilcox (Oracle) folio_unlock(folio);
19711da177e4SLinus Torvalds goto done;
19721da177e4SLinus Torvalds }
197353418a18SMatthew Wilcox (Oracle) EXPORT_SYMBOL(__block_write_full_folio);
19741da177e4SLinus Torvalds
1975afddba49SNick Piggin /*
19764a9622f2SMatthew Wilcox (Oracle) * If a folio has any new buffers, zero them out here, and mark them uptodate
1977afddba49SNick Piggin * and dirty so they'll be written out (in order to prevent uninitialised
1978afddba49SNick Piggin * block data from leaking). And clear the new bit.
1979afddba49SNick Piggin */
folio_zero_new_buffers(struct folio * folio,size_t from,size_t to)19804a9622f2SMatthew Wilcox (Oracle) void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
1981afddba49SNick Piggin {
19824a9622f2SMatthew Wilcox (Oracle) size_t block_start, block_end;
1983afddba49SNick Piggin struct buffer_head *head, *bh;
1984afddba49SNick Piggin
19854a9622f2SMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio));
19864a9622f2SMatthew Wilcox (Oracle) head = folio_buffers(folio);
19874a9622f2SMatthew Wilcox (Oracle) if (!head)
1988afddba49SNick Piggin return;
1989afddba49SNick Piggin
19904a9622f2SMatthew Wilcox (Oracle) bh = head;
1991afddba49SNick Piggin block_start = 0;
1992afddba49SNick Piggin do {
1993afddba49SNick Piggin block_end = block_start + bh->b_size;
1994afddba49SNick Piggin
1995afddba49SNick Piggin if (buffer_new(bh)) {
1996afddba49SNick Piggin if (block_end > from && block_start < to) {
19974a9622f2SMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio)) {
19984a9622f2SMatthew Wilcox (Oracle) size_t start, xend;
1999afddba49SNick Piggin
2000afddba49SNick Piggin start = max(from, block_start);
20014a9622f2SMatthew Wilcox (Oracle) xend = min(to, block_end);
2002afddba49SNick Piggin
20034a9622f2SMatthew Wilcox (Oracle) folio_zero_segment(folio, start, xend);
2004afddba49SNick Piggin set_buffer_uptodate(bh);
2005afddba49SNick Piggin }
2006afddba49SNick Piggin
2007afddba49SNick Piggin clear_buffer_new(bh);
2008afddba49SNick Piggin mark_buffer_dirty(bh);
2009afddba49SNick Piggin }
2010afddba49SNick Piggin }
2011afddba49SNick Piggin
2012afddba49SNick Piggin block_start = block_end;
2013afddba49SNick Piggin bh = bh->b_this_page;
2014afddba49SNick Piggin } while (bh != head);
2015afddba49SNick Piggin }
20164a9622f2SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_zero_new_buffers);
2017afddba49SNick Piggin
20184aa8cdd5SChristoph Hellwig static int
iomap_to_bh(struct inode * inode,sector_t block,struct buffer_head * bh,const struct iomap * iomap)2019ae259a9cSChristoph Hellwig iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
20206d49cc85SChristoph Hellwig const struct iomap *iomap)
2021ae259a9cSChristoph Hellwig {
202280844194SMatthew Wilcox (Oracle) loff_t offset = (loff_t)block << inode->i_blkbits;
2023ae259a9cSChristoph Hellwig
2024ae259a9cSChristoph Hellwig bh->b_bdev = iomap->bdev;
2025ae259a9cSChristoph Hellwig
2026ae259a9cSChristoph Hellwig /*
2027ae259a9cSChristoph Hellwig * Block points to offset in file we need to map, iomap contains
2028ae259a9cSChristoph Hellwig * the offset at which the map starts. If the map ends before the
2029ae259a9cSChristoph Hellwig * current block, then do not map the buffer and let the caller
2030ae259a9cSChristoph Hellwig * handle it.
2031ae259a9cSChristoph Hellwig */
20324aa8cdd5SChristoph Hellwig if (offset >= iomap->offset + iomap->length)
20334aa8cdd5SChristoph Hellwig return -EIO;
2034ae259a9cSChristoph Hellwig
2035ae259a9cSChristoph Hellwig switch (iomap->type) {
2036ae259a9cSChristoph Hellwig case IOMAP_HOLE:
2037ae259a9cSChristoph Hellwig /*
2038ae259a9cSChristoph Hellwig * If the buffer is not up to date or beyond the current EOF,
2039ae259a9cSChristoph Hellwig * we need to mark it as new to ensure sub-block zeroing is
2040ae259a9cSChristoph Hellwig * executed if necessary.
2041ae259a9cSChristoph Hellwig */
2042ae259a9cSChristoph Hellwig if (!buffer_uptodate(bh) ||
2043ae259a9cSChristoph Hellwig (offset >= i_size_read(inode)))
2044ae259a9cSChristoph Hellwig set_buffer_new(bh);
20454aa8cdd5SChristoph Hellwig return 0;
2046ae259a9cSChristoph Hellwig case IOMAP_DELALLOC:
2047ae259a9cSChristoph Hellwig if (!buffer_uptodate(bh) ||
2048ae259a9cSChristoph Hellwig (offset >= i_size_read(inode)))
2049ae259a9cSChristoph Hellwig set_buffer_new(bh);
2050ae259a9cSChristoph Hellwig set_buffer_uptodate(bh);
2051ae259a9cSChristoph Hellwig set_buffer_mapped(bh);
2052ae259a9cSChristoph Hellwig set_buffer_delay(bh);
20534aa8cdd5SChristoph Hellwig return 0;
2054ae259a9cSChristoph Hellwig case IOMAP_UNWRITTEN:
2055ae259a9cSChristoph Hellwig /*
20563d7b6b21SAndreas Gruenbacher * For unwritten regions, we always need to ensure that regions
20573d7b6b21SAndreas Gruenbacher * in the block we are not writing to are zeroed. Mark the
20583d7b6b21SAndreas Gruenbacher * buffer as new to ensure this.
2059ae259a9cSChristoph Hellwig */
2060ae259a9cSChristoph Hellwig set_buffer_new(bh);
2061ae259a9cSChristoph Hellwig set_buffer_unwritten(bh);
2062df561f66SGustavo A. R. Silva fallthrough;
2063ae259a9cSChristoph Hellwig case IOMAP_MAPPED:
20643d7b6b21SAndreas Gruenbacher if ((iomap->flags & IOMAP_F_NEW) ||
2065381c0432SChristoph Hellwig offset >= i_size_read(inode)) {
2066381c0432SChristoph Hellwig /*
2067381c0432SChristoph Hellwig * This can happen if truncating the block device races
2068381c0432SChristoph Hellwig * with the check in the caller as i_size updates on
2069381c0432SChristoph Hellwig * block devices aren't synchronized by i_rwsem for
2070381c0432SChristoph Hellwig * block devices.
2071381c0432SChristoph Hellwig */
2072381c0432SChristoph Hellwig if (S_ISBLK(inode->i_mode))
2073381c0432SChristoph Hellwig return -EIO;
2074ae259a9cSChristoph Hellwig set_buffer_new(bh);
2075381c0432SChristoph Hellwig }
207619fe5f64SAndreas Gruenbacher bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
207719fe5f64SAndreas Gruenbacher inode->i_blkbits;
2078ae259a9cSChristoph Hellwig set_buffer_mapped(bh);
20794aa8cdd5SChristoph Hellwig return 0;
20804aa8cdd5SChristoph Hellwig default:
20814aa8cdd5SChristoph Hellwig WARN_ON_ONCE(1);
20824aa8cdd5SChristoph Hellwig return -EIO;
2083ae259a9cSChristoph Hellwig }
2084ae259a9cSChristoph Hellwig }
2085ae259a9cSChristoph Hellwig
__block_write_begin_int(struct folio * folio,loff_t pos,unsigned len,get_block_t * get_block,const struct iomap * iomap)2086d1bd0b4eSMatthew Wilcox (Oracle) int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
20876d49cc85SChristoph Hellwig get_block_t *get_block, const struct iomap *iomap)
20881da177e4SLinus Torvalds {
2089b0619401SMatthew Wilcox (Oracle) size_t from = offset_in_folio(folio, pos);
2090b0619401SMatthew Wilcox (Oracle) size_t to = from + len;
2091d1bd0b4eSMatthew Wilcox (Oracle) struct inode *inode = folio->mapping->host;
2092b0619401SMatthew Wilcox (Oracle) size_t block_start, block_end;
20931da177e4SLinus Torvalds sector_t block;
20941da177e4SLinus Torvalds int err = 0;
2095b0619401SMatthew Wilcox (Oracle) size_t blocksize;
20961da177e4SLinus Torvalds struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
20971da177e4SLinus Torvalds
2098d1bd0b4eSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio));
2099b0619401SMatthew Wilcox (Oracle) BUG_ON(to > folio_size(folio));
21001da177e4SLinus Torvalds BUG_ON(from > to);
21011da177e4SLinus Torvalds
2102c6c8c3e7SPankaj Raghav head = folio_create_buffers(folio, inode, 0);
210345bce8f3SLinus Torvalds blocksize = head->b_size;
2104b0619401SMatthew Wilcox (Oracle) block = div_u64(folio_pos(folio), blocksize);
21051da177e4SLinus Torvalds
21061da177e4SLinus Torvalds for (bh = head, block_start = 0; bh != head || !block_start;
21071da177e4SLinus Torvalds block++, block_start=block_end, bh = bh->b_this_page) {
21081da177e4SLinus Torvalds block_end = block_start + blocksize;
21091da177e4SLinus Torvalds if (block_end <= from || block_start >= to) {
2110d1bd0b4eSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) {
21111da177e4SLinus Torvalds if (!buffer_uptodate(bh))
21121da177e4SLinus Torvalds set_buffer_uptodate(bh);
21131da177e4SLinus Torvalds }
21141da177e4SLinus Torvalds continue;
21151da177e4SLinus Torvalds }
21161da177e4SLinus Torvalds if (buffer_new(bh))
21171da177e4SLinus Torvalds clear_buffer_new(bh);
21181da177e4SLinus Torvalds if (!buffer_mapped(bh)) {
2119b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize);
21204aa8cdd5SChristoph Hellwig if (get_block)
21211da177e4SLinus Torvalds err = get_block(inode, block, bh, 1);
21224aa8cdd5SChristoph Hellwig else
21234aa8cdd5SChristoph Hellwig err = iomap_to_bh(inode, block, bh, iomap);
21241da177e4SLinus Torvalds if (err)
2125f3ddbdc6SNick Piggin break;
2126ae259a9cSChristoph Hellwig
21271da177e4SLinus Torvalds if (buffer_new(bh)) {
2128e64855c6SJan Kara clean_bdev_bh_alias(bh);
2129d1bd0b4eSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) {
2130637aff46SNick Piggin clear_buffer_new(bh);
21311da177e4SLinus Torvalds set_buffer_uptodate(bh);
2132637aff46SNick Piggin mark_buffer_dirty(bh);
21331da177e4SLinus Torvalds continue;
21341da177e4SLinus Torvalds }
2135eebd2aa3SChristoph Lameter if (block_end > to || block_start < from)
2136d1bd0b4eSMatthew Wilcox (Oracle) folio_zero_segments(folio,
2137eebd2aa3SChristoph Lameter to, block_end,
2138eebd2aa3SChristoph Lameter block_start, from);
21391da177e4SLinus Torvalds continue;
21401da177e4SLinus Torvalds }
21411da177e4SLinus Torvalds }
2142d1bd0b4eSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) {
21431da177e4SLinus Torvalds if (!buffer_uptodate(bh))
21441da177e4SLinus Torvalds set_buffer_uptodate(bh);
21451da177e4SLinus Torvalds continue;
21461da177e4SLinus Torvalds }
21471da177e4SLinus Torvalds if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
214833a266ddSDavid Chinner !buffer_unwritten(bh) &&
21491da177e4SLinus Torvalds (block_start < from || block_end > to)) {
2150e7ea1129SZhang Yi bh_read_nowait(bh, 0);
21511da177e4SLinus Torvalds *wait_bh++=bh;
21521da177e4SLinus Torvalds }
21531da177e4SLinus Torvalds }
21541da177e4SLinus Torvalds /*
21551da177e4SLinus Torvalds * If we issued read requests - let them complete.
21561da177e4SLinus Torvalds */
21571da177e4SLinus Torvalds while(wait_bh > wait) {
21581da177e4SLinus Torvalds wait_on_buffer(*--wait_bh);
21591da177e4SLinus Torvalds if (!buffer_uptodate(*wait_bh))
2160f3ddbdc6SNick Piggin err = -EIO;
21611da177e4SLinus Torvalds }
2162f9f07b6cSJan Kara if (unlikely(err))
21634a9622f2SMatthew Wilcox (Oracle) folio_zero_new_buffers(folio, from, to);
21641da177e4SLinus Torvalds return err;
21651da177e4SLinus Torvalds }
2166ae259a9cSChristoph Hellwig
__block_write_begin(struct folio * folio,loff_t pos,unsigned len,get_block_t * get_block)21679f04609fSMatthew Wilcox (Oracle) int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
2168ae259a9cSChristoph Hellwig get_block_t *get_block)
2169ae259a9cSChristoph Hellwig {
21709f04609fSMatthew Wilcox (Oracle) return __block_write_begin_int(folio, pos, len, get_block, NULL);
2171ae259a9cSChristoph Hellwig }
2172ebdec241SChristoph Hellwig EXPORT_SYMBOL(__block_write_begin);
21731da177e4SLinus Torvalds
__block_commit_write(struct folio * folio,size_t from,size_t to)2174a524fcfeSBean Huo static void __block_commit_write(struct folio *folio, size_t from, size_t to)
21751da177e4SLinus Torvalds {
21768c6cb3e3SMatthew Wilcox (Oracle) size_t block_start, block_end;
21778c6cb3e3SMatthew Wilcox (Oracle) bool partial = false;
21781da177e4SLinus Torvalds unsigned blocksize;
21791da177e4SLinus Torvalds struct buffer_head *bh, *head;
21801da177e4SLinus Torvalds
21818c6cb3e3SMatthew Wilcox (Oracle) bh = head = folio_buffers(folio);
218283f4414bSWojciech Gładysz if (!bh)
218383f4414bSWojciech Gładysz return;
218445bce8f3SLinus Torvalds blocksize = bh->b_size;
21851da177e4SLinus Torvalds
218645bce8f3SLinus Torvalds block_start = 0;
218745bce8f3SLinus Torvalds do {
21881da177e4SLinus Torvalds block_end = block_start + blocksize;
21891da177e4SLinus Torvalds if (block_end <= from || block_start >= to) {
21901da177e4SLinus Torvalds if (!buffer_uptodate(bh))
21918c6cb3e3SMatthew Wilcox (Oracle) partial = true;
21921da177e4SLinus Torvalds } else {
21931da177e4SLinus Torvalds set_buffer_uptodate(bh);
21941da177e4SLinus Torvalds mark_buffer_dirty(bh);
21951da177e4SLinus Torvalds }
21964ebd3aecSYang Guo if (buffer_new(bh))
2197afddba49SNick Piggin clear_buffer_new(bh);
219845bce8f3SLinus Torvalds
219945bce8f3SLinus Torvalds block_start = block_end;
220045bce8f3SLinus Torvalds bh = bh->b_this_page;
220145bce8f3SLinus Torvalds } while (bh != head);
22021da177e4SLinus Torvalds
22031da177e4SLinus Torvalds /*
22041da177e4SLinus Torvalds * If this is a partial write which happened to make all buffers
22052c69e205SMatthew Wilcox (Oracle) * uptodate then we can optimize away a bogus read_folio() for
22068c6cb3e3SMatthew Wilcox (Oracle) * the next read(). Here we 'discover' whether the folio went
22071da177e4SLinus Torvalds * uptodate as a result of this (potentially partial) write.
22081da177e4SLinus Torvalds */
22091da177e4SLinus Torvalds if (!partial)
22108c6cb3e3SMatthew Wilcox (Oracle) folio_mark_uptodate(folio);
22111da177e4SLinus Torvalds }
22121da177e4SLinus Torvalds
22131da177e4SLinus Torvalds /*
2214155130a4SChristoph Hellwig * block_write_begin takes care of the basic task of block allocation and
2215155130a4SChristoph Hellwig * bringing partial write blocks uptodate first.
2216155130a4SChristoph Hellwig *
22177bb46a67Snpiggin@suse.de * The filesystem needs to handle block truncation upon failure.
2218afddba49SNick Piggin */
block_write_begin(struct address_space * mapping,loff_t pos,unsigned len,struct folio ** foliop,get_block_t * get_block)2219155130a4SChristoph Hellwig int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
22201da86618SMatthew Wilcox (Oracle) struct folio **foliop, get_block_t *get_block)
2221afddba49SNick Piggin {
222209cbfeafSKirill A. Shutemov pgoff_t index = pos >> PAGE_SHIFT;
22238eb835a1SMatthew Wilcox (Oracle) struct folio *folio;
22246e1db88dSChristoph Hellwig int status;
2225afddba49SNick Piggin
22268eb835a1SMatthew Wilcox (Oracle) folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
22278eb835a1SMatthew Wilcox (Oracle) mapping_gfp_mask(mapping));
22288eb835a1SMatthew Wilcox (Oracle) if (IS_ERR(folio))
22298eb835a1SMatthew Wilcox (Oracle) return PTR_ERR(folio);
2230afddba49SNick Piggin
22318eb835a1SMatthew Wilcox (Oracle) status = __block_write_begin_int(folio, pos, len, get_block, NULL);
2232afddba49SNick Piggin if (unlikely(status)) {
22338eb835a1SMatthew Wilcox (Oracle) folio_unlock(folio);
22348eb835a1SMatthew Wilcox (Oracle) folio_put(folio);
22358eb835a1SMatthew Wilcox (Oracle) folio = NULL;
2236afddba49SNick Piggin }
2237afddba49SNick Piggin
22381da86618SMatthew Wilcox (Oracle) *foliop = folio;
2239afddba49SNick Piggin return status;
2240afddba49SNick Piggin }
2241afddba49SNick Piggin EXPORT_SYMBOL(block_write_begin);
2242afddba49SNick Piggin
block_write_end(struct file * file,struct address_space * mapping,loff_t pos,unsigned len,unsigned copied,struct folio * folio,void * fsdata)2243afddba49SNick Piggin int block_write_end(struct file *file, struct address_space *mapping,
2244afddba49SNick Piggin loff_t pos, unsigned len, unsigned copied,
224597edbc02SMatthew Wilcox (Oracle) struct folio *folio, void *fsdata)
2246afddba49SNick Piggin {
22478c6cb3e3SMatthew Wilcox (Oracle) size_t start = pos - folio_pos(folio);
2248afddba49SNick Piggin
2249afddba49SNick Piggin if (unlikely(copied < len)) {
2250afddba49SNick Piggin /*
22512c69e205SMatthew Wilcox (Oracle) * The buffers that were written will now be uptodate, so
22522c69e205SMatthew Wilcox (Oracle) * we don't have to worry about a read_folio reading them
22532c69e205SMatthew Wilcox (Oracle) * and overwriting a partial write. However if we have
22542c69e205SMatthew Wilcox (Oracle) * encountered a short write and only partially written
22552c69e205SMatthew Wilcox (Oracle) * into a buffer, it will not be marked uptodate, so a
22562c69e205SMatthew Wilcox (Oracle) * read_folio might come in and destroy our partial write.
2257afddba49SNick Piggin *
2258afddba49SNick Piggin * Do the simplest thing, and just treat any short write to a
22598c6cb3e3SMatthew Wilcox (Oracle) * non uptodate folio as a zero-length write, and force the
2260afddba49SNick Piggin * caller to redo the whole thing.
2261afddba49SNick Piggin */
22628c6cb3e3SMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio))
2263afddba49SNick Piggin copied = 0;
2264afddba49SNick Piggin
22654a9622f2SMatthew Wilcox (Oracle) folio_zero_new_buffers(folio, start+copied, start+len);
2266afddba49SNick Piggin }
22678c6cb3e3SMatthew Wilcox (Oracle) flush_dcache_folio(folio);
2268afddba49SNick Piggin
2269afddba49SNick Piggin /* This could be a short (even 0-length) commit */
2270489b7e72SBean Huo __block_commit_write(folio, start, start + copied);
2271afddba49SNick Piggin
2272afddba49SNick Piggin return copied;
2273afddba49SNick Piggin }
2274afddba49SNick Piggin EXPORT_SYMBOL(block_write_end);
2275afddba49SNick Piggin
generic_write_end(struct file * file,struct address_space * mapping,loff_t pos,unsigned len,unsigned copied,struct folio * folio,void * fsdata)2276afddba49SNick Piggin int generic_write_end(struct file *file, struct address_space *mapping,
2277afddba49SNick Piggin loff_t pos, unsigned len, unsigned copied,
2278a225800fSMatthew Wilcox (Oracle) struct folio *folio, void *fsdata)
2279afddba49SNick Piggin {
22808af54f29SChristoph Hellwig struct inode *inode = mapping->host;
22818af54f29SChristoph Hellwig loff_t old_size = inode->i_size;
22828af54f29SChristoph Hellwig bool i_size_changed = false;
22838af54f29SChristoph Hellwig
228497edbc02SMatthew Wilcox (Oracle) copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
22858af54f29SChristoph Hellwig
22868af54f29SChristoph Hellwig /*
22878af54f29SChristoph Hellwig * No need to use i_size_read() here, the i_size cannot change under us
22888af54f29SChristoph Hellwig * because we hold i_rwsem.
22898af54f29SChristoph Hellwig *
2290696876d0SMatthew Wilcox (Oracle) * But it's important to update i_size while still holding folio lock:
22918af54f29SChristoph Hellwig * page writeout could otherwise come in and zero beyond i_size.
22928af54f29SChristoph Hellwig */
22938af54f29SChristoph Hellwig if (pos + copied > inode->i_size) {
22948af54f29SChristoph Hellwig i_size_write(inode, pos + copied);
22958af54f29SChristoph Hellwig i_size_changed = true;
22968af54f29SChristoph Hellwig }
22978af54f29SChristoph Hellwig
2298696876d0SMatthew Wilcox (Oracle) folio_unlock(folio);
2299696876d0SMatthew Wilcox (Oracle) folio_put(folio);
23008af54f29SChristoph Hellwig
23018af54f29SChristoph Hellwig if (old_size < pos)
23028af54f29SChristoph Hellwig pagecache_isize_extended(inode, old_size, pos);
23038af54f29SChristoph Hellwig /*
23048af54f29SChristoph Hellwig * Don't mark the inode dirty under page lock. First, it unnecessarily
23058af54f29SChristoph Hellwig * makes the holding time of page lock longer. Second, it forces lock
23068af54f29SChristoph Hellwig * ordering of page lock and transaction start for journaling
23078af54f29SChristoph Hellwig * filesystems.
23088af54f29SChristoph Hellwig */
23098af54f29SChristoph Hellwig if (i_size_changed)
23108af54f29SChristoph Hellwig mark_inode_dirty(inode);
231126ddb1f4SAndreas Gruenbacher return copied;
2312afddba49SNick Piggin }
2313afddba49SNick Piggin EXPORT_SYMBOL(generic_write_end);
2314afddba49SNick Piggin
2315afddba49SNick Piggin /*
23162e7e80f7SMatthew Wilcox (Oracle) * block_is_partially_uptodate checks whether buffers within a folio are
23178ab22b9aSHisashi Hifumi * uptodate or not.
23188ab22b9aSHisashi Hifumi *
23192e7e80f7SMatthew Wilcox (Oracle) * Returns true if all buffers which correspond to the specified part
23202e7e80f7SMatthew Wilcox (Oracle) * of the folio are uptodate.
23218ab22b9aSHisashi Hifumi */
block_is_partially_uptodate(struct folio * folio,size_t from,size_t count)23222e7e80f7SMatthew Wilcox (Oracle) bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
23238ab22b9aSHisashi Hifumi {
23248ab22b9aSHisashi Hifumi unsigned block_start, block_end, blocksize;
23258ab22b9aSHisashi Hifumi unsigned to;
23268ab22b9aSHisashi Hifumi struct buffer_head *bh, *head;
23272e7e80f7SMatthew Wilcox (Oracle) bool ret = true;
23288ab22b9aSHisashi Hifumi
23292e7e80f7SMatthew Wilcox (Oracle) head = folio_buffers(folio);
23302e7e80f7SMatthew Wilcox (Oracle) if (!head)
23312e7e80f7SMatthew Wilcox (Oracle) return false;
233245bce8f3SLinus Torvalds blocksize = head->b_size;
23332e7e80f7SMatthew Wilcox (Oracle) to = min_t(unsigned, folio_size(folio) - from, count);
23348ab22b9aSHisashi Hifumi to = from + to;
23352e7e80f7SMatthew Wilcox (Oracle) if (from < blocksize && to > folio_size(folio) - blocksize)
23362e7e80f7SMatthew Wilcox (Oracle) return false;
23378ab22b9aSHisashi Hifumi
23388ab22b9aSHisashi Hifumi bh = head;
23398ab22b9aSHisashi Hifumi block_start = 0;
23408ab22b9aSHisashi Hifumi do {
23418ab22b9aSHisashi Hifumi block_end = block_start + blocksize;
23428ab22b9aSHisashi Hifumi if (block_end > from && block_start < to) {
23438ab22b9aSHisashi Hifumi if (!buffer_uptodate(bh)) {
23442e7e80f7SMatthew Wilcox (Oracle) ret = false;
23458ab22b9aSHisashi Hifumi break;
23468ab22b9aSHisashi Hifumi }
23478ab22b9aSHisashi Hifumi if (block_end >= to)
23488ab22b9aSHisashi Hifumi break;
23498ab22b9aSHisashi Hifumi }
23508ab22b9aSHisashi Hifumi block_start = block_end;
23518ab22b9aSHisashi Hifumi bh = bh->b_this_page;
23528ab22b9aSHisashi Hifumi } while (bh != head);
23538ab22b9aSHisashi Hifumi
23548ab22b9aSHisashi Hifumi return ret;
23558ab22b9aSHisashi Hifumi }
23568ab22b9aSHisashi Hifumi EXPORT_SYMBOL(block_is_partially_uptodate);
23578ab22b9aSHisashi Hifumi
23588ab22b9aSHisashi Hifumi /*
23592c69e205SMatthew Wilcox (Oracle) * Generic "read_folio" function for block devices that have the normal
23601da177e4SLinus Torvalds * get_block functionality. This is most of the block device filesystems.
23612c69e205SMatthew Wilcox (Oracle) * Reads the folio asynchronously --- the unlock_buffer() and
23621da177e4SLinus Torvalds * set/clear_buffer_uptodate() functions propagate buffer state into the
23632c69e205SMatthew Wilcox (Oracle) * folio once IO has completed.
23641da177e4SLinus Torvalds */
block_read_full_folio(struct folio * folio,get_block_t * get_block)23652c69e205SMatthew Wilcox (Oracle) int block_read_full_folio(struct folio *folio, get_block_t *get_block)
23661da177e4SLinus Torvalds {
23672c69e205SMatthew Wilcox (Oracle) struct inode *inode = folio->mapping->host;
23681da177e4SLinus Torvalds sector_t iblock, lblock;
23691da177e4SLinus Torvalds struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2370fa399c31SMatthew Wilcox (Oracle) size_t blocksize;
23711da177e4SLinus Torvalds int nr, i;
23721da177e4SLinus Torvalds int fully_mapped = 1;
2373b7a6eb22SMatthew Wilcox (Oracle) bool page_error = false;
23744fa512ceSEric Biggers loff_t limit = i_size_read(inode);
23754fa512ceSEric Biggers
23764fa512ceSEric Biggers /* This is needed for ext4. */
23774fa512ceSEric Biggers if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
23784fa512ceSEric Biggers limit = inode->i_sb->s_maxbytes;
23791da177e4SLinus Torvalds
23802c69e205SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
23812c69e205SMatthew Wilcox (Oracle)
2382c6c8c3e7SPankaj Raghav head = folio_create_buffers(folio, inode, 0);
238345bce8f3SLinus Torvalds blocksize = head->b_size;
23841da177e4SLinus Torvalds
2385fa399c31SMatthew Wilcox (Oracle) iblock = div_u64(folio_pos(folio), blocksize);
2386fa399c31SMatthew Wilcox (Oracle) lblock = div_u64(limit + blocksize - 1, blocksize);
23871da177e4SLinus Torvalds bh = head;
23881da177e4SLinus Torvalds nr = 0;
23891da177e4SLinus Torvalds i = 0;
23901da177e4SLinus Torvalds
23911da177e4SLinus Torvalds do {
23921da177e4SLinus Torvalds if (buffer_uptodate(bh))
23931da177e4SLinus Torvalds continue;
23941da177e4SLinus Torvalds
23951da177e4SLinus Torvalds if (!buffer_mapped(bh)) {
2396c64610baSAndrew Morton int err = 0;
2397c64610baSAndrew Morton
23981da177e4SLinus Torvalds fully_mapped = 0;
23991da177e4SLinus Torvalds if (iblock < lblock) {
2400b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize);
2401c64610baSAndrew Morton err = get_block(inode, iblock, bh, 0);
24027ad635eaSMatthew Wilcox (Oracle) if (err)
2403b7a6eb22SMatthew Wilcox (Oracle) page_error = true;
2404b7a6eb22SMatthew Wilcox (Oracle) }
24051da177e4SLinus Torvalds if (!buffer_mapped(bh)) {
24062c69e205SMatthew Wilcox (Oracle) folio_zero_range(folio, i * blocksize,
24072c69e205SMatthew Wilcox (Oracle) blocksize);
2408c64610baSAndrew Morton if (!err)
24091da177e4SLinus Torvalds set_buffer_uptodate(bh);
24101da177e4SLinus Torvalds continue;
24111da177e4SLinus Torvalds }
24121da177e4SLinus Torvalds /*
24131da177e4SLinus Torvalds * get_block() might have updated the buffer
24141da177e4SLinus Torvalds * synchronously
24151da177e4SLinus Torvalds */
24161da177e4SLinus Torvalds if (buffer_uptodate(bh))
24171da177e4SLinus Torvalds continue;
24181da177e4SLinus Torvalds }
24191da177e4SLinus Torvalds arr[nr++] = bh;
24201da177e4SLinus Torvalds } while (i++, iblock++, (bh = bh->b_this_page) != head);
24211da177e4SLinus Torvalds
24221da177e4SLinus Torvalds if (fully_mapped)
24232c69e205SMatthew Wilcox (Oracle) folio_set_mappedtodisk(folio);
24241da177e4SLinus Torvalds
24251da177e4SLinus Torvalds if (!nr) {
24261da177e4SLinus Torvalds /*
24276ba924d3SMatthew Wilcox (Oracle) * All buffers are uptodate or get_block() returned an
24286ba924d3SMatthew Wilcox (Oracle) * error when trying to map them - we can finish the read.
24291da177e4SLinus Torvalds */
24306ba924d3SMatthew Wilcox (Oracle) folio_end_read(folio, !page_error);
24311da177e4SLinus Torvalds return 0;
24321da177e4SLinus Torvalds }
24331da177e4SLinus Torvalds
24341da177e4SLinus Torvalds /* Stage two: lock the buffers */
24351da177e4SLinus Torvalds for (i = 0; i < nr; i++) {
24361da177e4SLinus Torvalds bh = arr[i];
24371da177e4SLinus Torvalds lock_buffer(bh);
24381da177e4SLinus Torvalds mark_buffer_async_read(bh);
24391da177e4SLinus Torvalds }
24401da177e4SLinus Torvalds
24411da177e4SLinus Torvalds /*
24421da177e4SLinus Torvalds * Stage 3: start the IO. Check for uptodateness
24431da177e4SLinus Torvalds * inside the buffer lock in case another process reading
24441da177e4SLinus Torvalds * the underlying blockdev brought it uptodate (the sct fix).
24451da177e4SLinus Torvalds */
24461da177e4SLinus Torvalds for (i = 0; i < nr; i++) {
24471da177e4SLinus Torvalds bh = arr[i];
24481da177e4SLinus Torvalds if (buffer_uptodate(bh))
24491da177e4SLinus Torvalds end_buffer_async_read(bh, 1);
24501da177e4SLinus Torvalds else
24511420c4a5SBart Van Assche submit_bh(REQ_OP_READ, bh);
24521da177e4SLinus Torvalds }
24531da177e4SLinus Torvalds return 0;
24541da177e4SLinus Torvalds }
24552c69e205SMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_read_full_folio);
24561da177e4SLinus Torvalds
24571da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding
245889e10787SNick Piggin * truncates. Uses filesystem pagecache writes to allow the filesystem to
24591da177e4SLinus Torvalds * deal with the hole.
24601da177e4SLinus Torvalds */
generic_cont_expand_simple(struct inode * inode,loff_t size)246189e10787SNick Piggin int generic_cont_expand_simple(struct inode *inode, loff_t size)
24621da177e4SLinus Torvalds {
24631da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping;
246453b524b8SMatthew Wilcox (Oracle) const struct address_space_operations *aops = mapping->a_ops;
24651da86618SMatthew Wilcox (Oracle) struct folio *folio;
24661468c6f4SAlexander Potapenko void *fsdata = NULL;
24671da177e4SLinus Torvalds int err;
24681da177e4SLinus Torvalds
2469c08d3b0eSnpiggin@suse.de err = inode_newsize_ok(inode, size);
2470c08d3b0eSnpiggin@suse.de if (err)
24711da177e4SLinus Torvalds goto out;
24721da177e4SLinus Torvalds
24731da86618SMatthew Wilcox (Oracle) err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata);
247489e10787SNick Piggin if (err)
247505eb0b51SOGAWA Hirofumi goto out;
247605eb0b51SOGAWA Hirofumi
24771da86618SMatthew Wilcox (Oracle) err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata);
247889e10787SNick Piggin BUG_ON(err > 0);
247905eb0b51SOGAWA Hirofumi
248005eb0b51SOGAWA Hirofumi out:
248105eb0b51SOGAWA Hirofumi return err;
248205eb0b51SOGAWA Hirofumi }
24831fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_cont_expand_simple);
248405eb0b51SOGAWA Hirofumi
cont_expand_zero(struct file * file,struct address_space * mapping,loff_t pos,loff_t * bytes)2485f1e3af72SAdrian Bunk static int cont_expand_zero(struct file *file, struct address_space *mapping,
248689e10787SNick Piggin loff_t pos, loff_t *bytes)
248705eb0b51SOGAWA Hirofumi {
248889e10787SNick Piggin struct inode *inode = mapping->host;
248953b524b8SMatthew Wilcox (Oracle) const struct address_space_operations *aops = mapping->a_ops;
249093407472SFabian Frederick unsigned int blocksize = i_blocksize(inode);
24911da86618SMatthew Wilcox (Oracle) struct folio *folio;
24921468c6f4SAlexander Potapenko void *fsdata = NULL;
249389e10787SNick Piggin pgoff_t index, curidx;
249489e10787SNick Piggin loff_t curpos;
249589e10787SNick Piggin unsigned zerofrom, offset, len;
249689e10787SNick Piggin int err = 0;
249705eb0b51SOGAWA Hirofumi
249809cbfeafSKirill A. Shutemov index = pos >> PAGE_SHIFT;
249909cbfeafSKirill A. Shutemov offset = pos & ~PAGE_MASK;
250089e10787SNick Piggin
250109cbfeafSKirill A. Shutemov while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
250209cbfeafSKirill A. Shutemov zerofrom = curpos & ~PAGE_MASK;
250389e10787SNick Piggin if (zerofrom & (blocksize-1)) {
250489e10787SNick Piggin *bytes |= (blocksize-1);
250589e10787SNick Piggin (*bytes)++;
250689e10787SNick Piggin }
250709cbfeafSKirill A. Shutemov len = PAGE_SIZE - zerofrom;
250889e10787SNick Piggin
250953b524b8SMatthew Wilcox (Oracle) err = aops->write_begin(file, mapping, curpos, len,
25101da86618SMatthew Wilcox (Oracle) &folio, &fsdata);
251189e10787SNick Piggin if (err)
251289e10787SNick Piggin goto out;
25131da86618SMatthew Wilcox (Oracle) folio_zero_range(folio, offset_in_folio(folio, curpos), len);
251453b524b8SMatthew Wilcox (Oracle) err = aops->write_end(file, mapping, curpos, len, len,
25151da86618SMatthew Wilcox (Oracle) folio, fsdata);
251689e10787SNick Piggin if (err < 0)
251789e10787SNick Piggin goto out;
251889e10787SNick Piggin BUG_ON(err != len);
251989e10787SNick Piggin err = 0;
2520061e9746SOGAWA Hirofumi
2521061e9746SOGAWA Hirofumi balance_dirty_pages_ratelimited(mapping);
2522c2ca0fcdSMikulas Patocka
252308d405c8SDavidlohr Bueso if (fatal_signal_pending(current)) {
2524c2ca0fcdSMikulas Patocka err = -EINTR;
2525c2ca0fcdSMikulas Patocka goto out;
2526c2ca0fcdSMikulas Patocka }
252789e10787SNick Piggin }
252889e10787SNick Piggin
252989e10787SNick Piggin /* page covers the boundary, find the boundary offset */
253089e10787SNick Piggin if (index == curidx) {
253109cbfeafSKirill A. Shutemov zerofrom = curpos & ~PAGE_MASK;
253289e10787SNick Piggin /* if we will expand the thing last block will be filled */
253389e10787SNick Piggin if (offset <= zerofrom) {
253489e10787SNick Piggin goto out;
253589e10787SNick Piggin }
253689e10787SNick Piggin if (zerofrom & (blocksize-1)) {
253789e10787SNick Piggin *bytes |= (blocksize-1);
253889e10787SNick Piggin (*bytes)++;
253989e10787SNick Piggin }
254089e10787SNick Piggin len = offset - zerofrom;
254189e10787SNick Piggin
254253b524b8SMatthew Wilcox (Oracle) err = aops->write_begin(file, mapping, curpos, len,
25431da86618SMatthew Wilcox (Oracle) &folio, &fsdata);
254489e10787SNick Piggin if (err)
254589e10787SNick Piggin goto out;
25461da86618SMatthew Wilcox (Oracle) folio_zero_range(folio, offset_in_folio(folio, curpos), len);
254753b524b8SMatthew Wilcox (Oracle) err = aops->write_end(file, mapping, curpos, len, len,
25481da86618SMatthew Wilcox (Oracle) folio, fsdata);
254989e10787SNick Piggin if (err < 0)
255089e10787SNick Piggin goto out;
255189e10787SNick Piggin BUG_ON(err != len);
255289e10787SNick Piggin err = 0;
255389e10787SNick Piggin }
255489e10787SNick Piggin out:
255589e10787SNick Piggin return err;
25561da177e4SLinus Torvalds }
25571da177e4SLinus Torvalds
25581da177e4SLinus Torvalds /*
25591da177e4SLinus Torvalds * For moronic filesystems that do not allow holes in file.
25601da177e4SLinus Torvalds * We may have to extend the file.
25611da177e4SLinus Torvalds */
cont_write_begin(struct file * file,struct address_space * mapping,loff_t pos,unsigned len,struct folio ** foliop,void ** fsdata,get_block_t * get_block,loff_t * bytes)2562282dc178SChristoph Hellwig int cont_write_begin(struct file *file, struct address_space *mapping,
2563be3bbbc5SMatthew Wilcox (Oracle) loff_t pos, unsigned len,
25641da86618SMatthew Wilcox (Oracle) struct folio **foliop, void **fsdata,
256589e10787SNick Piggin get_block_t *get_block, loff_t *bytes)
25661da177e4SLinus Torvalds {
25671da177e4SLinus Torvalds struct inode *inode = mapping->host;
256893407472SFabian Frederick unsigned int blocksize = i_blocksize(inode);
256993407472SFabian Frederick unsigned int zerofrom;
257089e10787SNick Piggin int err;
25711da177e4SLinus Torvalds
257289e10787SNick Piggin err = cont_expand_zero(file, mapping, pos, bytes);
257389e10787SNick Piggin if (err)
2574155130a4SChristoph Hellwig return err;
25751da177e4SLinus Torvalds
257609cbfeafSKirill A. Shutemov zerofrom = *bytes & ~PAGE_MASK;
257789e10787SNick Piggin if (pos+len > *bytes && zerofrom & (blocksize-1)) {
25781da177e4SLinus Torvalds *bytes |= (blocksize-1);
25791da177e4SLinus Torvalds (*bytes)++;
25801da177e4SLinus Torvalds }
25811da177e4SLinus Torvalds
25821da86618SMatthew Wilcox (Oracle) return block_write_begin(mapping, pos, len, foliop, get_block);
25831da177e4SLinus Torvalds }
25841fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(cont_write_begin);
25851da177e4SLinus Torvalds
block_commit_write(struct page * page,unsigned from,unsigned to)2586a524fcfeSBean Huo void block_commit_write(struct page *page, unsigned from, unsigned to)
25871da177e4SLinus Torvalds {
25888c6cb3e3SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page);
2589489b7e72SBean Huo __block_commit_write(folio, from, to);
25901da177e4SLinus Torvalds }
25911fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_commit_write);
25921da177e4SLinus Torvalds
259354171690SDavid Chinner /*
259454171690SDavid Chinner * block_page_mkwrite() is not allowed to change the file size as it gets
259554171690SDavid Chinner * called from a page fault handler when a page is first dirtied. Hence we must
259654171690SDavid Chinner * be careful to check for EOF conditions here. We set the page up correctly
259754171690SDavid Chinner * for a written page which means we get ENOSPC checking when writing into
259854171690SDavid Chinner * holes and correct delalloc and unwritten extent mapping on filesystems that
259954171690SDavid Chinner * support these features.
260054171690SDavid Chinner *
260154171690SDavid Chinner * We are not allowed to take the i_mutex here so we have to play games to
260254171690SDavid Chinner * protect against truncate races as the page could now be beyond EOF. Because
26037bb46a67Snpiggin@suse.de * truncate writes the inode size before removing pages, once we have the
260454171690SDavid Chinner * page lock we can determine safely if the page is beyond EOF. If it is not
260554171690SDavid Chinner * beyond EOF, then the page is guaranteed safe against truncation until we
260654171690SDavid Chinner * unlock the page.
2607ea13a864SJan Kara *
260814da9200SJan Kara * Direct callers of this function should protect against filesystem freezing
26095c500029SRoss Zwisler * using sb_start_pagefault() - sb_end_pagefault() functions.
261054171690SDavid Chinner */
block_page_mkwrite(struct vm_area_struct * vma,struct vm_fault * vmf,get_block_t get_block)26115c500029SRoss Zwisler int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
261254171690SDavid Chinner get_block_t get_block)
261354171690SDavid Chinner {
2614fe181377SMatthew Wilcox (Oracle) struct folio *folio = page_folio(vmf->page);
2615496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file);
261654171690SDavid Chinner unsigned long end;
261754171690SDavid Chinner loff_t size;
261824da4fabSJan Kara int ret;
261954171690SDavid Chinner
2620fe181377SMatthew Wilcox (Oracle) folio_lock(folio);
262154171690SDavid Chinner size = i_size_read(inode);
2622fe181377SMatthew Wilcox (Oracle) if ((folio->mapping != inode->i_mapping) ||
2623fe181377SMatthew Wilcox (Oracle) (folio_pos(folio) >= size)) {
262424da4fabSJan Kara /* We overload EFAULT to mean page got truncated */
262524da4fabSJan Kara ret = -EFAULT;
262624da4fabSJan Kara goto out_unlock;
262754171690SDavid Chinner }
262854171690SDavid Chinner
2629fe181377SMatthew Wilcox (Oracle) end = folio_size(folio);
2630fe181377SMatthew Wilcox (Oracle) /* folio is wholly or partially inside EOF */
2631fe181377SMatthew Wilcox (Oracle) if (folio_pos(folio) + end > size)
2632fe181377SMatthew Wilcox (Oracle) end = size - folio_pos(folio);
263354171690SDavid Chinner
2634fe181377SMatthew Wilcox (Oracle) ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2635a524fcfeSBean Huo if (unlikely(ret))
263624da4fabSJan Kara goto out_unlock;
2637a524fcfeSBean Huo
2638a524fcfeSBean Huo __block_commit_write(folio, 0, end);
2639a524fcfeSBean Huo
2640fe181377SMatthew Wilcox (Oracle) folio_mark_dirty(folio);
2641fe181377SMatthew Wilcox (Oracle) folio_wait_stable(folio);
264224da4fabSJan Kara return 0;
264324da4fabSJan Kara out_unlock:
2644fe181377SMatthew Wilcox (Oracle) folio_unlock(folio);
264554171690SDavid Chinner return ret;
264654171690SDavid Chinner }
26471fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_page_mkwrite);
26481da177e4SLinus Torvalds
block_truncate_page(struct address_space * mapping,loff_t from,get_block_t * get_block)26491da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping,
26501da177e4SLinus Torvalds loff_t from, get_block_t *get_block)
26511da177e4SLinus Torvalds {
265209cbfeafSKirill A. Shutemov pgoff_t index = from >> PAGE_SHIFT;
26531da177e4SLinus Torvalds unsigned blocksize;
265454b21a79SAndrew Morton sector_t iblock;
26556d68f644SMatthew Wilcox (Oracle) size_t offset, length, pos;
26561da177e4SLinus Torvalds struct inode *inode = mapping->host;
26576d68f644SMatthew Wilcox (Oracle) struct folio *folio;
26581da177e4SLinus Torvalds struct buffer_head *bh;
2659dc7cb2d2SJiapeng Chong int err = 0;
26601da177e4SLinus Torvalds
266193407472SFabian Frederick blocksize = i_blocksize(inode);
26626d68f644SMatthew Wilcox (Oracle) length = from & (blocksize - 1);
26631da177e4SLinus Torvalds
26641da177e4SLinus Torvalds /* Block boundary? Nothing to do */
26651da177e4SLinus Torvalds if (!length)
26661da177e4SLinus Torvalds return 0;
26671da177e4SLinus Torvalds
26681da177e4SLinus Torvalds length = blocksize - length;
26694b04646cSMatthew Wilcox (Oracle) iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits;
26701da177e4SLinus Torvalds
26716d68f644SMatthew Wilcox (Oracle) folio = filemap_grab_folio(mapping, index);
26726d68f644SMatthew Wilcox (Oracle) if (IS_ERR(folio))
26736d68f644SMatthew Wilcox (Oracle) return PTR_ERR(folio);
26741da177e4SLinus Torvalds
26756d68f644SMatthew Wilcox (Oracle) bh = folio_buffers(folio);
26763decb856SMatthew Wilcox (Oracle) if (!bh)
26770a88810dSMatthew Wilcox (Oracle) bh = create_empty_buffers(folio, blocksize, 0);
26781da177e4SLinus Torvalds
26791da177e4SLinus Torvalds /* Find the buffer that contains "offset" */
26806d68f644SMatthew Wilcox (Oracle) offset = offset_in_folio(folio, from);
26811da177e4SLinus Torvalds pos = blocksize;
26821da177e4SLinus Torvalds while (offset >= pos) {
26831da177e4SLinus Torvalds bh = bh->b_this_page;
26841da177e4SLinus Torvalds iblock++;
26851da177e4SLinus Torvalds pos += blocksize;
26861da177e4SLinus Torvalds }
26871da177e4SLinus Torvalds
26881da177e4SLinus Torvalds if (!buffer_mapped(bh)) {
2689b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize);
26901da177e4SLinus Torvalds err = get_block(inode, iblock, bh, 0);
26911da177e4SLinus Torvalds if (err)
26921da177e4SLinus Torvalds goto unlock;
26931da177e4SLinus Torvalds /* unmapped? It's a hole - nothing to do */
26941da177e4SLinus Torvalds if (!buffer_mapped(bh))
26951da177e4SLinus Torvalds goto unlock;
26961da177e4SLinus Torvalds }
26971da177e4SLinus Torvalds
26981da177e4SLinus Torvalds /* Ok, it's mapped. Make sure it's up-to-date */
26996d68f644SMatthew Wilcox (Oracle) if (folio_test_uptodate(folio))
27001da177e4SLinus Torvalds set_buffer_uptodate(bh);
27011da177e4SLinus Torvalds
270233a266ddSDavid Chinner if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2703e7ea1129SZhang Yi err = bh_read(bh, 0);
27041da177e4SLinus Torvalds /* Uhhuh. Read error. Complain and punt. */
2705e7ea1129SZhang Yi if (err < 0)
27061da177e4SLinus Torvalds goto unlock;
27071da177e4SLinus Torvalds }
27081da177e4SLinus Torvalds
27096d68f644SMatthew Wilcox (Oracle) folio_zero_range(folio, offset, length);
27101da177e4SLinus Torvalds mark_buffer_dirty(bh);
27111da177e4SLinus Torvalds
27121da177e4SLinus Torvalds unlock:
27136d68f644SMatthew Wilcox (Oracle) folio_unlock(folio);
27146d68f644SMatthew Wilcox (Oracle) folio_put(folio);
2715dc7cb2d2SJiapeng Chong
27161da177e4SLinus Torvalds return err;
27171da177e4SLinus Torvalds }
27181fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_truncate_page);
27191da177e4SLinus Torvalds
27201da177e4SLinus Torvalds /*
27211da177e4SLinus Torvalds * The generic ->writepage function for buffer-backed address_spaces
27221da177e4SLinus Torvalds */
block_write_full_folio(struct folio * folio,struct writeback_control * wbc,void * get_block)272317bf23a9SMatthew Wilcox (Oracle) int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
272417bf23a9SMatthew Wilcox (Oracle) void *get_block)
27251da177e4SLinus Torvalds {
2726bb0ea598SMatthew Wilcox (Oracle) struct inode * const inode = folio->mapping->host;
27271da177e4SLinus Torvalds loff_t i_size = i_size_read(inode);
27281da177e4SLinus Torvalds
2729bb0ea598SMatthew Wilcox (Oracle) /* Is the folio fully inside i_size? */
2730bb0ea598SMatthew Wilcox (Oracle) if (folio_pos(folio) + folio_size(folio) <= i_size)
273114059f66SMatthew Wilcox (Oracle) return __block_write_full_folio(inode, folio, get_block, wbc);
27321da177e4SLinus Torvalds
2733bb0ea598SMatthew Wilcox (Oracle) /* Is the folio fully outside i_size? (truncate in progress) */
2734bb0ea598SMatthew Wilcox (Oracle) if (folio_pos(folio) >= i_size) {
273553418a18SMatthew Wilcox (Oracle) folio_unlock(folio);
27361da177e4SLinus Torvalds return 0; /* don't care */
27371da177e4SLinus Torvalds }
27381da177e4SLinus Torvalds
27391da177e4SLinus Torvalds /*
2740bb0ea598SMatthew Wilcox (Oracle) * The folio straddles i_size. It must be zeroed out on each and every
27412a61aa40SAdam Buchbinder * writepage invocation because it may be mmapped. "A file is mapped
27421da177e4SLinus Torvalds * in multiples of the page size. For a file that is not a multiple of
27431da177e4SLinus Torvalds * the page size, the remaining memory is zeroed when mapped, and
27441da177e4SLinus Torvalds * writes to that region are not written out to the file."
27451da177e4SLinus Torvalds */
2746bb0ea598SMatthew Wilcox (Oracle) folio_zero_segment(folio, offset_in_folio(folio, i_size),
2747bb0ea598SMatthew Wilcox (Oracle) folio_size(folio));
274814059f66SMatthew Wilcox (Oracle) return __block_write_full_folio(inode, folio, get_block, wbc);
274935c80d5fSChris Mason }
275035c80d5fSChris Mason
generic_block_bmap(struct address_space * mapping,sector_t block,get_block_t * get_block)27511da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
27521da177e4SLinus Torvalds get_block_t *get_block)
27531da177e4SLinus Torvalds {
27541da177e4SLinus Torvalds struct inode *inode = mapping->host;
27552a527d68SAlexander Potapenko struct buffer_head tmp = {
27562a527d68SAlexander Potapenko .b_size = i_blocksize(inode),
27572a527d68SAlexander Potapenko };
27582a527d68SAlexander Potapenko
27591da177e4SLinus Torvalds get_block(inode, block, &tmp, 0);
27601da177e4SLinus Torvalds return tmp.b_blocknr;
27611da177e4SLinus Torvalds }
27621fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_block_bmap);
27631da177e4SLinus Torvalds
end_bio_bh_io_sync(struct bio * bio)27644246a0b6SChristoph Hellwig static void end_bio_bh_io_sync(struct bio *bio)
27651da177e4SLinus Torvalds {
27661da177e4SLinus Torvalds struct buffer_head *bh = bio->bi_private;
27671da177e4SLinus Torvalds
2768b7c44ed9SJens Axboe if (unlikely(bio_flagged(bio, BIO_QUIET)))
276908bafc03SKeith Mannthey set_bit(BH_Quiet, &bh->b_state);
277008bafc03SKeith Mannthey
27714e4cbee9SChristoph Hellwig bh->b_end_io(bh, !bio->bi_status);
27721da177e4SLinus Torvalds bio_put(bio);
27731da177e4SLinus Torvalds }
27741da177e4SLinus Torvalds
submit_bh_wbc(blk_opf_t opf,struct buffer_head * bh,enum rw_hint write_hint,struct writeback_control * wbc)27755bdf402aSRitesh Harjani (IBM) static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
277644981351SBart Van Assche enum rw_hint write_hint,
27771420c4a5SBart Van Assche struct writeback_control *wbc)
27781da177e4SLinus Torvalds {
27791420c4a5SBart Van Assche const enum req_op op = opf & REQ_OP_MASK;
27801da177e4SLinus Torvalds struct bio *bio;
27811da177e4SLinus Torvalds
27821da177e4SLinus Torvalds BUG_ON(!buffer_locked(bh));
27831da177e4SLinus Torvalds BUG_ON(!buffer_mapped(bh));
27841da177e4SLinus Torvalds BUG_ON(!bh->b_end_io);
27858fb0e342SAneesh Kumar K.V BUG_ON(buffer_delay(bh));
27868fb0e342SAneesh Kumar K.V BUG_ON(buffer_unwritten(bh));
27871da177e4SLinus Torvalds
278848fd4f93SJens Axboe /*
278948fd4f93SJens Axboe * Only clear out a write error when rewriting
27901da177e4SLinus Torvalds */
27912a222ca9SMike Christie if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
27921da177e4SLinus Torvalds clear_buffer_write_io_error(bh);
27931da177e4SLinus Torvalds
279407888c66SChristoph Hellwig if (buffer_meta(bh))
27951420c4a5SBart Van Assche opf |= REQ_META;
279607888c66SChristoph Hellwig if (buffer_prio(bh))
27971420c4a5SBart Van Assche opf |= REQ_PRIO;
279807888c66SChristoph Hellwig
27991420c4a5SBart Van Assche bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
28001da177e4SLinus Torvalds
28014f74d15fSEric Biggers fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
28024f74d15fSEric Biggers
28034f024f37SKent Overstreet bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
280444981351SBart Van Assche bio->bi_write_hint = write_hint;
28051da177e4SLinus Torvalds
2806741af75dSJohannes Thumshirn __bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
28071da177e4SLinus Torvalds
28081da177e4SLinus Torvalds bio->bi_end_io = end_bio_bh_io_sync;
28091da177e4SLinus Torvalds bio->bi_private = bh;
28101da177e4SLinus Torvalds
281183c9c547SMing Lei /* Take care of bh's that straddle the end of the device */
281283c9c547SMing Lei guard_bio_eod(bio);
281383c9c547SMing Lei
2814fd42df30SDennis Zhou if (wbc) {
2815fd42df30SDennis Zhou wbc_init_bio(wbc, bio);
281634e51a5eSTejun Heo wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
2817fd42df30SDennis Zhou }
2818fd42df30SDennis Zhou
28194e49ea4aSMike Christie submit_bio(bio);
28201da177e4SLinus Torvalds }
2821bafc0dbaSTejun Heo
submit_bh(blk_opf_t opf,struct buffer_head * bh)28225bdf402aSRitesh Harjani (IBM) void submit_bh(blk_opf_t opf, struct buffer_head *bh)
282371368511SDarrick J. Wong {
282444981351SBart Van Assche submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL);
282571368511SDarrick J. Wong }
28261fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(submit_bh);
28271da177e4SLinus Torvalds
write_dirty_buffer(struct buffer_head * bh,blk_opf_t op_flags)28283ae72869SBart Van Assche void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
28299cb569d6SChristoph Hellwig {
28309cb569d6SChristoph Hellwig lock_buffer(bh);
28319cb569d6SChristoph Hellwig if (!test_clear_buffer_dirty(bh)) {
28329cb569d6SChristoph Hellwig unlock_buffer(bh);
28339cb569d6SChristoph Hellwig return;
28349cb569d6SChristoph Hellwig }
28359cb569d6SChristoph Hellwig bh->b_end_io = end_buffer_write_sync;
28369cb569d6SChristoph Hellwig get_bh(bh);
28371420c4a5SBart Van Assche submit_bh(REQ_OP_WRITE | op_flags, bh);
28389cb569d6SChristoph Hellwig }
28399cb569d6SChristoph Hellwig EXPORT_SYMBOL(write_dirty_buffer);
28409cb569d6SChristoph Hellwig
28411da177e4SLinus Torvalds /*
28421da177e4SLinus Torvalds * For a data-integrity writeout, we need to wait upon any in-progress I/O
28431da177e4SLinus Torvalds * and then start new I/O and then wait upon it. The caller must have a ref on
28441da177e4SLinus Torvalds * the buffer_head.
28451da177e4SLinus Torvalds */
__sync_dirty_buffer(struct buffer_head * bh,blk_opf_t op_flags)28463ae72869SBart Van Assche int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
28471da177e4SLinus Torvalds {
28481da177e4SLinus Torvalds WARN_ON(atomic_read(&bh->b_count) < 1);
28491da177e4SLinus Torvalds lock_buffer(bh);
28501da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) {
2851377254b2SXianting Tian /*
2852377254b2SXianting Tian * The bh should be mapped, but it might not be if the
2853377254b2SXianting Tian * device was hot-removed. Not much we can do but fail the I/O.
2854377254b2SXianting Tian */
2855377254b2SXianting Tian if (!buffer_mapped(bh)) {
2856377254b2SXianting Tian unlock_buffer(bh);
2857377254b2SXianting Tian return -EIO;
2858377254b2SXianting Tian }
2859377254b2SXianting Tian
28601da177e4SLinus Torvalds get_bh(bh);
28611da177e4SLinus Torvalds bh->b_end_io = end_buffer_write_sync;
2862ab620620SRitesh Harjani (IBM) submit_bh(REQ_OP_WRITE | op_flags, bh);
28631da177e4SLinus Torvalds wait_on_buffer(bh);
2864ab620620SRitesh Harjani (IBM) if (!buffer_uptodate(bh))
2865ab620620SRitesh Harjani (IBM) return -EIO;
28661da177e4SLinus Torvalds } else {
28671da177e4SLinus Torvalds unlock_buffer(bh);
28681da177e4SLinus Torvalds }
2869ab620620SRitesh Harjani (IBM) return 0;
28701da177e4SLinus Torvalds }
287187e99511SChristoph Hellwig EXPORT_SYMBOL(__sync_dirty_buffer);
287287e99511SChristoph Hellwig
sync_dirty_buffer(struct buffer_head * bh)287387e99511SChristoph Hellwig int sync_dirty_buffer(struct buffer_head *bh)
287487e99511SChristoph Hellwig {
287570fd7614SChristoph Hellwig return __sync_dirty_buffer(bh, REQ_SYNC);
287687e99511SChristoph Hellwig }
28771fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(sync_dirty_buffer);
28781da177e4SLinus Torvalds
buffer_busy(struct buffer_head * bh)28791da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh)
28801da177e4SLinus Torvalds {
28811da177e4SLinus Torvalds return atomic_read(&bh->b_count) |
28821da177e4SLinus Torvalds (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
28831da177e4SLinus Torvalds }
28841da177e4SLinus Torvalds
288564394763SMatthew Wilcox (Oracle) static bool
drop_buffers(struct folio * folio,struct buffer_head ** buffers_to_free)288664394763SMatthew Wilcox (Oracle) drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
28871da177e4SLinus Torvalds {
288864394763SMatthew Wilcox (Oracle) struct buffer_head *head = folio_buffers(folio);
28891da177e4SLinus Torvalds struct buffer_head *bh;
28901da177e4SLinus Torvalds
28911da177e4SLinus Torvalds bh = head;
28921da177e4SLinus Torvalds do {
28931da177e4SLinus Torvalds if (buffer_busy(bh))
28941da177e4SLinus Torvalds goto failed;
28951da177e4SLinus Torvalds bh = bh->b_this_page;
28961da177e4SLinus Torvalds } while (bh != head);
28971da177e4SLinus Torvalds
28981da177e4SLinus Torvalds do {
28991da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page;
29001da177e4SLinus Torvalds
2901535ee2fbSJan Kara if (bh->b_assoc_map)
29021da177e4SLinus Torvalds __remove_assoc_queue(bh);
29031da177e4SLinus Torvalds bh = next;
29041da177e4SLinus Torvalds } while (bh != head);
29051da177e4SLinus Torvalds *buffers_to_free = head;
290664394763SMatthew Wilcox (Oracle) folio_detach_private(folio);
290764394763SMatthew Wilcox (Oracle) return true;
29081da177e4SLinus Torvalds failed:
290964394763SMatthew Wilcox (Oracle) return false;
29101da177e4SLinus Torvalds }
29111da177e4SLinus Torvalds
2912b1888d14SMatthew Wilcox (Oracle) /**
2913b1888d14SMatthew Wilcox (Oracle) * try_to_free_buffers - Release buffers attached to this folio.
2914b1888d14SMatthew Wilcox (Oracle) * @folio: The folio.
2915b1888d14SMatthew Wilcox (Oracle) *
2916b1888d14SMatthew Wilcox (Oracle) * If any buffers are in use (dirty, under writeback, elevated refcount),
2917b1888d14SMatthew Wilcox (Oracle) * no buffers will be freed.
2918b1888d14SMatthew Wilcox (Oracle) *
2919b1888d14SMatthew Wilcox (Oracle) * If the folio is dirty but all the buffers are clean then we need to
2920b1888d14SMatthew Wilcox (Oracle) * be sure to mark the folio clean as well. This is because the folio
2921b1888d14SMatthew Wilcox (Oracle) * may be against a block device, and a later reattachment of buffers
2922b1888d14SMatthew Wilcox (Oracle) * to a dirty folio will set *all* buffers dirty. Which would corrupt
2923b1888d14SMatthew Wilcox (Oracle) * filesystem data on the same device.
2924b1888d14SMatthew Wilcox (Oracle) *
2925b1888d14SMatthew Wilcox (Oracle) * The same applies to regular filesystem folios: if all the buffers are
2926b1888d14SMatthew Wilcox (Oracle) * clean then we set the folio clean and proceed. To do that, we require
2927b1888d14SMatthew Wilcox (Oracle) * total exclusion from block_dirty_folio(). That is obtained with
2928b1888d14SMatthew Wilcox (Oracle) * i_private_lock.
2929b1888d14SMatthew Wilcox (Oracle) *
2930b1888d14SMatthew Wilcox (Oracle) * Exclusion against try_to_free_buffers may be obtained by either
2931b1888d14SMatthew Wilcox (Oracle) * locking the folio or by holding its mapping's i_private_lock.
2932b1888d14SMatthew Wilcox (Oracle) *
2933b1888d14SMatthew Wilcox (Oracle) * Context: Process context. @folio must be locked. Will not sleep.
2934b1888d14SMatthew Wilcox (Oracle) * Return: true if all buffers attached to this folio were freed.
2935b1888d14SMatthew Wilcox (Oracle) */
try_to_free_buffers(struct folio * folio)293668189fefSMatthew Wilcox (Oracle) bool try_to_free_buffers(struct folio *folio)
29371da177e4SLinus Torvalds {
293868189fefSMatthew Wilcox (Oracle) struct address_space * const mapping = folio->mapping;
29391da177e4SLinus Torvalds struct buffer_head *buffers_to_free = NULL;
294068189fefSMatthew Wilcox (Oracle) bool ret = 0;
29411da177e4SLinus Torvalds
294268189fefSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio));
294368189fefSMatthew Wilcox (Oracle) if (folio_test_writeback(folio))
294468189fefSMatthew Wilcox (Oracle) return false;
29451da177e4SLinus Torvalds
29461da177e4SLinus Torvalds if (mapping == NULL) { /* can this still happen? */
294764394763SMatthew Wilcox (Oracle) ret = drop_buffers(folio, &buffers_to_free);
29481da177e4SLinus Torvalds goto out;
29491da177e4SLinus Torvalds }
29501da177e4SLinus Torvalds
2951600f111eSMatthew Wilcox (Oracle) spin_lock(&mapping->i_private_lock);
295264394763SMatthew Wilcox (Oracle) ret = drop_buffers(folio, &buffers_to_free);
2953ecdfc978SLinus Torvalds
2954ecdfc978SLinus Torvalds /*
2955ecdfc978SLinus Torvalds * If the filesystem writes its buffers by hand (eg ext3)
295668189fefSMatthew Wilcox (Oracle) * then we can have clean buffers against a dirty folio. We
295768189fefSMatthew Wilcox (Oracle) * clean the folio here; otherwise the VM will never notice
2958ecdfc978SLinus Torvalds * that the filesystem did any IO at all.
2959ecdfc978SLinus Torvalds *
2960ecdfc978SLinus Torvalds * Also, during truncate, discard_buffer will have marked all
296168189fefSMatthew Wilcox (Oracle) * the folio's buffers clean. We discover that here and clean
296268189fefSMatthew Wilcox (Oracle) * the folio also.
296387df7241SNick Piggin *
2964600f111eSMatthew Wilcox (Oracle) * i_private_lock must be held over this entire operation in order
2965e621900aSMatthew Wilcox (Oracle) * to synchronise against block_dirty_folio and prevent the
296687df7241SNick Piggin * dirty bit from being lost.
2967ecdfc978SLinus Torvalds */
296811f81becSTejun Heo if (ret)
296968189fefSMatthew Wilcox (Oracle) folio_cancel_dirty(folio);
2970600f111eSMatthew Wilcox (Oracle) spin_unlock(&mapping->i_private_lock);
29711da177e4SLinus Torvalds out:
29721da177e4SLinus Torvalds if (buffers_to_free) {
29731da177e4SLinus Torvalds struct buffer_head *bh = buffers_to_free;
29741da177e4SLinus Torvalds
29751da177e4SLinus Torvalds do {
29761da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page;
29771da177e4SLinus Torvalds free_buffer_head(bh);
29781da177e4SLinus Torvalds bh = next;
29791da177e4SLinus Torvalds } while (bh != buffers_to_free);
29801da177e4SLinus Torvalds }
29811da177e4SLinus Torvalds return ret;
29821da177e4SLinus Torvalds }
29831da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers);
29841da177e4SLinus Torvalds
29851da177e4SLinus Torvalds /*
29861da177e4SLinus Torvalds * Buffer-head allocation
29871da177e4SLinus Torvalds */
298868279f9cSAlexey Dobriyan static struct kmem_cache *bh_cachep __ro_after_init;
29891da177e4SLinus Torvalds
29901da177e4SLinus Torvalds /*
29911da177e4SLinus Torvalds * Once the number of bh's in the machine exceeds this level, we start
29921da177e4SLinus Torvalds * stripping them in writeback.
29931da177e4SLinus Torvalds */
299468279f9cSAlexey Dobriyan static unsigned long max_buffer_heads __ro_after_init;
29951da177e4SLinus Torvalds
29961da177e4SLinus Torvalds int buffer_heads_over_limit;
29971da177e4SLinus Torvalds
29981da177e4SLinus Torvalds struct bh_accounting {
29991da177e4SLinus Torvalds int nr; /* Number of live bh's */
30001da177e4SLinus Torvalds int ratelimit; /* Limit cacheline bouncing */
30011da177e4SLinus Torvalds };
30021da177e4SLinus Torvalds
30031da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
30041da177e4SLinus Torvalds
recalc_bh_state(void)30051da177e4SLinus Torvalds static void recalc_bh_state(void)
30061da177e4SLinus Torvalds {
30071da177e4SLinus Torvalds int i;
30081da177e4SLinus Torvalds int tot = 0;
30091da177e4SLinus Torvalds
3010ee1be862SChristoph Lameter if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
30111da177e4SLinus Torvalds return;
3012c7b92516SChristoph Lameter __this_cpu_write(bh_accounting.ratelimit, 0);
30138a143426SEric Dumazet for_each_online_cpu(i)
30141da177e4SLinus Torvalds tot += per_cpu(bh_accounting, i).nr;
30151da177e4SLinus Torvalds buffer_heads_over_limit = (tot > max_buffer_heads);
30161da177e4SLinus Torvalds }
30171da177e4SLinus Torvalds
alloc_buffer_head(gfp_t gfp_flags)3018dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
30191da177e4SLinus Torvalds {
3020019b4d12SRichard Kennedy struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
30211da177e4SLinus Torvalds if (ret) {
3022a35afb83SChristoph Lameter INIT_LIST_HEAD(&ret->b_assoc_buffers);
3023f1e67e35SThomas Gleixner spin_lock_init(&ret->b_uptodate_lock);
3024c7b92516SChristoph Lameter preempt_disable();
3025c7b92516SChristoph Lameter __this_cpu_inc(bh_accounting.nr);
30261da177e4SLinus Torvalds recalc_bh_state();
3027c7b92516SChristoph Lameter preempt_enable();
30281da177e4SLinus Torvalds }
30291da177e4SLinus Torvalds return ret;
30301da177e4SLinus Torvalds }
30311da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head);
30321da177e4SLinus Torvalds
free_buffer_head(struct buffer_head * bh)30331da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh)
30341da177e4SLinus Torvalds {
30351da177e4SLinus Torvalds BUG_ON(!list_empty(&bh->b_assoc_buffers));
30361da177e4SLinus Torvalds kmem_cache_free(bh_cachep, bh);
3037c7b92516SChristoph Lameter preempt_disable();
3038c7b92516SChristoph Lameter __this_cpu_dec(bh_accounting.nr);
30391da177e4SLinus Torvalds recalc_bh_state();
3040c7b92516SChristoph Lameter preempt_enable();
30411da177e4SLinus Torvalds }
30421da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head);
30431da177e4SLinus Torvalds
buffer_exit_cpu_dead(unsigned int cpu)3044fc4d24c9SSebastian Andrzej Siewior static int buffer_exit_cpu_dead(unsigned int cpu)
30451da177e4SLinus Torvalds {
30461da177e4SLinus Torvalds int i;
30471da177e4SLinus Torvalds struct bh_lru *b = &per_cpu(bh_lrus, cpu);
30481da177e4SLinus Torvalds
30491da177e4SLinus Torvalds for (i = 0; i < BH_LRU_SIZE; i++) {
30501da177e4SLinus Torvalds brelse(b->bhs[i]);
30511da177e4SLinus Torvalds b->bhs[i] = NULL;
30521da177e4SLinus Torvalds }
3053c7b92516SChristoph Lameter this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
30548a143426SEric Dumazet per_cpu(bh_accounting, cpu).nr = 0;
3055fc4d24c9SSebastian Andrzej Siewior return 0;
30561da177e4SLinus Torvalds }
30571da177e4SLinus Torvalds
3058389d1b08SAneesh Kumar K.V /**
3059a6b91919SRandy Dunlap * bh_uptodate_or_lock - Test whether the buffer is uptodate
3060389d1b08SAneesh Kumar K.V * @bh: struct buffer_head
3061389d1b08SAneesh Kumar K.V *
3062389d1b08SAneesh Kumar K.V * Return true if the buffer is up-to-date and false,
3063389d1b08SAneesh Kumar K.V * with the buffer locked, if not.
3064389d1b08SAneesh Kumar K.V */
bh_uptodate_or_lock(struct buffer_head * bh)3065389d1b08SAneesh Kumar K.V int bh_uptodate_or_lock(struct buffer_head *bh)
3066389d1b08SAneesh Kumar K.V {
3067389d1b08SAneesh Kumar K.V if (!buffer_uptodate(bh)) {
3068389d1b08SAneesh Kumar K.V lock_buffer(bh);
3069389d1b08SAneesh Kumar K.V if (!buffer_uptodate(bh))
3070389d1b08SAneesh Kumar K.V return 0;
3071389d1b08SAneesh Kumar K.V unlock_buffer(bh);
3072389d1b08SAneesh Kumar K.V }
3073389d1b08SAneesh Kumar K.V return 1;
3074389d1b08SAneesh Kumar K.V }
3075389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_uptodate_or_lock);
3076389d1b08SAneesh Kumar K.V
3077389d1b08SAneesh Kumar K.V /**
3078fdee117eSZhang Yi * __bh_read - Submit read for a locked buffer
3079389d1b08SAneesh Kumar K.V * @bh: struct buffer_head
3080fdee117eSZhang Yi * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
3081fdee117eSZhang Yi * @wait: wait until reading finish
3082389d1b08SAneesh Kumar K.V *
3083fdee117eSZhang Yi * Returns zero on success or don't wait, and -EIO on error.
3084389d1b08SAneesh Kumar K.V */
__bh_read(struct buffer_head * bh,blk_opf_t op_flags,bool wait)3085fdee117eSZhang Yi int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3086389d1b08SAneesh Kumar K.V {
3087fdee117eSZhang Yi int ret = 0;
3088389d1b08SAneesh Kumar K.V
3089fdee117eSZhang Yi BUG_ON(!buffer_locked(bh));
3090389d1b08SAneesh Kumar K.V
3091389d1b08SAneesh Kumar K.V get_bh(bh);
3092389d1b08SAneesh Kumar K.V bh->b_end_io = end_buffer_read_sync;
3093fdee117eSZhang Yi submit_bh(REQ_OP_READ | op_flags, bh);
3094fdee117eSZhang Yi if (wait) {
3095389d1b08SAneesh Kumar K.V wait_on_buffer(bh);
3096fdee117eSZhang Yi if (!buffer_uptodate(bh))
3097fdee117eSZhang Yi ret = -EIO;
3098389d1b08SAneesh Kumar K.V }
3099fdee117eSZhang Yi return ret;
3100fdee117eSZhang Yi }
3101fdee117eSZhang Yi EXPORT_SYMBOL(__bh_read);
3102fdee117eSZhang Yi
3103fdee117eSZhang Yi /**
3104fdee117eSZhang Yi * __bh_read_batch - Submit read for a batch of unlocked buffers
3105fdee117eSZhang Yi * @nr: entry number of the buffer batch
3106fdee117eSZhang Yi * @bhs: a batch of struct buffer_head
3107fdee117eSZhang Yi * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
3108fdee117eSZhang Yi * @force_lock: force to get a lock on the buffer if set, otherwise drops any
3109fdee117eSZhang Yi * buffer that cannot lock.
3110fdee117eSZhang Yi *
3111fdee117eSZhang Yi * Returns zero on success or don't wait, and -EIO on error.
3112fdee117eSZhang Yi */
__bh_read_batch(int nr,struct buffer_head * bhs[],blk_opf_t op_flags,bool force_lock)3113fdee117eSZhang Yi void __bh_read_batch(int nr, struct buffer_head *bhs[],
3114fdee117eSZhang Yi blk_opf_t op_flags, bool force_lock)
3115fdee117eSZhang Yi {
3116fdee117eSZhang Yi int i;
3117fdee117eSZhang Yi
3118fdee117eSZhang Yi for (i = 0; i < nr; i++) {
3119fdee117eSZhang Yi struct buffer_head *bh = bhs[i];
3120fdee117eSZhang Yi
3121fdee117eSZhang Yi if (buffer_uptodate(bh))
3122fdee117eSZhang Yi continue;
3123fdee117eSZhang Yi
3124fdee117eSZhang Yi if (force_lock)
3125fdee117eSZhang Yi lock_buffer(bh);
3126fdee117eSZhang Yi else
3127fdee117eSZhang Yi if (!trylock_buffer(bh))
3128fdee117eSZhang Yi continue;
3129fdee117eSZhang Yi
3130fdee117eSZhang Yi if (buffer_uptodate(bh)) {
3131fdee117eSZhang Yi unlock_buffer(bh);
3132fdee117eSZhang Yi continue;
3133fdee117eSZhang Yi }
3134fdee117eSZhang Yi
3135fdee117eSZhang Yi bh->b_end_io = end_buffer_read_sync;
3136fdee117eSZhang Yi get_bh(bh);
3137fdee117eSZhang Yi submit_bh(REQ_OP_READ | op_flags, bh);
3138fdee117eSZhang Yi }
3139fdee117eSZhang Yi }
3140fdee117eSZhang Yi EXPORT_SYMBOL(__bh_read_batch);
3141389d1b08SAneesh Kumar K.V
buffer_init(void)31421da177e4SLinus Torvalds void __init buffer_init(void)
31431da177e4SLinus Torvalds {
314443be594aSZhang Yanfei unsigned long nrpages;
3145fc4d24c9SSebastian Andrzej Siewior int ret;
31461da177e4SLinus Torvalds
3147de8a3207SKunwu Chan bh_cachep = KMEM_CACHE(buffer_head,
3148c997d683SChengming Zhou SLAB_RECLAIM_ACCOUNT|SLAB_PANIC);
31491da177e4SLinus Torvalds /*
31501da177e4SLinus Torvalds * Limit the bh occupancy to 10% of ZONE_NORMAL
31511da177e4SLinus Torvalds */
31521da177e4SLinus Torvalds nrpages = (nr_free_buffer_pages() * 10) / 100;
31531da177e4SLinus Torvalds max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3154fc4d24c9SSebastian Andrzej Siewior ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3155fc4d24c9SSebastian Andrzej Siewior NULL, buffer_exit_cpu_dead);
3156fc4d24c9SSebastian Andrzej Siewior WARN_ON(ret < 0);
31571da177e4SLinus Torvalds }
3158