xref: /linux/fs/buffer.c (revision 808441943f6b817f4836752c6e0d1c07507f375e)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  *  linux/fs/buffer.c
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
61da177e4SLinus Torvalds  */
71da177e4SLinus Torvalds 
81da177e4SLinus Torvalds /*
91da177e4SLinus Torvalds  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Removed a lot of unnecessary code and simplified things now that
121da177e4SLinus Torvalds  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
131da177e4SLinus Torvalds  *
141da177e4SLinus Torvalds  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
151da177e4SLinus Torvalds  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
161da177e4SLinus Torvalds  *
171da177e4SLinus Torvalds  * Added 32k buffer block sizes - these are required older ARM systems. - RMK
181da177e4SLinus Torvalds  *
191da177e4SLinus Torvalds  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
201da177e4SLinus Torvalds  */
211da177e4SLinus Torvalds 
221da177e4SLinus Torvalds #include <linux/kernel.h>
23f361bf4aSIngo Molnar #include <linux/sched/signal.h>
241da177e4SLinus Torvalds #include <linux/syscalls.h>
251da177e4SLinus Torvalds #include <linux/fs.h>
26ae259a9cSChristoph Hellwig #include <linux/iomap.h>
271da177e4SLinus Torvalds #include <linux/mm.h>
281da177e4SLinus Torvalds #include <linux/percpu.h>
291da177e4SLinus Torvalds #include <linux/slab.h>
3016f7e0feSRandy Dunlap #include <linux/capability.h>
311da177e4SLinus Torvalds #include <linux/blkdev.h>
321da177e4SLinus Torvalds #include <linux/file.h>
331da177e4SLinus Torvalds #include <linux/quotaops.h>
341da177e4SLinus Torvalds #include <linux/highmem.h>
35630d9c47SPaul Gortmaker #include <linux/export.h>
36bafc0dbaSTejun Heo #include <linux/backing-dev.h>
371da177e4SLinus Torvalds #include <linux/writeback.h>
381da177e4SLinus Torvalds #include <linux/hash.h>
391da177e4SLinus Torvalds #include <linux/suspend.h>
401da177e4SLinus Torvalds #include <linux/buffer_head.h>
4155e829afSAndrew Morton #include <linux/task_io_accounting_ops.h>
421da177e4SLinus Torvalds #include <linux/bio.h>
431da177e4SLinus Torvalds #include <linux/cpu.h>
441da177e4SLinus Torvalds #include <linux/bitops.h>
451da177e4SLinus Torvalds #include <linux/mpage.h>
46fb1c8f93SIngo Molnar #include <linux/bit_spinlock.h>
4729f3ad7dSJan Kara #include <linux/pagevec.h>
48f745c6f5SShakeel Butt #include <linux/sched/mm.h>
495305cb83STejun Heo #include <trace/events/block.h>
5031fb992cSEric Biggers #include <linux/fscrypt.h>
514fa512ceSEric Biggers #include <linux/fsverity.h>
528a237adfSMarcelo Tosatti #include <linux/sched/isolation.h>
531da177e4SLinus Torvalds 
542b211dc0SBen Dooks #include "internal.h"
552b211dc0SBen Dooks 
561da177e4SLinus Torvalds static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
575bdf402aSRitesh Harjani (IBM) static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
581420c4a5SBart Van Assche 			  struct writeback_control *wbc);
591da177e4SLinus Torvalds 
601da177e4SLinus Torvalds #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
611da177e4SLinus Torvalds 
62f0059afdSTejun Heo inline void touch_buffer(struct buffer_head *bh)
63f0059afdSTejun Heo {
645305cb83STejun Heo 	trace_block_touch_buffer(bh);
6503c5f331SMatthew Wilcox (Oracle) 	folio_mark_accessed(bh->b_folio);
66f0059afdSTejun Heo }
67f0059afdSTejun Heo EXPORT_SYMBOL(touch_buffer);
68f0059afdSTejun Heo 
69fc9b52cdSHarvey Harrison void __lock_buffer(struct buffer_head *bh)
701da177e4SLinus Torvalds {
7174316201SNeilBrown 	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
721da177e4SLinus Torvalds }
731da177e4SLinus Torvalds EXPORT_SYMBOL(__lock_buffer);
741da177e4SLinus Torvalds 
75fc9b52cdSHarvey Harrison void unlock_buffer(struct buffer_head *bh)
761da177e4SLinus Torvalds {
7751b07fc3SNick Piggin 	clear_bit_unlock(BH_Lock, &bh->b_state);
784e857c58SPeter Zijlstra 	smp_mb__after_atomic();
791da177e4SLinus Torvalds 	wake_up_bit(&bh->b_state, BH_Lock);
801da177e4SLinus Torvalds }
811fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(unlock_buffer);
821da177e4SLinus Torvalds 
831da177e4SLinus Torvalds /*
84520f301cSMatthew Wilcox (Oracle)  * Returns if the folio has dirty or writeback buffers. If all the buffers
85520f301cSMatthew Wilcox (Oracle)  * are unlocked and clean then the folio_test_dirty information is stale. If
86520f301cSMatthew Wilcox (Oracle)  * any of the buffers are locked, it is assumed they are locked for IO.
87b4597226SMel Gorman  */
88520f301cSMatthew Wilcox (Oracle) void buffer_check_dirty_writeback(struct folio *folio,
89b4597226SMel Gorman 				     bool *dirty, bool *writeback)
90b4597226SMel Gorman {
91b4597226SMel Gorman 	struct buffer_head *head, *bh;
92b4597226SMel Gorman 	*dirty = false;
93b4597226SMel Gorman 	*writeback = false;
94b4597226SMel Gorman 
95520f301cSMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
96b4597226SMel Gorman 
97520f301cSMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
98520f301cSMatthew Wilcox (Oracle) 	if (!head)
99b4597226SMel Gorman 		return;
100b4597226SMel Gorman 
101520f301cSMatthew Wilcox (Oracle) 	if (folio_test_writeback(folio))
102b4597226SMel Gorman 		*writeback = true;
103b4597226SMel Gorman 
104b4597226SMel Gorman 	bh = head;
105b4597226SMel Gorman 	do {
106b4597226SMel Gorman 		if (buffer_locked(bh))
107b4597226SMel Gorman 			*writeback = true;
108b4597226SMel Gorman 
109b4597226SMel Gorman 		if (buffer_dirty(bh))
110b4597226SMel Gorman 			*dirty = true;
111b4597226SMel Gorman 
112b4597226SMel Gorman 		bh = bh->b_this_page;
113b4597226SMel Gorman 	} while (bh != head);
114b4597226SMel Gorman }
115b4597226SMel Gorman 
116b4597226SMel Gorman /*
1171da177e4SLinus Torvalds  * Block until a buffer comes unlocked.  This doesn't stop it
1181da177e4SLinus Torvalds  * from becoming locked again - you have to lock it yourself
1191da177e4SLinus Torvalds  * if you want to preserve its state.
1201da177e4SLinus Torvalds  */
1211da177e4SLinus Torvalds void __wait_on_buffer(struct buffer_head * bh)
1221da177e4SLinus Torvalds {
12374316201SNeilBrown 	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
1241da177e4SLinus Torvalds }
1251fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__wait_on_buffer);
1261da177e4SLinus Torvalds 
127b744c2acSRobert Elliott static void buffer_io_error(struct buffer_head *bh, char *msg)
1281da177e4SLinus Torvalds {
129432f16e6SRobert Elliott 	if (!test_bit(BH_Quiet, &bh->b_state))
130432f16e6SRobert Elliott 		printk_ratelimited(KERN_ERR
131a1c6f057SDmitry Monakhov 			"Buffer I/O error on dev %pg, logical block %llu%s\n",
132a1c6f057SDmitry Monakhov 			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
1331da177e4SLinus Torvalds }
1341da177e4SLinus Torvalds 
1351da177e4SLinus Torvalds /*
13668671f35SDmitry Monakhov  * End-of-IO handler helper function which does not touch the bh after
13768671f35SDmitry Monakhov  * unlocking it.
13868671f35SDmitry Monakhov  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
13968671f35SDmitry Monakhov  * a race there is benign: unlock_buffer() only use the bh's address for
14068671f35SDmitry Monakhov  * hashing after unlocking the buffer, so it doesn't actually touch the bh
14168671f35SDmitry Monakhov  * itself.
1421da177e4SLinus Torvalds  */
14368671f35SDmitry Monakhov static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
1441da177e4SLinus Torvalds {
1451da177e4SLinus Torvalds 	if (uptodate) {
1461da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
1471da177e4SLinus Torvalds 	} else {
14870246286SChristoph Hellwig 		/* This happens, due to failed read-ahead attempts. */
1491da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
1501da177e4SLinus Torvalds 	}
1511da177e4SLinus Torvalds 	unlock_buffer(bh);
15268671f35SDmitry Monakhov }
15368671f35SDmitry Monakhov 
15468671f35SDmitry Monakhov /*
15568671f35SDmitry Monakhov  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
15679f59784SZhang Yi  * unlock the buffer.
15768671f35SDmitry Monakhov  */
15868671f35SDmitry Monakhov void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
15968671f35SDmitry Monakhov {
16068671f35SDmitry Monakhov 	__end_buffer_read_notouch(bh, uptodate);
1611da177e4SLinus Torvalds 	put_bh(bh);
1621da177e4SLinus Torvalds }
1631fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(end_buffer_read_sync);
1641da177e4SLinus Torvalds 
1651da177e4SLinus Torvalds void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
1661da177e4SLinus Torvalds {
1671da177e4SLinus Torvalds 	if (uptodate) {
1681da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
1691da177e4SLinus Torvalds 	} else {
170b744c2acSRobert Elliott 		buffer_io_error(bh, ", lost sync page write");
17187354e5dSJeff Layton 		mark_buffer_write_io_error(bh);
1721da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
1731da177e4SLinus Torvalds 	}
1741da177e4SLinus Torvalds 	unlock_buffer(bh);
1751da177e4SLinus Torvalds 	put_bh(bh);
1761da177e4SLinus Torvalds }
1771fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(end_buffer_write_sync);
1781da177e4SLinus Torvalds 
1791da177e4SLinus Torvalds /*
1801da177e4SLinus Torvalds  * Various filesystems appear to want __find_get_block to be non-blocking.
1811da177e4SLinus Torvalds  * But it's the page lock which protects the buffers.  To get around this,
1821da177e4SLinus Torvalds  * we get exclusion from try_to_free_buffers with the blockdev mapping's
1831da177e4SLinus Torvalds  * private_lock.
1841da177e4SLinus Torvalds  *
185b93b0163SMatthew Wilcox  * Hack idea: for the blockdev mapping, private_lock contention
1861da177e4SLinus Torvalds  * may be quite high.  This code could TryLock the page, and if that
187b93b0163SMatthew Wilcox  * succeeds, there is no need to take private_lock.
1881da177e4SLinus Torvalds  */
1891da177e4SLinus Torvalds static struct buffer_head *
190385fd4c5SCoywolf Qi Hunt __find_get_block_slow(struct block_device *bdev, sector_t block)
1911da177e4SLinus Torvalds {
1921da177e4SLinus Torvalds 	struct inode *bd_inode = bdev->bd_inode;
1931da177e4SLinus Torvalds 	struct address_space *bd_mapping = bd_inode->i_mapping;
1941da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
1951da177e4SLinus Torvalds 	pgoff_t index;
1961da177e4SLinus Torvalds 	struct buffer_head *bh;
1971da177e4SLinus Torvalds 	struct buffer_head *head;
198eee25182SMatthew Wilcox (Oracle) 	struct folio *folio;
1991da177e4SLinus Torvalds 	int all_mapped = 1;
20043636c80STetsuo Handa 	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
2011da177e4SLinus Torvalds 
20209cbfeafSKirill A. Shutemov 	index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
203eee25182SMatthew Wilcox (Oracle) 	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
204eee25182SMatthew Wilcox (Oracle) 	if (IS_ERR(folio))
2051da177e4SLinus Torvalds 		goto out;
2061da177e4SLinus Torvalds 
2071da177e4SLinus Torvalds 	spin_lock(&bd_mapping->private_lock);
208eee25182SMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
209eee25182SMatthew Wilcox (Oracle) 	if (!head)
2101da177e4SLinus Torvalds 		goto out_unlock;
2111da177e4SLinus Torvalds 	bh = head;
2121da177e4SLinus Torvalds 	do {
21397f76d3dSNikanth Karthikesan 		if (!buffer_mapped(bh))
21497f76d3dSNikanth Karthikesan 			all_mapped = 0;
21597f76d3dSNikanth Karthikesan 		else if (bh->b_blocknr == block) {
2161da177e4SLinus Torvalds 			ret = bh;
2171da177e4SLinus Torvalds 			get_bh(bh);
2181da177e4SLinus Torvalds 			goto out_unlock;
2191da177e4SLinus Torvalds 		}
2201da177e4SLinus Torvalds 		bh = bh->b_this_page;
2211da177e4SLinus Torvalds 	} while (bh != head);
2221da177e4SLinus Torvalds 
2231da177e4SLinus Torvalds 	/* we might be here because some of the buffers on this page are
2241da177e4SLinus Torvalds 	 * not mapped.  This is due to various races between
2251da177e4SLinus Torvalds 	 * file io on the block device and getblk.  It gets dealt with
2261da177e4SLinus Torvalds 	 * elsewhere, don't buffer_error if we had some unmapped buffers
2271da177e4SLinus Torvalds 	 */
22843636c80STetsuo Handa 	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
22943636c80STetsuo Handa 	if (all_mapped && __ratelimit(&last_warned)) {
23043636c80STetsuo Handa 		printk("__find_get_block_slow() failed. block=%llu, "
23143636c80STetsuo Handa 		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
23243636c80STetsuo Handa 		       "device %pg blocksize: %d\n",
233205f87f6SBadari Pulavarty 		       (unsigned long long)block,
23443636c80STetsuo Handa 		       (unsigned long long)bh->b_blocknr,
23543636c80STetsuo Handa 		       bh->b_state, bh->b_size, bdev,
23672a2ebd8STao Ma 		       1 << bd_inode->i_blkbits);
2371da177e4SLinus Torvalds 	}
2381da177e4SLinus Torvalds out_unlock:
2391da177e4SLinus Torvalds 	spin_unlock(&bd_mapping->private_lock);
240eee25182SMatthew Wilcox (Oracle) 	folio_put(folio);
2411da177e4SLinus Torvalds out:
2421da177e4SLinus Torvalds 	return ret;
2431da177e4SLinus Torvalds }
2441da177e4SLinus Torvalds 
2451da177e4SLinus Torvalds static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
2461da177e4SLinus Torvalds {
2471da177e4SLinus Torvalds 	unsigned long flags;
248a3972203SNick Piggin 	struct buffer_head *first;
2491da177e4SLinus Torvalds 	struct buffer_head *tmp;
2502e2dba15SMatthew Wilcox (Oracle) 	struct folio *folio;
2512e2dba15SMatthew Wilcox (Oracle) 	int folio_uptodate = 1;
2521da177e4SLinus Torvalds 
2531da177e4SLinus Torvalds 	BUG_ON(!buffer_async_read(bh));
2541da177e4SLinus Torvalds 
2552e2dba15SMatthew Wilcox (Oracle) 	folio = bh->b_folio;
2561da177e4SLinus Torvalds 	if (uptodate) {
2571da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
2581da177e4SLinus Torvalds 	} else {
2591da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
260b744c2acSRobert Elliott 		buffer_io_error(bh, ", async page read");
2612e2dba15SMatthew Wilcox (Oracle) 		folio_set_error(folio);
2621da177e4SLinus Torvalds 	}
2631da177e4SLinus Torvalds 
2641da177e4SLinus Torvalds 	/*
2651da177e4SLinus Torvalds 	 * Be _very_ careful from here on. Bad things can happen if
2661da177e4SLinus Torvalds 	 * two buffer heads end IO at almost the same time and both
2671da177e4SLinus Torvalds 	 * decide that the page is now completely done.
2681da177e4SLinus Torvalds 	 */
2692e2dba15SMatthew Wilcox (Oracle) 	first = folio_buffers(folio);
270f1e67e35SThomas Gleixner 	spin_lock_irqsave(&first->b_uptodate_lock, flags);
2711da177e4SLinus Torvalds 	clear_buffer_async_read(bh);
2721da177e4SLinus Torvalds 	unlock_buffer(bh);
2731da177e4SLinus Torvalds 	tmp = bh;
2741da177e4SLinus Torvalds 	do {
2751da177e4SLinus Torvalds 		if (!buffer_uptodate(tmp))
2762e2dba15SMatthew Wilcox (Oracle) 			folio_uptodate = 0;
2771da177e4SLinus Torvalds 		if (buffer_async_read(tmp)) {
2781da177e4SLinus Torvalds 			BUG_ON(!buffer_locked(tmp));
2791da177e4SLinus Torvalds 			goto still_busy;
2801da177e4SLinus Torvalds 		}
2811da177e4SLinus Torvalds 		tmp = tmp->b_this_page;
2821da177e4SLinus Torvalds 	} while (tmp != bh);
283f1e67e35SThomas Gleixner 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
2841da177e4SLinus Torvalds 
2856ba924d3SMatthew Wilcox (Oracle) 	folio_end_read(folio, folio_uptodate);
2861da177e4SLinus Torvalds 	return;
2871da177e4SLinus Torvalds 
2881da177e4SLinus Torvalds still_busy:
289f1e67e35SThomas Gleixner 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
2901da177e4SLinus Torvalds 	return;
2911da177e4SLinus Torvalds }
2921da177e4SLinus Torvalds 
2934fa512ceSEric Biggers struct postprocess_bh_ctx {
29431fb992cSEric Biggers 	struct work_struct work;
29531fb992cSEric Biggers 	struct buffer_head *bh;
29631fb992cSEric Biggers };
29731fb992cSEric Biggers 
2984fa512ceSEric Biggers static void verify_bh(struct work_struct *work)
2994fa512ceSEric Biggers {
3004fa512ceSEric Biggers 	struct postprocess_bh_ctx *ctx =
3014fa512ceSEric Biggers 		container_of(work, struct postprocess_bh_ctx, work);
3024fa512ceSEric Biggers 	struct buffer_head *bh = ctx->bh;
3034fa512ceSEric Biggers 	bool valid;
3044fa512ceSEric Biggers 
3058b7d3fe9SEric Biggers 	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
3064fa512ceSEric Biggers 	end_buffer_async_read(bh, valid);
3074fa512ceSEric Biggers 	kfree(ctx);
3084fa512ceSEric Biggers }
3094fa512ceSEric Biggers 
3104fa512ceSEric Biggers static bool need_fsverity(struct buffer_head *bh)
3114fa512ceSEric Biggers {
3128b7d3fe9SEric Biggers 	struct folio *folio = bh->b_folio;
3138b7d3fe9SEric Biggers 	struct inode *inode = folio->mapping->host;
3144fa512ceSEric Biggers 
3154fa512ceSEric Biggers 	return fsverity_active(inode) &&
3164fa512ceSEric Biggers 		/* needed by ext4 */
3178b7d3fe9SEric Biggers 		folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
3184fa512ceSEric Biggers }
3194fa512ceSEric Biggers 
32031fb992cSEric Biggers static void decrypt_bh(struct work_struct *work)
32131fb992cSEric Biggers {
3224fa512ceSEric Biggers 	struct postprocess_bh_ctx *ctx =
3234fa512ceSEric Biggers 		container_of(work, struct postprocess_bh_ctx, work);
32431fb992cSEric Biggers 	struct buffer_head *bh = ctx->bh;
32531fb992cSEric Biggers 	int err;
32631fb992cSEric Biggers 
3279c7fb7f7SEric Biggers 	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
3289c7fb7f7SEric Biggers 					       bh_offset(bh));
3294fa512ceSEric Biggers 	if (err == 0 && need_fsverity(bh)) {
3304fa512ceSEric Biggers 		/*
3314fa512ceSEric Biggers 		 * We use different work queues for decryption and for verity
3324fa512ceSEric Biggers 		 * because verity may require reading metadata pages that need
3334fa512ceSEric Biggers 		 * decryption, and we shouldn't recurse to the same workqueue.
3344fa512ceSEric Biggers 		 */
3354fa512ceSEric Biggers 		INIT_WORK(&ctx->work, verify_bh);
3364fa512ceSEric Biggers 		fsverity_enqueue_verify_work(&ctx->work);
3374fa512ceSEric Biggers 		return;
3384fa512ceSEric Biggers 	}
33931fb992cSEric Biggers 	end_buffer_async_read(bh, err == 0);
34031fb992cSEric Biggers 	kfree(ctx);
34131fb992cSEric Biggers }
34231fb992cSEric Biggers 
34331fb992cSEric Biggers /*
3442c69e205SMatthew Wilcox (Oracle)  * I/O completion handler for block_read_full_folio() - pages
34531fb992cSEric Biggers  * which come unlocked at the end of I/O.
34631fb992cSEric Biggers  */
34731fb992cSEric Biggers static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
34831fb992cSEric Biggers {
3493822a7c4SLinus Torvalds 	struct inode *inode = bh->b_folio->mapping->host;
3504fa512ceSEric Biggers 	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
3514fa512ceSEric Biggers 	bool verify = need_fsverity(bh);
3524fa512ceSEric Biggers 
3534fa512ceSEric Biggers 	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
3544fa512ceSEric Biggers 	if (uptodate && (decrypt || verify)) {
3554fa512ceSEric Biggers 		struct postprocess_bh_ctx *ctx =
3564fa512ceSEric Biggers 			kmalloc(sizeof(*ctx), GFP_ATOMIC);
35731fb992cSEric Biggers 
35831fb992cSEric Biggers 		if (ctx) {
35931fb992cSEric Biggers 			ctx->bh = bh;
3604fa512ceSEric Biggers 			if (decrypt) {
3614fa512ceSEric Biggers 				INIT_WORK(&ctx->work, decrypt_bh);
36231fb992cSEric Biggers 				fscrypt_enqueue_decrypt_work(&ctx->work);
3634fa512ceSEric Biggers 			} else {
3644fa512ceSEric Biggers 				INIT_WORK(&ctx->work, verify_bh);
3654fa512ceSEric Biggers 				fsverity_enqueue_verify_work(&ctx->work);
3664fa512ceSEric Biggers 			}
36731fb992cSEric Biggers 			return;
36831fb992cSEric Biggers 		}
36931fb992cSEric Biggers 		uptodate = 0;
37031fb992cSEric Biggers 	}
37131fb992cSEric Biggers 	end_buffer_async_read(bh, uptodate);
37231fb992cSEric Biggers }
37331fb992cSEric Biggers 
3741da177e4SLinus Torvalds /*
3751da177e4SLinus Torvalds  * Completion handler for block_write_full_page() - pages which are unlocked
3761da177e4SLinus Torvalds  * during I/O, and which have PageWriteback cleared upon I/O completion.
3771da177e4SLinus Torvalds  */
37835c80d5fSChris Mason void end_buffer_async_write(struct buffer_head *bh, int uptodate)
3791da177e4SLinus Torvalds {
3801da177e4SLinus Torvalds 	unsigned long flags;
381a3972203SNick Piggin 	struct buffer_head *first;
3821da177e4SLinus Torvalds 	struct buffer_head *tmp;
383743ed81eSMatthew Wilcox (Oracle) 	struct folio *folio;
3841da177e4SLinus Torvalds 
3851da177e4SLinus Torvalds 	BUG_ON(!buffer_async_write(bh));
3861da177e4SLinus Torvalds 
387743ed81eSMatthew Wilcox (Oracle) 	folio = bh->b_folio;
3881da177e4SLinus Torvalds 	if (uptodate) {
3891da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
3901da177e4SLinus Torvalds 	} else {
391b744c2acSRobert Elliott 		buffer_io_error(bh, ", lost async page write");
39287354e5dSJeff Layton 		mark_buffer_write_io_error(bh);
3931da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
394743ed81eSMatthew Wilcox (Oracle) 		folio_set_error(folio);
3951da177e4SLinus Torvalds 	}
3961da177e4SLinus Torvalds 
397743ed81eSMatthew Wilcox (Oracle) 	first = folio_buffers(folio);
398f1e67e35SThomas Gleixner 	spin_lock_irqsave(&first->b_uptodate_lock, flags);
399a3972203SNick Piggin 
4001da177e4SLinus Torvalds 	clear_buffer_async_write(bh);
4011da177e4SLinus Torvalds 	unlock_buffer(bh);
4021da177e4SLinus Torvalds 	tmp = bh->b_this_page;
4031da177e4SLinus Torvalds 	while (tmp != bh) {
4041da177e4SLinus Torvalds 		if (buffer_async_write(tmp)) {
4051da177e4SLinus Torvalds 			BUG_ON(!buffer_locked(tmp));
4061da177e4SLinus Torvalds 			goto still_busy;
4071da177e4SLinus Torvalds 		}
4081da177e4SLinus Torvalds 		tmp = tmp->b_this_page;
4091da177e4SLinus Torvalds 	}
410f1e67e35SThomas Gleixner 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
411743ed81eSMatthew Wilcox (Oracle) 	folio_end_writeback(folio);
4121da177e4SLinus Torvalds 	return;
4131da177e4SLinus Torvalds 
4141da177e4SLinus Torvalds still_busy:
415f1e67e35SThomas Gleixner 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
4161da177e4SLinus Torvalds 	return;
4171da177e4SLinus Torvalds }
4181fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(end_buffer_async_write);
4191da177e4SLinus Torvalds 
4201da177e4SLinus Torvalds /*
4211da177e4SLinus Torvalds  * If a page's buffers are under async readin (end_buffer_async_read
4221da177e4SLinus Torvalds  * completion) then there is a possibility that another thread of
4231da177e4SLinus Torvalds  * control could lock one of the buffers after it has completed
4241da177e4SLinus Torvalds  * but while some of the other buffers have not completed.  This
4251da177e4SLinus Torvalds  * locked buffer would confuse end_buffer_async_read() into not unlocking
4261da177e4SLinus Torvalds  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
4271da177e4SLinus Torvalds  * that this buffer is not under async I/O.
4281da177e4SLinus Torvalds  *
4291da177e4SLinus Torvalds  * The page comes unlocked when it has no locked buffer_async buffers
4301da177e4SLinus Torvalds  * left.
4311da177e4SLinus Torvalds  *
4321da177e4SLinus Torvalds  * PageLocked prevents anyone starting new async I/O reads any of
4331da177e4SLinus Torvalds  * the buffers.
4341da177e4SLinus Torvalds  *
4351da177e4SLinus Torvalds  * PageWriteback is used to prevent simultaneous writeout of the same
4361da177e4SLinus Torvalds  * page.
4371da177e4SLinus Torvalds  *
4381da177e4SLinus Torvalds  * PageLocked prevents anyone from starting writeback of a page which is
4391da177e4SLinus Torvalds  * under read I/O (PageWriteback is only ever set against a locked page).
4401da177e4SLinus Torvalds  */
4411da177e4SLinus Torvalds static void mark_buffer_async_read(struct buffer_head *bh)
4421da177e4SLinus Torvalds {
44331fb992cSEric Biggers 	bh->b_end_io = end_buffer_async_read_io;
4441da177e4SLinus Torvalds 	set_buffer_async_read(bh);
4451da177e4SLinus Torvalds }
4461da177e4SLinus Torvalds 
4471fe72eaaSH Hartley Sweeten static void mark_buffer_async_write_endio(struct buffer_head *bh,
44835c80d5fSChris Mason 					  bh_end_io_t *handler)
44935c80d5fSChris Mason {
45035c80d5fSChris Mason 	bh->b_end_io = handler;
45135c80d5fSChris Mason 	set_buffer_async_write(bh);
45235c80d5fSChris Mason }
45335c80d5fSChris Mason 
4541da177e4SLinus Torvalds void mark_buffer_async_write(struct buffer_head *bh)
4551da177e4SLinus Torvalds {
45635c80d5fSChris Mason 	mark_buffer_async_write_endio(bh, end_buffer_async_write);
4571da177e4SLinus Torvalds }
4581da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_async_write);
4591da177e4SLinus Torvalds 
4601da177e4SLinus Torvalds 
4611da177e4SLinus Torvalds /*
4621da177e4SLinus Torvalds  * fs/buffer.c contains helper functions for buffer-backed address space's
4631da177e4SLinus Torvalds  * fsync functions.  A common requirement for buffer-based filesystems is
4641da177e4SLinus Torvalds  * that certain data from the backing blockdev needs to be written out for
4651da177e4SLinus Torvalds  * a successful fsync().  For example, ext2 indirect blocks need to be
4661da177e4SLinus Torvalds  * written back and waited upon before fsync() returns.
4671da177e4SLinus Torvalds  *
4681da177e4SLinus Torvalds  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
4691da177e4SLinus Torvalds  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
4701da177e4SLinus Torvalds  * management of a list of dependent buffers at ->i_mapping->private_list.
4711da177e4SLinus Torvalds  *
4721da177e4SLinus Torvalds  * Locking is a little subtle: try_to_free_buffers() will remove buffers
4731da177e4SLinus Torvalds  * from their controlling inode's queue when they are being freed.  But
4741da177e4SLinus Torvalds  * try_to_free_buffers() will be operating against the *blockdev* mapping
4751da177e4SLinus Torvalds  * at the time, not against the S_ISREG file which depends on those buffers.
4761da177e4SLinus Torvalds  * So the locking for private_list is via the private_lock in the address_space
4771da177e4SLinus Torvalds  * which backs the buffers.  Which is different from the address_space
4781da177e4SLinus Torvalds  * against which the buffers are listed.  So for a particular address_space,
4791da177e4SLinus Torvalds  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
4801da177e4SLinus Torvalds  * mapping->private_list will always be protected by the backing blockdev's
4811da177e4SLinus Torvalds  * ->private_lock.
4821da177e4SLinus Torvalds  *
4831da177e4SLinus Torvalds  * Which introduces a requirement: all buffers on an address_space's
4841da177e4SLinus Torvalds  * ->private_list must be from the same address_space: the blockdev's.
4851da177e4SLinus Torvalds  *
4861da177e4SLinus Torvalds  * address_spaces which do not place buffers at ->private_list via these
4871da177e4SLinus Torvalds  * utility functions are free to use private_lock and private_list for
4881da177e4SLinus Torvalds  * whatever they want.  The only requirement is that list_empty(private_list)
4891da177e4SLinus Torvalds  * be true at clear_inode() time.
4901da177e4SLinus Torvalds  *
4911da177e4SLinus Torvalds  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
4921da177e4SLinus Torvalds  * filesystems should do that.  invalidate_inode_buffers() should just go
4931da177e4SLinus Torvalds  * BUG_ON(!list_empty).
4941da177e4SLinus Torvalds  *
4951da177e4SLinus Torvalds  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
4961da177e4SLinus Torvalds  * take an address_space, not an inode.  And it should be called
4971da177e4SLinus Torvalds  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
4981da177e4SLinus Torvalds  * queued up.
4991da177e4SLinus Torvalds  *
5001da177e4SLinus Torvalds  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
5011da177e4SLinus Torvalds  * list if it is already on a list.  Because if the buffer is on a list,
5021da177e4SLinus Torvalds  * it *must* already be on the right one.  If not, the filesystem is being
5031da177e4SLinus Torvalds  * silly.  This will save a ton of locking.  But first we have to ensure
5041da177e4SLinus Torvalds  * that buffers are taken *off* the old inode's list when they are freed
5051da177e4SLinus Torvalds  * (presumably in truncate).  That requires careful auditing of all
5061da177e4SLinus Torvalds  * filesystems (do it inside bforget()).  It could also be done by bringing
5071da177e4SLinus Torvalds  * b_inode back.
5081da177e4SLinus Torvalds  */
5091da177e4SLinus Torvalds 
5101da177e4SLinus Torvalds /*
5111da177e4SLinus Torvalds  * The buffer's backing address_space's private_lock must be held
5121da177e4SLinus Torvalds  */
513dbacefc9SThomas Petazzoni static void __remove_assoc_queue(struct buffer_head *bh)
5141da177e4SLinus Torvalds {
5151da177e4SLinus Torvalds 	list_del_init(&bh->b_assoc_buffers);
51658ff407bSJan Kara 	WARN_ON(!bh->b_assoc_map);
51758ff407bSJan Kara 	bh->b_assoc_map = NULL;
5181da177e4SLinus Torvalds }
5191da177e4SLinus Torvalds 
5201da177e4SLinus Torvalds int inode_has_buffers(struct inode *inode)
5211da177e4SLinus Torvalds {
5221da177e4SLinus Torvalds 	return !list_empty(&inode->i_data.private_list);
5231da177e4SLinus Torvalds }
5241da177e4SLinus Torvalds 
5251da177e4SLinus Torvalds /*
5261da177e4SLinus Torvalds  * osync is designed to support O_SYNC io.  It waits synchronously for
5271da177e4SLinus Torvalds  * all already-submitted IO to complete, but does not queue any new
5281da177e4SLinus Torvalds  * writes to the disk.
5291da177e4SLinus Torvalds  *
53079f59784SZhang Yi  * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
53179f59784SZhang Yi  * as you dirty the buffers, and then use osync_inode_buffers to wait for
5321da177e4SLinus Torvalds  * completion.  Any other dirty buffers which are not yet queued for
5331da177e4SLinus Torvalds  * write will not be flushed to disk by the osync.
5341da177e4SLinus Torvalds  */
5351da177e4SLinus Torvalds static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
5361da177e4SLinus Torvalds {
5371da177e4SLinus Torvalds 	struct buffer_head *bh;
5381da177e4SLinus Torvalds 	struct list_head *p;
5391da177e4SLinus Torvalds 	int err = 0;
5401da177e4SLinus Torvalds 
5411da177e4SLinus Torvalds 	spin_lock(lock);
5421da177e4SLinus Torvalds repeat:
5431da177e4SLinus Torvalds 	list_for_each_prev(p, list) {
5441da177e4SLinus Torvalds 		bh = BH_ENTRY(p);
5451da177e4SLinus Torvalds 		if (buffer_locked(bh)) {
5461da177e4SLinus Torvalds 			get_bh(bh);
5471da177e4SLinus Torvalds 			spin_unlock(lock);
5481da177e4SLinus Torvalds 			wait_on_buffer(bh);
5491da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
5501da177e4SLinus Torvalds 				err = -EIO;
5511da177e4SLinus Torvalds 			brelse(bh);
5521da177e4SLinus Torvalds 			spin_lock(lock);
5531da177e4SLinus Torvalds 			goto repeat;
5541da177e4SLinus Torvalds 		}
5551da177e4SLinus Torvalds 	}
5561da177e4SLinus Torvalds 	spin_unlock(lock);
5571da177e4SLinus Torvalds 	return err;
5581da177e4SLinus Torvalds }
5591da177e4SLinus Torvalds 
5601da177e4SLinus Torvalds /**
56178a4a50aSRandy Dunlap  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
56267be2dd1SMartin Waitz  * @mapping: the mapping which wants those buffers written
5631da177e4SLinus Torvalds  *
5641da177e4SLinus Torvalds  * Starts I/O against the buffers at mapping->private_list, and waits upon
5651da177e4SLinus Torvalds  * that I/O.
5661da177e4SLinus Torvalds  *
56767be2dd1SMartin Waitz  * Basically, this is a convenience function for fsync().
56867be2dd1SMartin Waitz  * @mapping is a file or directory which needs those buffers to be written for
56967be2dd1SMartin Waitz  * a successful fsync().
5701da177e4SLinus Torvalds  */
5711da177e4SLinus Torvalds int sync_mapping_buffers(struct address_space *mapping)
5721da177e4SLinus Torvalds {
573252aa6f5SRafael Aquini 	struct address_space *buffer_mapping = mapping->private_data;
5741da177e4SLinus Torvalds 
5751da177e4SLinus Torvalds 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
5761da177e4SLinus Torvalds 		return 0;
5771da177e4SLinus Torvalds 
5781da177e4SLinus Torvalds 	return fsync_buffers_list(&buffer_mapping->private_lock,
5791da177e4SLinus Torvalds 					&mapping->private_list);
5801da177e4SLinus Torvalds }
5811da177e4SLinus Torvalds EXPORT_SYMBOL(sync_mapping_buffers);
5821da177e4SLinus Torvalds 
58331b2ebc0SRitesh Harjani (IBM) /**
58431b2ebc0SRitesh Harjani (IBM)  * generic_buffers_fsync_noflush - generic buffer fsync implementation
58531b2ebc0SRitesh Harjani (IBM)  * for simple filesystems with no inode lock
58631b2ebc0SRitesh Harjani (IBM)  *
58731b2ebc0SRitesh Harjani (IBM)  * @file:	file to synchronize
58831b2ebc0SRitesh Harjani (IBM)  * @start:	start offset in bytes
58931b2ebc0SRitesh Harjani (IBM)  * @end:	end offset in bytes (inclusive)
59031b2ebc0SRitesh Harjani (IBM)  * @datasync:	only synchronize essential metadata if true
59131b2ebc0SRitesh Harjani (IBM)  *
59231b2ebc0SRitesh Harjani (IBM)  * This is a generic implementation of the fsync method for simple
59331b2ebc0SRitesh Harjani (IBM)  * filesystems which track all non-inode metadata in the buffers list
59431b2ebc0SRitesh Harjani (IBM)  * hanging off the address_space structure.
59531b2ebc0SRitesh Harjani (IBM)  */
59631b2ebc0SRitesh Harjani (IBM) int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
59731b2ebc0SRitesh Harjani (IBM) 				  bool datasync)
59831b2ebc0SRitesh Harjani (IBM) {
59931b2ebc0SRitesh Harjani (IBM) 	struct inode *inode = file->f_mapping->host;
60031b2ebc0SRitesh Harjani (IBM) 	int err;
60131b2ebc0SRitesh Harjani (IBM) 	int ret;
60231b2ebc0SRitesh Harjani (IBM) 
60331b2ebc0SRitesh Harjani (IBM) 	err = file_write_and_wait_range(file, start, end);
60431b2ebc0SRitesh Harjani (IBM) 	if (err)
60531b2ebc0SRitesh Harjani (IBM) 		return err;
60631b2ebc0SRitesh Harjani (IBM) 
60731b2ebc0SRitesh Harjani (IBM) 	ret = sync_mapping_buffers(inode->i_mapping);
60831b2ebc0SRitesh Harjani (IBM) 	if (!(inode->i_state & I_DIRTY_ALL))
60931b2ebc0SRitesh Harjani (IBM) 		goto out;
61031b2ebc0SRitesh Harjani (IBM) 	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
61131b2ebc0SRitesh Harjani (IBM) 		goto out;
61231b2ebc0SRitesh Harjani (IBM) 
61331b2ebc0SRitesh Harjani (IBM) 	err = sync_inode_metadata(inode, 1);
61431b2ebc0SRitesh Harjani (IBM) 	if (ret == 0)
61531b2ebc0SRitesh Harjani (IBM) 		ret = err;
61631b2ebc0SRitesh Harjani (IBM) 
61731b2ebc0SRitesh Harjani (IBM) out:
61831b2ebc0SRitesh Harjani (IBM) 	/* check and advance again to catch errors after syncing out buffers */
61931b2ebc0SRitesh Harjani (IBM) 	err = file_check_and_advance_wb_err(file);
62031b2ebc0SRitesh Harjani (IBM) 	if (ret == 0)
62131b2ebc0SRitesh Harjani (IBM) 		ret = err;
62231b2ebc0SRitesh Harjani (IBM) 	return ret;
62331b2ebc0SRitesh Harjani (IBM) }
62431b2ebc0SRitesh Harjani (IBM) EXPORT_SYMBOL(generic_buffers_fsync_noflush);
62531b2ebc0SRitesh Harjani (IBM) 
62631b2ebc0SRitesh Harjani (IBM) /**
62731b2ebc0SRitesh Harjani (IBM)  * generic_buffers_fsync - generic buffer fsync implementation
62831b2ebc0SRitesh Harjani (IBM)  * for simple filesystems with no inode lock
62931b2ebc0SRitesh Harjani (IBM)  *
63031b2ebc0SRitesh Harjani (IBM)  * @file:	file to synchronize
63131b2ebc0SRitesh Harjani (IBM)  * @start:	start offset in bytes
63231b2ebc0SRitesh Harjani (IBM)  * @end:	end offset in bytes (inclusive)
63331b2ebc0SRitesh Harjani (IBM)  * @datasync:	only synchronize essential metadata if true
63431b2ebc0SRitesh Harjani (IBM)  *
63531b2ebc0SRitesh Harjani (IBM)  * This is a generic implementation of the fsync method for simple
63631b2ebc0SRitesh Harjani (IBM)  * filesystems which track all non-inode metadata in the buffers list
63731b2ebc0SRitesh Harjani (IBM)  * hanging off the address_space structure. This also makes sure that
63831b2ebc0SRitesh Harjani (IBM)  * a device cache flush operation is called at the end.
63931b2ebc0SRitesh Harjani (IBM)  */
64031b2ebc0SRitesh Harjani (IBM) int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
64131b2ebc0SRitesh Harjani (IBM) 			  bool datasync)
64231b2ebc0SRitesh Harjani (IBM) {
64331b2ebc0SRitesh Harjani (IBM) 	struct inode *inode = file->f_mapping->host;
64431b2ebc0SRitesh Harjani (IBM) 	int ret;
64531b2ebc0SRitesh Harjani (IBM) 
64631b2ebc0SRitesh Harjani (IBM) 	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
64731b2ebc0SRitesh Harjani (IBM) 	if (!ret)
64831b2ebc0SRitesh Harjani (IBM) 		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
64931b2ebc0SRitesh Harjani (IBM) 	return ret;
65031b2ebc0SRitesh Harjani (IBM) }
65131b2ebc0SRitesh Harjani (IBM) EXPORT_SYMBOL(generic_buffers_fsync);
65231b2ebc0SRitesh Harjani (IBM) 
6531da177e4SLinus Torvalds /*
6541da177e4SLinus Torvalds  * Called when we've recently written block `bblock', and it is known that
6551da177e4SLinus Torvalds  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
6561da177e4SLinus Torvalds  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
6571da177e4SLinus Torvalds  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
6581da177e4SLinus Torvalds  */
6591da177e4SLinus Torvalds void write_boundary_block(struct block_device *bdev,
6601da177e4SLinus Torvalds 			sector_t bblock, unsigned blocksize)
6611da177e4SLinus Torvalds {
6621da177e4SLinus Torvalds 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
6631da177e4SLinus Torvalds 	if (bh) {
6641da177e4SLinus Torvalds 		if (buffer_dirty(bh))
665e7ea1129SZhang Yi 			write_dirty_buffer(bh, 0);
6661da177e4SLinus Torvalds 		put_bh(bh);
6671da177e4SLinus Torvalds 	}
6681da177e4SLinus Torvalds }
6691da177e4SLinus Torvalds 
6701da177e4SLinus Torvalds void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
6711da177e4SLinus Torvalds {
6721da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
673abc8a8a2SMatthew Wilcox (Oracle) 	struct address_space *buffer_mapping = bh->b_folio->mapping;
6741da177e4SLinus Torvalds 
6751da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
676252aa6f5SRafael Aquini 	if (!mapping->private_data) {
677252aa6f5SRafael Aquini 		mapping->private_data = buffer_mapping;
6781da177e4SLinus Torvalds 	} else {
679252aa6f5SRafael Aquini 		BUG_ON(mapping->private_data != buffer_mapping);
6801da177e4SLinus Torvalds 	}
681535ee2fbSJan Kara 	if (!bh->b_assoc_map) {
6821da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
6831da177e4SLinus Torvalds 		list_move_tail(&bh->b_assoc_buffers,
6841da177e4SLinus Torvalds 				&mapping->private_list);
68558ff407bSJan Kara 		bh->b_assoc_map = mapping;
6861da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
6871da177e4SLinus Torvalds 	}
6881da177e4SLinus Torvalds }
6891da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_dirty_inode);
6901da177e4SLinus Torvalds 
6911da177e4SLinus Torvalds /*
6921da177e4SLinus Torvalds  * Add a page to the dirty page list.
6931da177e4SLinus Torvalds  *
6941da177e4SLinus Torvalds  * It is a sad fact of life that this function is called from several places
6951da177e4SLinus Torvalds  * deeply under spinlocking.  It may not sleep.
6961da177e4SLinus Torvalds  *
6971da177e4SLinus Torvalds  * If the page has buffers, the uptodate buffers are set dirty, to preserve
6981da177e4SLinus Torvalds  * dirty-state coherency between the page and the buffers.  It the page does
6991da177e4SLinus Torvalds  * not have buffers then when they are later attached they will all be set
7001da177e4SLinus Torvalds  * dirty.
7011da177e4SLinus Torvalds  *
7021da177e4SLinus Torvalds  * The buffers are dirtied before the page is dirtied.  There's a small race
7031da177e4SLinus Torvalds  * window in which a writepage caller may see the page cleanness but not the
7041da177e4SLinus Torvalds  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
7051da177e4SLinus Torvalds  * before the buffers, a concurrent writepage caller could clear the page dirty
7061da177e4SLinus Torvalds  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
7071da177e4SLinus Torvalds  * page on the dirty page list.
7081da177e4SLinus Torvalds  *
7091da177e4SLinus Torvalds  * We use private_lock to lock against try_to_free_buffers while using the
7101da177e4SLinus Torvalds  * page's buffer list.  Also use this to protect against clean buffers being
7111da177e4SLinus Torvalds  * added to the page after it was set dirty.
7121da177e4SLinus Torvalds  *
7131da177e4SLinus Torvalds  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
7141da177e4SLinus Torvalds  * address_space though.
7151da177e4SLinus Torvalds  */
716e621900aSMatthew Wilcox (Oracle) bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
7171da177e4SLinus Torvalds {
718e621900aSMatthew Wilcox (Oracle) 	struct buffer_head *head;
719e621900aSMatthew Wilcox (Oracle) 	bool newly_dirty;
7201da177e4SLinus Torvalds 
7211da177e4SLinus Torvalds 	spin_lock(&mapping->private_lock);
722e621900aSMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
723e621900aSMatthew Wilcox (Oracle) 	if (head) {
7241da177e4SLinus Torvalds 		struct buffer_head *bh = head;
7251da177e4SLinus Torvalds 
7261da177e4SLinus Torvalds 		do {
7271da177e4SLinus Torvalds 			set_buffer_dirty(bh);
7281da177e4SLinus Torvalds 			bh = bh->b_this_page;
7291da177e4SLinus Torvalds 		} while (bh != head);
7301da177e4SLinus Torvalds 	}
731c4843a75SGreg Thelen 	/*
732bcfe06bfSRoman Gushchin 	 * Lock out page's memcg migration to keep PageDirty
73381f8c3a4SJohannes Weiner 	 * synchronized with per-memcg dirty page counters.
734c4843a75SGreg Thelen 	 */
735e621900aSMatthew Wilcox (Oracle) 	folio_memcg_lock(folio);
736e621900aSMatthew Wilcox (Oracle) 	newly_dirty = !folio_test_set_dirty(folio);
7371da177e4SLinus Torvalds 	spin_unlock(&mapping->private_lock);
7381da177e4SLinus Torvalds 
739a8e7d49aSLinus Torvalds 	if (newly_dirty)
740e621900aSMatthew Wilcox (Oracle) 		__folio_mark_dirty(folio, mapping, 1);
741c4843a75SGreg Thelen 
742e621900aSMatthew Wilcox (Oracle) 	folio_memcg_unlock(folio);
743c4843a75SGreg Thelen 
744c4843a75SGreg Thelen 	if (newly_dirty)
745c4843a75SGreg Thelen 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
746c4843a75SGreg Thelen 
747a8e7d49aSLinus Torvalds 	return newly_dirty;
7481da177e4SLinus Torvalds }
749e621900aSMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_dirty_folio);
7501da177e4SLinus Torvalds 
7511da177e4SLinus Torvalds /*
7521da177e4SLinus Torvalds  * Write out and wait upon a list of buffers.
7531da177e4SLinus Torvalds  *
7541da177e4SLinus Torvalds  * We have conflicting pressures: we want to make sure that all
7551da177e4SLinus Torvalds  * initially dirty buffers get waited on, but that any subsequently
7561da177e4SLinus Torvalds  * dirtied buffers don't.  After all, we don't want fsync to last
7571da177e4SLinus Torvalds  * forever if somebody is actively writing to the file.
7581da177e4SLinus Torvalds  *
7591da177e4SLinus Torvalds  * Do this in two main stages: first we copy dirty buffers to a
7601da177e4SLinus Torvalds  * temporary inode list, queueing the writes as we go.  Then we clean
7611da177e4SLinus Torvalds  * up, waiting for those writes to complete.
7621da177e4SLinus Torvalds  *
7631da177e4SLinus Torvalds  * During this second stage, any subsequent updates to the file may end
7641da177e4SLinus Torvalds  * up refiling the buffer on the original inode's dirty list again, so
7651da177e4SLinus Torvalds  * there is a chance we will end up with a buffer queued for write but
7661da177e4SLinus Torvalds  * not yet completed on that list.  So, as a final cleanup we go through
7671da177e4SLinus Torvalds  * the osync code to catch these locked, dirty buffers without requeuing
7681da177e4SLinus Torvalds  * any newly dirty buffers for write.
7691da177e4SLinus Torvalds  */
7701da177e4SLinus Torvalds static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
7711da177e4SLinus Torvalds {
7721da177e4SLinus Torvalds 	struct buffer_head *bh;
7731da177e4SLinus Torvalds 	struct list_head tmp;
7747eaceaccSJens Axboe 	struct address_space *mapping;
7751da177e4SLinus Torvalds 	int err = 0, err2;
7764ee2491eSJens Axboe 	struct blk_plug plug;
7771da177e4SLinus Torvalds 
7781da177e4SLinus Torvalds 	INIT_LIST_HEAD(&tmp);
7794ee2491eSJens Axboe 	blk_start_plug(&plug);
7801da177e4SLinus Torvalds 
7811da177e4SLinus Torvalds 	spin_lock(lock);
7821da177e4SLinus Torvalds 	while (!list_empty(list)) {
7831da177e4SLinus Torvalds 		bh = BH_ENTRY(list->next);
784535ee2fbSJan Kara 		mapping = bh->b_assoc_map;
78558ff407bSJan Kara 		__remove_assoc_queue(bh);
786535ee2fbSJan Kara 		/* Avoid race with mark_buffer_dirty_inode() which does
787535ee2fbSJan Kara 		 * a lockless check and we rely on seeing the dirty bit */
788535ee2fbSJan Kara 		smp_mb();
7891da177e4SLinus Torvalds 		if (buffer_dirty(bh) || buffer_locked(bh)) {
7901da177e4SLinus Torvalds 			list_add(&bh->b_assoc_buffers, &tmp);
791535ee2fbSJan Kara 			bh->b_assoc_map = mapping;
7921da177e4SLinus Torvalds 			if (buffer_dirty(bh)) {
7931da177e4SLinus Torvalds 				get_bh(bh);
7941da177e4SLinus Torvalds 				spin_unlock(lock);
7951da177e4SLinus Torvalds 				/*
7961da177e4SLinus Torvalds 				 * Ensure any pending I/O completes so that
7979cb569d6SChristoph Hellwig 				 * write_dirty_buffer() actually writes the
7989cb569d6SChristoph Hellwig 				 * current contents - it is a noop if I/O is
7999cb569d6SChristoph Hellwig 				 * still in flight on potentially older
8009cb569d6SChristoph Hellwig 				 * contents.
8011da177e4SLinus Torvalds 				 */
80270fd7614SChristoph Hellwig 				write_dirty_buffer(bh, REQ_SYNC);
8039cf6b720SJens Axboe 
8049cf6b720SJens Axboe 				/*
8059cf6b720SJens Axboe 				 * Kick off IO for the previous mapping. Note
8069cf6b720SJens Axboe 				 * that we will not run the very last mapping,
8079cf6b720SJens Axboe 				 * wait_on_buffer() will do that for us
8089cf6b720SJens Axboe 				 * through sync_buffer().
8099cf6b720SJens Axboe 				 */
8101da177e4SLinus Torvalds 				brelse(bh);
8111da177e4SLinus Torvalds 				spin_lock(lock);
8121da177e4SLinus Torvalds 			}
8131da177e4SLinus Torvalds 		}
8141da177e4SLinus Torvalds 	}
8151da177e4SLinus Torvalds 
8164ee2491eSJens Axboe 	spin_unlock(lock);
8174ee2491eSJens Axboe 	blk_finish_plug(&plug);
8184ee2491eSJens Axboe 	spin_lock(lock);
8194ee2491eSJens Axboe 
8201da177e4SLinus Torvalds 	while (!list_empty(&tmp)) {
8211da177e4SLinus Torvalds 		bh = BH_ENTRY(tmp.prev);
8221da177e4SLinus Torvalds 		get_bh(bh);
823535ee2fbSJan Kara 		mapping = bh->b_assoc_map;
824535ee2fbSJan Kara 		__remove_assoc_queue(bh);
825535ee2fbSJan Kara 		/* Avoid race with mark_buffer_dirty_inode() which does
826535ee2fbSJan Kara 		 * a lockless check and we rely on seeing the dirty bit */
827535ee2fbSJan Kara 		smp_mb();
828535ee2fbSJan Kara 		if (buffer_dirty(bh)) {
829535ee2fbSJan Kara 			list_add(&bh->b_assoc_buffers,
830e3892296SJan Kara 				 &mapping->private_list);
831535ee2fbSJan Kara 			bh->b_assoc_map = mapping;
832535ee2fbSJan Kara 		}
8331da177e4SLinus Torvalds 		spin_unlock(lock);
8341da177e4SLinus Torvalds 		wait_on_buffer(bh);
8351da177e4SLinus Torvalds 		if (!buffer_uptodate(bh))
8361da177e4SLinus Torvalds 			err = -EIO;
8371da177e4SLinus Torvalds 		brelse(bh);
8381da177e4SLinus Torvalds 		spin_lock(lock);
8391da177e4SLinus Torvalds 	}
8401da177e4SLinus Torvalds 
8411da177e4SLinus Torvalds 	spin_unlock(lock);
8421da177e4SLinus Torvalds 	err2 = osync_buffers_list(lock, list);
8431da177e4SLinus Torvalds 	if (err)
8441da177e4SLinus Torvalds 		return err;
8451da177e4SLinus Torvalds 	else
8461da177e4SLinus Torvalds 		return err2;
8471da177e4SLinus Torvalds }
8481da177e4SLinus Torvalds 
8491da177e4SLinus Torvalds /*
8501da177e4SLinus Torvalds  * Invalidate any and all dirty buffers on a given inode.  We are
8511da177e4SLinus Torvalds  * probably unmounting the fs, but that doesn't mean we have already
8521da177e4SLinus Torvalds  * done a sync().  Just drop the buffers from the inode list.
8531da177e4SLinus Torvalds  *
8541da177e4SLinus Torvalds  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
8551da177e4SLinus Torvalds  * assumes that all the buffers are against the blockdev.  Not true
8561da177e4SLinus Torvalds  * for reiserfs.
8571da177e4SLinus Torvalds  */
8581da177e4SLinus Torvalds void invalidate_inode_buffers(struct inode *inode)
8591da177e4SLinus Torvalds {
8601da177e4SLinus Torvalds 	if (inode_has_buffers(inode)) {
8611da177e4SLinus Torvalds 		struct address_space *mapping = &inode->i_data;
8621da177e4SLinus Torvalds 		struct list_head *list = &mapping->private_list;
863252aa6f5SRafael Aquini 		struct address_space *buffer_mapping = mapping->private_data;
8641da177e4SLinus Torvalds 
8651da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
8661da177e4SLinus Torvalds 		while (!list_empty(list))
8671da177e4SLinus Torvalds 			__remove_assoc_queue(BH_ENTRY(list->next));
8681da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
8691da177e4SLinus Torvalds 	}
8701da177e4SLinus Torvalds }
87152b19ac9SJan Kara EXPORT_SYMBOL(invalidate_inode_buffers);
8721da177e4SLinus Torvalds 
8731da177e4SLinus Torvalds /*
8741da177e4SLinus Torvalds  * Remove any clean buffers from the inode's buffer list.  This is called
8751da177e4SLinus Torvalds  * when we're trying to free the inode itself.  Those buffers can pin it.
8761da177e4SLinus Torvalds  *
8771da177e4SLinus Torvalds  * Returns true if all buffers were removed.
8781da177e4SLinus Torvalds  */
8791da177e4SLinus Torvalds int remove_inode_buffers(struct inode *inode)
8801da177e4SLinus Torvalds {
8811da177e4SLinus Torvalds 	int ret = 1;
8821da177e4SLinus Torvalds 
8831da177e4SLinus Torvalds 	if (inode_has_buffers(inode)) {
8841da177e4SLinus Torvalds 		struct address_space *mapping = &inode->i_data;
8851da177e4SLinus Torvalds 		struct list_head *list = &mapping->private_list;
886252aa6f5SRafael Aquini 		struct address_space *buffer_mapping = mapping->private_data;
8871da177e4SLinus Torvalds 
8881da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
8891da177e4SLinus Torvalds 		while (!list_empty(list)) {
8901da177e4SLinus Torvalds 			struct buffer_head *bh = BH_ENTRY(list->next);
8911da177e4SLinus Torvalds 			if (buffer_dirty(bh)) {
8921da177e4SLinus Torvalds 				ret = 0;
8931da177e4SLinus Torvalds 				break;
8941da177e4SLinus Torvalds 			}
8951da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
8961da177e4SLinus Torvalds 		}
8971da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
8981da177e4SLinus Torvalds 	}
8991da177e4SLinus Torvalds 	return ret;
9001da177e4SLinus Torvalds }
9011da177e4SLinus Torvalds 
9021da177e4SLinus Torvalds /*
903c71124a8SPankaj Raghav  * Create the appropriate buffers when given a folio for data area and
9041da177e4SLinus Torvalds  * the size of each buffer.. Use the bh->b_this_page linked list to
9051da177e4SLinus Torvalds  * follow the buffers created.  Return NULL if unable to create more
9061da177e4SLinus Torvalds  * buffers.
9071da177e4SLinus Torvalds  *
9081da177e4SLinus Torvalds  * The retry flag is used to differentiate async IO (paging, swapping)
9091da177e4SLinus Torvalds  * which may not fail from ordinary buffer allocations.
9101da177e4SLinus Torvalds  */
911c71124a8SPankaj Raghav struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
9122a418157SMatthew Wilcox (Oracle) 					gfp_t gfp)
9131da177e4SLinus Torvalds {
9141da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
9151da177e4SLinus Torvalds 	long offset;
916b87d8cefSRoman Gushchin 	struct mem_cgroup *memcg, *old_memcg;
9171da177e4SLinus Torvalds 
918c71124a8SPankaj Raghav 	/* The folio lock pins the memcg */
919c71124a8SPankaj Raghav 	memcg = folio_memcg(folio);
920b87d8cefSRoman Gushchin 	old_memcg = set_active_memcg(memcg);
921f745c6f5SShakeel Butt 
9221da177e4SLinus Torvalds 	head = NULL;
923c71124a8SPankaj Raghav 	offset = folio_size(folio);
9241da177e4SLinus Torvalds 	while ((offset -= size) >= 0) {
925640ab98fSJens Axboe 		bh = alloc_buffer_head(gfp);
9261da177e4SLinus Torvalds 		if (!bh)
9271da177e4SLinus Torvalds 			goto no_grow;
9281da177e4SLinus Torvalds 
9291da177e4SLinus Torvalds 		bh->b_this_page = head;
9301da177e4SLinus Torvalds 		bh->b_blocknr = -1;
9311da177e4SLinus Torvalds 		head = bh;
9321da177e4SLinus Torvalds 
9331da177e4SLinus Torvalds 		bh->b_size = size;
9341da177e4SLinus Torvalds 
935c71124a8SPankaj Raghav 		/* Link the buffer to its folio */
936c71124a8SPankaj Raghav 		folio_set_bh(bh, folio, offset);
9371da177e4SLinus Torvalds 	}
938f745c6f5SShakeel Butt out:
939b87d8cefSRoman Gushchin 	set_active_memcg(old_memcg);
9401da177e4SLinus Torvalds 	return head;
9411da177e4SLinus Torvalds /*
9421da177e4SLinus Torvalds  * In case anything failed, we just free everything we got.
9431da177e4SLinus Torvalds  */
9441da177e4SLinus Torvalds no_grow:
9451da177e4SLinus Torvalds 	if (head) {
9461da177e4SLinus Torvalds 		do {
9471da177e4SLinus Torvalds 			bh = head;
9481da177e4SLinus Torvalds 			head = head->b_this_page;
9491da177e4SLinus Torvalds 			free_buffer_head(bh);
9501da177e4SLinus Torvalds 		} while (head);
9511da177e4SLinus Torvalds 	}
9521da177e4SLinus Torvalds 
953f745c6f5SShakeel Butt 	goto out;
9541da177e4SLinus Torvalds }
955c71124a8SPankaj Raghav EXPORT_SYMBOL_GPL(folio_alloc_buffers);
956c71124a8SPankaj Raghav 
957c71124a8SPankaj Raghav struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
958c71124a8SPankaj Raghav 				       bool retry)
959c71124a8SPankaj Raghav {
9602a418157SMatthew Wilcox (Oracle) 	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
9612a418157SMatthew Wilcox (Oracle) 	if (retry)
9622a418157SMatthew Wilcox (Oracle) 		gfp |= __GFP_NOFAIL;
9632a418157SMatthew Wilcox (Oracle) 
9642a418157SMatthew Wilcox (Oracle) 	return folio_alloc_buffers(page_folio(page), size, gfp);
965c71124a8SPankaj Raghav }
9661da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(alloc_page_buffers);
9671da177e4SLinus Torvalds 
96808d84addSMatthew Wilcox (Oracle) static inline void link_dev_buffers(struct folio *folio,
96908d84addSMatthew Wilcox (Oracle) 		struct buffer_head *head)
9701da177e4SLinus Torvalds {
9711da177e4SLinus Torvalds 	struct buffer_head *bh, *tail;
9721da177e4SLinus Torvalds 
9731da177e4SLinus Torvalds 	bh = head;
9741da177e4SLinus Torvalds 	do {
9751da177e4SLinus Torvalds 		tail = bh;
9761da177e4SLinus Torvalds 		bh = bh->b_this_page;
9771da177e4SLinus Torvalds 	} while (bh);
9781da177e4SLinus Torvalds 	tail->b_this_page = head;
97908d84addSMatthew Wilcox (Oracle) 	folio_attach_private(folio, head);
9801da177e4SLinus Torvalds }
9811da177e4SLinus Torvalds 
982bbec0270SLinus Torvalds static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
983bbec0270SLinus Torvalds {
984bbec0270SLinus Torvalds 	sector_t retval = ~((sector_t)0);
985b86058f9SChristoph Hellwig 	loff_t sz = bdev_nr_bytes(bdev);
986bbec0270SLinus Torvalds 
987bbec0270SLinus Torvalds 	if (sz) {
988bbec0270SLinus Torvalds 		unsigned int sizebits = blksize_bits(size);
989bbec0270SLinus Torvalds 		retval = (sz >> sizebits);
990bbec0270SLinus Torvalds 	}
991bbec0270SLinus Torvalds 	return retval;
992bbec0270SLinus Torvalds }
993bbec0270SLinus Torvalds 
9941da177e4SLinus Torvalds /*
9956f24ce6bSMatthew Wilcox (Oracle)  * Initialise the state of a blockdev folio's buffers.
9961da177e4SLinus Torvalds  */
9976f24ce6bSMatthew Wilcox (Oracle) static sector_t folio_init_buffers(struct folio *folio,
998382497adSMatthew Wilcox (Oracle) 		struct block_device *bdev, unsigned size)
9991da177e4SLinus Torvalds {
10006f24ce6bSMatthew Wilcox (Oracle) 	struct buffer_head *head = folio_buffers(folio);
10011da177e4SLinus Torvalds 	struct buffer_head *bh = head;
10026f24ce6bSMatthew Wilcox (Oracle) 	bool uptodate = folio_test_uptodate(folio);
1003382497adSMatthew Wilcox (Oracle) 	sector_t block = div_u64(folio_pos(folio), size);
1004bcd1d063SChristoph Hellwig 	sector_t end_block = blkdev_max_block(bdev, size);
10051da177e4SLinus Torvalds 
10061da177e4SLinus Torvalds 	do {
10071da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
100801950a34SEric Biggers 			bh->b_end_io = NULL;
100901950a34SEric Biggers 			bh->b_private = NULL;
10101da177e4SLinus Torvalds 			bh->b_bdev = bdev;
10111da177e4SLinus Torvalds 			bh->b_blocknr = block;
10121da177e4SLinus Torvalds 			if (uptodate)
10131da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
1014080399aaSJeff Moyer 			if (block < end_block)
10151da177e4SLinus Torvalds 				set_buffer_mapped(bh);
10161da177e4SLinus Torvalds 		}
10171da177e4SLinus Torvalds 		block++;
10181da177e4SLinus Torvalds 		bh = bh->b_this_page;
10191da177e4SLinus Torvalds 	} while (bh != head);
1020676ce6d5SHugh Dickins 
1021676ce6d5SHugh Dickins 	/*
1022676ce6d5SHugh Dickins 	 * Caller needs to validate requested block against end of device.
1023676ce6d5SHugh Dickins 	 */
1024676ce6d5SHugh Dickins 	return end_block;
10251da177e4SLinus Torvalds }
10261da177e4SLinus Torvalds 
10271da177e4SLinus Torvalds /*
10286d840a18SMatthew Wilcox (Oracle)  * Create the page-cache folio that contains the requested block.
10291da177e4SLinus Torvalds  *
1030676ce6d5SHugh Dickins  * This is used purely for blockdev mappings.
10316d840a18SMatthew Wilcox (Oracle)  *
10326d840a18SMatthew Wilcox (Oracle)  * Returns false if we have a 'permanent' failure.  Returns true if
10336d840a18SMatthew Wilcox (Oracle)  * we succeeded, or the caller should retry.
10341da177e4SLinus Torvalds  */
10356d840a18SMatthew Wilcox (Oracle) static bool grow_dev_folio(struct block_device *bdev, sector_t block,
1036382497adSMatthew Wilcox (Oracle) 		pgoff_t index, unsigned size, gfp_t gfp)
10371da177e4SLinus Torvalds {
10381da177e4SLinus Torvalds 	struct inode *inode = bdev->bd_inode;
10393c98a41cSMatthew Wilcox (Oracle) 	struct folio *folio;
10401da177e4SLinus Torvalds 	struct buffer_head *bh;
10416d840a18SMatthew Wilcox (Oracle) 	sector_t end_block = 0;
104284235de3SJohannes Weiner 
10433c98a41cSMatthew Wilcox (Oracle) 	folio = __filemap_get_folio(inode->i_mapping, index,
10443ed65f04SMatthew Wilcox (Oracle) 			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
10453ed65f04SMatthew Wilcox (Oracle) 	if (IS_ERR(folio))
10466d840a18SMatthew Wilcox (Oracle) 		return false;
10471da177e4SLinus Torvalds 
10483c98a41cSMatthew Wilcox (Oracle) 	bh = folio_buffers(folio);
10493c98a41cSMatthew Wilcox (Oracle) 	if (bh) {
10501da177e4SLinus Torvalds 		if (bh->b_size == size) {
1051382497adSMatthew Wilcox (Oracle) 			end_block = folio_init_buffers(folio, bdev, size);
10526d840a18SMatthew Wilcox (Oracle) 			goto unlock;
10531da177e4SLinus Torvalds 		}
10541da177e4SLinus Torvalds 
10556d840a18SMatthew Wilcox (Oracle) 		/* Caller should retry if this call fails */
10566d840a18SMatthew Wilcox (Oracle) 		end_block = ~0ULL;
10576d840a18SMatthew Wilcox (Oracle) 		if (!try_to_free_buffers(folio))
10586d840a18SMatthew Wilcox (Oracle) 			goto unlock;
10596d840a18SMatthew Wilcox (Oracle) 	}
10606d840a18SMatthew Wilcox (Oracle) 
10613ed65f04SMatthew Wilcox (Oracle) 	bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
10623ed65f04SMatthew Wilcox (Oracle) 	if (!bh)
10636d840a18SMatthew Wilcox (Oracle) 		goto unlock;
10641da177e4SLinus Torvalds 
10651da177e4SLinus Torvalds 	/*
10663c98a41cSMatthew Wilcox (Oracle) 	 * Link the folio to the buffers and initialise them.  Take the
10671da177e4SLinus Torvalds 	 * lock to be atomic wrt __find_get_block(), which does not
10683c98a41cSMatthew Wilcox (Oracle) 	 * run under the folio lock.
10691da177e4SLinus Torvalds 	 */
10701da177e4SLinus Torvalds 	spin_lock(&inode->i_mapping->private_lock);
107108d84addSMatthew Wilcox (Oracle) 	link_dev_buffers(folio, bh);
1072382497adSMatthew Wilcox (Oracle) 	end_block = folio_init_buffers(folio, bdev, size);
10731da177e4SLinus Torvalds 	spin_unlock(&inode->i_mapping->private_lock);
10746d840a18SMatthew Wilcox (Oracle) unlock:
10753c98a41cSMatthew Wilcox (Oracle) 	folio_unlock(folio);
10763c98a41cSMatthew Wilcox (Oracle) 	folio_put(folio);
10776d840a18SMatthew Wilcox (Oracle) 	return block < end_block;
10781da177e4SLinus Torvalds }
10791da177e4SLinus Torvalds 
10801da177e4SLinus Torvalds /*
10816d840a18SMatthew Wilcox (Oracle)  * Create buffers for the specified block device block's folio.  If
10826d840a18SMatthew Wilcox (Oracle)  * that folio was dirty, the buffers are set dirty also.  Returns false
10836d840a18SMatthew Wilcox (Oracle)  * if we've hit a permanent error.
10841da177e4SLinus Torvalds  */
10856d840a18SMatthew Wilcox (Oracle) static bool grow_buffers(struct block_device *bdev, sector_t block,
10866d840a18SMatthew Wilcox (Oracle) 		unsigned size, gfp_t gfp)
10871da177e4SLinus Torvalds {
10885f3bd90dSMatthew Wilcox (Oracle) 	loff_t pos;
10891da177e4SLinus Torvalds 
1090e5657933SAndrew Morton 	/*
10915f3bd90dSMatthew Wilcox (Oracle) 	 * Check for a block which lies outside our maximum possible
10925f3bd90dSMatthew Wilcox (Oracle) 	 * pagecache index.
1093e5657933SAndrew Morton 	 */
10945f3bd90dSMatthew Wilcox (Oracle) 	if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
10955f3bd90dSMatthew Wilcox (Oracle) 		printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
10968e24eea7SHarvey Harrison 			__func__, (unsigned long long)block,
1097a1c6f057SDmitry Monakhov 			bdev);
10986d840a18SMatthew Wilcox (Oracle) 		return false;
1099e5657933SAndrew Morton 	}
1100676ce6d5SHugh Dickins 
11016d840a18SMatthew Wilcox (Oracle) 	/* Create a folio with the proper size buffers */
11025f3bd90dSMatthew Wilcox (Oracle) 	return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
11031da177e4SLinus Torvalds }
11041da177e4SLinus Torvalds 
11050026ba40SEric Biggers static struct buffer_head *
11063b5e6454SGioh Kim __getblk_slow(struct block_device *bdev, sector_t block,
11073b5e6454SGioh Kim 	     unsigned size, gfp_t gfp)
11081da177e4SLinus Torvalds {
11091da177e4SLinus Torvalds 	/* Size must be multiple of hard sectorsize */
1110e1defc4fSMartin K. Petersen 	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
11111da177e4SLinus Torvalds 			(size < 512 || size > PAGE_SIZE))) {
11121da177e4SLinus Torvalds 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
11131da177e4SLinus Torvalds 					size);
1114e1defc4fSMartin K. Petersen 		printk(KERN_ERR "logical block size: %d\n",
1115e1defc4fSMartin K. Petersen 					bdev_logical_block_size(bdev));
11161da177e4SLinus Torvalds 
11171da177e4SLinus Torvalds 		dump_stack();
11181da177e4SLinus Torvalds 		return NULL;
11191da177e4SLinus Torvalds 	}
11201da177e4SLinus Torvalds 
1121676ce6d5SHugh Dickins 	for (;;) {
1122676ce6d5SHugh Dickins 		struct buffer_head *bh;
1123676ce6d5SHugh Dickins 
11241da177e4SLinus Torvalds 		bh = __find_get_block(bdev, block, size);
11251da177e4SLinus Torvalds 		if (bh)
11261da177e4SLinus Torvalds 			return bh;
11271da177e4SLinus Torvalds 
11286d840a18SMatthew Wilcox (Oracle) 		if (!grow_buffers(bdev, block, size, gfp))
112991f68c89SJeff Moyer 			return NULL;
1130676ce6d5SHugh Dickins 	}
11311da177e4SLinus Torvalds }
11321da177e4SLinus Torvalds 
11331da177e4SLinus Torvalds /*
11341da177e4SLinus Torvalds  * The relationship between dirty buffers and dirty pages:
11351da177e4SLinus Torvalds  *
11361da177e4SLinus Torvalds  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1137ec82e1c1SMatthew Wilcox  * the page is tagged dirty in the page cache.
11381da177e4SLinus Torvalds  *
11391da177e4SLinus Torvalds  * At all times, the dirtiness of the buffers represents the dirtiness of
11401da177e4SLinus Torvalds  * subsections of the page.  If the page has buffers, the page dirty bit is
11411da177e4SLinus Torvalds  * merely a hint about the true dirty state.
11421da177e4SLinus Torvalds  *
11431da177e4SLinus Torvalds  * When a page is set dirty in its entirety, all its buffers are marked dirty
11441da177e4SLinus Torvalds  * (if the page has buffers).
11451da177e4SLinus Torvalds  *
11461da177e4SLinus Torvalds  * When a buffer is marked dirty, its page is dirtied, but the page's other
11471da177e4SLinus Torvalds  * buffers are not.
11481da177e4SLinus Torvalds  *
11491da177e4SLinus Torvalds  * Also.  When blockdev buffers are explicitly read with bread(), they
11501da177e4SLinus Torvalds  * individually become uptodate.  But their backing page remains not
11511da177e4SLinus Torvalds  * uptodate - even if all of its buffers are uptodate.  A subsequent
11522c69e205SMatthew Wilcox (Oracle)  * block_read_full_folio() against that folio will discover all the uptodate
11532c69e205SMatthew Wilcox (Oracle)  * buffers, will set the folio uptodate and will perform no I/O.
11541da177e4SLinus Torvalds  */
11551da177e4SLinus Torvalds 
11561da177e4SLinus Torvalds /**
11571da177e4SLinus Torvalds  * mark_buffer_dirty - mark a buffer_head as needing writeout
115867be2dd1SMartin Waitz  * @bh: the buffer_head to mark dirty
11591da177e4SLinus Torvalds  *
1160ec82e1c1SMatthew Wilcox  * mark_buffer_dirty() will set the dirty bit against the buffer, then set
1161ec82e1c1SMatthew Wilcox  * its backing page dirty, then tag the page as dirty in the page cache
1162ec82e1c1SMatthew Wilcox  * and then attach the address_space's inode to its superblock's dirty
11631da177e4SLinus Torvalds  * inode list.
11641da177e4SLinus Torvalds  *
1165abc8a8a2SMatthew Wilcox (Oracle)  * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->private_lock,
1166b93b0163SMatthew Wilcox  * i_pages lock and mapping->host->i_lock.
11671da177e4SLinus Torvalds  */
1168fc9b52cdSHarvey Harrison void mark_buffer_dirty(struct buffer_head *bh)
11691da177e4SLinus Torvalds {
1170787d2214SNick Piggin 	WARN_ON_ONCE(!buffer_uptodate(bh));
11711be62dc1SLinus Torvalds 
11725305cb83STejun Heo 	trace_block_dirty_buffer(bh);
11735305cb83STejun Heo 
11741be62dc1SLinus Torvalds 	/*
11751be62dc1SLinus Torvalds 	 * Very *carefully* optimize the it-is-already-dirty case.
11761be62dc1SLinus Torvalds 	 *
11771be62dc1SLinus Torvalds 	 * Don't let the final "is it dirty" escape to before we
11781be62dc1SLinus Torvalds 	 * perhaps modified the buffer.
11791be62dc1SLinus Torvalds 	 */
11801be62dc1SLinus Torvalds 	if (buffer_dirty(bh)) {
11811be62dc1SLinus Torvalds 		smp_mb();
11821be62dc1SLinus Torvalds 		if (buffer_dirty(bh))
11831be62dc1SLinus Torvalds 			return;
11841be62dc1SLinus Torvalds 	}
11851be62dc1SLinus Torvalds 
1186a8e7d49aSLinus Torvalds 	if (!test_set_buffer_dirty(bh)) {
1187cf1d3417SMatthew Wilcox (Oracle) 		struct folio *folio = bh->b_folio;
1188c4843a75SGreg Thelen 		struct address_space *mapping = NULL;
1189c4843a75SGreg Thelen 
1190cf1d3417SMatthew Wilcox (Oracle) 		folio_memcg_lock(folio);
1191cf1d3417SMatthew Wilcox (Oracle) 		if (!folio_test_set_dirty(folio)) {
1192cf1d3417SMatthew Wilcox (Oracle) 			mapping = folio->mapping;
11938e9d78edSLinus Torvalds 			if (mapping)
1194cf1d3417SMatthew Wilcox (Oracle) 				__folio_mark_dirty(folio, mapping, 0);
11958e9d78edSLinus Torvalds 		}
1196cf1d3417SMatthew Wilcox (Oracle) 		folio_memcg_unlock(folio);
1197c4843a75SGreg Thelen 		if (mapping)
1198c4843a75SGreg Thelen 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1199a8e7d49aSLinus Torvalds 	}
12001da177e4SLinus Torvalds }
12011fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(mark_buffer_dirty);
12021da177e4SLinus Torvalds 
120387354e5dSJeff Layton void mark_buffer_write_io_error(struct buffer_head *bh)
120487354e5dSJeff Layton {
120587354e5dSJeff Layton 	set_buffer_write_io_error(bh);
120687354e5dSJeff Layton 	/* FIXME: do we need to set this in both places? */
1207abc8a8a2SMatthew Wilcox (Oracle) 	if (bh->b_folio && bh->b_folio->mapping)
1208abc8a8a2SMatthew Wilcox (Oracle) 		mapping_set_error(bh->b_folio->mapping, -EIO);
12094b2201daSChristoph Hellwig 	if (bh->b_assoc_map) {
121087354e5dSJeff Layton 		mapping_set_error(bh->b_assoc_map, -EIO);
12114b2201daSChristoph Hellwig 		errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
12124b2201daSChristoph Hellwig 	}
121387354e5dSJeff Layton }
121487354e5dSJeff Layton EXPORT_SYMBOL(mark_buffer_write_io_error);
121587354e5dSJeff Layton 
12161da177e4SLinus Torvalds /*
12171da177e4SLinus Torvalds  * Decrement a buffer_head's reference count.  If all buffers against a page
12181da177e4SLinus Torvalds  * have zero reference count, are clean and unlocked, and if the page is clean
12191da177e4SLinus Torvalds  * and unlocked then try_to_free_buffers() may strip the buffers from the page
12201da177e4SLinus Torvalds  * in preparation for freeing it (sometimes, rarely, buffers are removed from
12211da177e4SLinus Torvalds  * a page but it ends up not being freed, and buffers may later be reattached).
12221da177e4SLinus Torvalds  */
12231da177e4SLinus Torvalds void __brelse(struct buffer_head * buf)
12241da177e4SLinus Torvalds {
12251da177e4SLinus Torvalds 	if (atomic_read(&buf->b_count)) {
12261da177e4SLinus Torvalds 		put_bh(buf);
12271da177e4SLinus Torvalds 		return;
12281da177e4SLinus Torvalds 	}
12295c752ad9SArjan van de Ven 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
12301da177e4SLinus Torvalds }
12311fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__brelse);
12321da177e4SLinus Torvalds 
12331da177e4SLinus Torvalds /*
12341da177e4SLinus Torvalds  * bforget() is like brelse(), except it discards any
12351da177e4SLinus Torvalds  * potentially dirty data.
12361da177e4SLinus Torvalds  */
12371da177e4SLinus Torvalds void __bforget(struct buffer_head *bh)
12381da177e4SLinus Torvalds {
12391da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
1240535ee2fbSJan Kara 	if (bh->b_assoc_map) {
1241abc8a8a2SMatthew Wilcox (Oracle) 		struct address_space *buffer_mapping = bh->b_folio->mapping;
12421da177e4SLinus Torvalds 
12431da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
12441da177e4SLinus Torvalds 		list_del_init(&bh->b_assoc_buffers);
124558ff407bSJan Kara 		bh->b_assoc_map = NULL;
12461da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
12471da177e4SLinus Torvalds 	}
12481da177e4SLinus Torvalds 	__brelse(bh);
12491da177e4SLinus Torvalds }
12501fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__bforget);
12511da177e4SLinus Torvalds 
12521da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh)
12531da177e4SLinus Torvalds {
12541da177e4SLinus Torvalds 	lock_buffer(bh);
12551da177e4SLinus Torvalds 	if (buffer_uptodate(bh)) {
12561da177e4SLinus Torvalds 		unlock_buffer(bh);
12571da177e4SLinus Torvalds 		return bh;
12581da177e4SLinus Torvalds 	} else {
12591da177e4SLinus Torvalds 		get_bh(bh);
12601da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_read_sync;
12611420c4a5SBart Van Assche 		submit_bh(REQ_OP_READ, bh);
12621da177e4SLinus Torvalds 		wait_on_buffer(bh);
12631da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
12641da177e4SLinus Torvalds 			return bh;
12651da177e4SLinus Torvalds 	}
12661da177e4SLinus Torvalds 	brelse(bh);
12671da177e4SLinus Torvalds 	return NULL;
12681da177e4SLinus Torvalds }
12691da177e4SLinus Torvalds 
12701da177e4SLinus Torvalds /*
12711da177e4SLinus Torvalds  * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
12721da177e4SLinus Torvalds  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
12731da177e4SLinus Torvalds  * refcount elevated by one when they're in an LRU.  A buffer can only appear
12741da177e4SLinus Torvalds  * once in a particular CPU's LRU.  A single buffer can be present in multiple
12751da177e4SLinus Torvalds  * CPU's LRUs at the same time.
12761da177e4SLinus Torvalds  *
12771da177e4SLinus Torvalds  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
12781da177e4SLinus Torvalds  * sb_find_get_block().
12791da177e4SLinus Torvalds  *
12801da177e4SLinus Torvalds  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
12811da177e4SLinus Torvalds  * a local interrupt disable for that.
12821da177e4SLinus Torvalds  */
12831da177e4SLinus Torvalds 
128486cf78d7SSebastien Buisson #define BH_LRU_SIZE	16
12851da177e4SLinus Torvalds 
12861da177e4SLinus Torvalds struct bh_lru {
12871da177e4SLinus Torvalds 	struct buffer_head *bhs[BH_LRU_SIZE];
12881da177e4SLinus Torvalds };
12891da177e4SLinus Torvalds 
12901da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
12911da177e4SLinus Torvalds 
12921da177e4SLinus Torvalds #ifdef CONFIG_SMP
12931da177e4SLinus Torvalds #define bh_lru_lock()	local_irq_disable()
12941da177e4SLinus Torvalds #define bh_lru_unlock()	local_irq_enable()
12951da177e4SLinus Torvalds #else
12961da177e4SLinus Torvalds #define bh_lru_lock()	preempt_disable()
12971da177e4SLinus Torvalds #define bh_lru_unlock()	preempt_enable()
12981da177e4SLinus Torvalds #endif
12991da177e4SLinus Torvalds 
13001da177e4SLinus Torvalds static inline void check_irqs_on(void)
13011da177e4SLinus Torvalds {
13021da177e4SLinus Torvalds #ifdef irqs_disabled
13031da177e4SLinus Torvalds 	BUG_ON(irqs_disabled());
13041da177e4SLinus Torvalds #endif
13051da177e4SLinus Torvalds }
13061da177e4SLinus Torvalds 
13071da177e4SLinus Torvalds /*
1308241f01fbSEric Biggers  * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
1309241f01fbSEric Biggers  * inserted at the front, and the buffer_head at the back if any is evicted.
1310241f01fbSEric Biggers  * Or, if already in the LRU it is moved to the front.
13111da177e4SLinus Torvalds  */
13121da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh)
13131da177e4SLinus Torvalds {
1314241f01fbSEric Biggers 	struct buffer_head *evictee = bh;
1315241f01fbSEric Biggers 	struct bh_lru *b;
1316241f01fbSEric Biggers 	int i;
13171da177e4SLinus Torvalds 
13181da177e4SLinus Torvalds 	check_irqs_on();
1319c0226eb8SMinchan Kim 	bh_lru_lock();
1320c0226eb8SMinchan Kim 
13218cc621d2SMinchan Kim 	/*
13228cc621d2SMinchan Kim 	 * the refcount of buffer_head in bh_lru prevents dropping the
13238cc621d2SMinchan Kim 	 * attached page(i.e., try_to_free_buffers) so it could cause
13248cc621d2SMinchan Kim 	 * failing page migration.
13258cc621d2SMinchan Kim 	 * Skip putting upcoming bh into bh_lru until migration is done.
13268cc621d2SMinchan Kim 	 */
13278a237adfSMarcelo Tosatti 	if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
1328c0226eb8SMinchan Kim 		bh_lru_unlock();
13298cc621d2SMinchan Kim 		return;
1330c0226eb8SMinchan Kim 	}
1331241f01fbSEric Biggers 
1332241f01fbSEric Biggers 	b = this_cpu_ptr(&bh_lrus);
1333241f01fbSEric Biggers 	for (i = 0; i < BH_LRU_SIZE; i++) {
1334241f01fbSEric Biggers 		swap(evictee, b->bhs[i]);
1335241f01fbSEric Biggers 		if (evictee == bh) {
1336241f01fbSEric Biggers 			bh_lru_unlock();
1337241f01fbSEric Biggers 			return;
1338241f01fbSEric Biggers 		}
1339241f01fbSEric Biggers 	}
13401da177e4SLinus Torvalds 
13411da177e4SLinus Torvalds 	get_bh(bh);
13421da177e4SLinus Torvalds 	bh_lru_unlock();
1343241f01fbSEric Biggers 	brelse(evictee);
13441da177e4SLinus Torvalds }
13451da177e4SLinus Torvalds 
13461da177e4SLinus Torvalds /*
13471da177e4SLinus Torvalds  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
13481da177e4SLinus Torvalds  */
1349858119e1SArjan van de Ven static struct buffer_head *
13503991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
13511da177e4SLinus Torvalds {
13521da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
13533991d3bdSTomasz Kvarsin 	unsigned int i;
13541da177e4SLinus Torvalds 
13551da177e4SLinus Torvalds 	check_irqs_on();
13561da177e4SLinus Torvalds 	bh_lru_lock();
13578a237adfSMarcelo Tosatti 	if (cpu_is_isolated(smp_processor_id())) {
13588a237adfSMarcelo Tosatti 		bh_lru_unlock();
13598a237adfSMarcelo Tosatti 		return NULL;
13608a237adfSMarcelo Tosatti 	}
13611da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
1362c7b92516SChristoph Lameter 		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
13631da177e4SLinus Torvalds 
13649470dd5dSZach Brown 		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
13659470dd5dSZach Brown 		    bh->b_size == size) {
13661da177e4SLinus Torvalds 			if (i) {
13671da177e4SLinus Torvalds 				while (i) {
1368c7b92516SChristoph Lameter 					__this_cpu_write(bh_lrus.bhs[i],
1369c7b92516SChristoph Lameter 						__this_cpu_read(bh_lrus.bhs[i - 1]));
13701da177e4SLinus Torvalds 					i--;
13711da177e4SLinus Torvalds 				}
1372c7b92516SChristoph Lameter 				__this_cpu_write(bh_lrus.bhs[0], bh);
13731da177e4SLinus Torvalds 			}
13741da177e4SLinus Torvalds 			get_bh(bh);
13751da177e4SLinus Torvalds 			ret = bh;
13761da177e4SLinus Torvalds 			break;
13771da177e4SLinus Torvalds 		}
13781da177e4SLinus Torvalds 	}
13791da177e4SLinus Torvalds 	bh_lru_unlock();
13801da177e4SLinus Torvalds 	return ret;
13811da177e4SLinus Torvalds }
13821da177e4SLinus Torvalds 
13831da177e4SLinus Torvalds /*
13841da177e4SLinus Torvalds  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
13851da177e4SLinus Torvalds  * it in the LRU and mark it as accessed.  If it is not present then return
13861da177e4SLinus Torvalds  * NULL
13871da177e4SLinus Torvalds  */
13881da177e4SLinus Torvalds struct buffer_head *
13893991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
13901da177e4SLinus Torvalds {
13911da177e4SLinus Torvalds 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
13921da177e4SLinus Torvalds 
13931da177e4SLinus Torvalds 	if (bh == NULL) {
13942457aec6SMel Gorman 		/* __find_get_block_slow will mark the page accessed */
1395385fd4c5SCoywolf Qi Hunt 		bh = __find_get_block_slow(bdev, block);
13961da177e4SLinus Torvalds 		if (bh)
13971da177e4SLinus Torvalds 			bh_lru_install(bh);
13982457aec6SMel Gorman 	} else
13991da177e4SLinus Torvalds 		touch_buffer(bh);
14002457aec6SMel Gorman 
14011da177e4SLinus Torvalds 	return bh;
14021da177e4SLinus Torvalds }
14031da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block);
14041da177e4SLinus Torvalds 
14053ed65f04SMatthew Wilcox (Oracle) /**
14063ed65f04SMatthew Wilcox (Oracle)  * bdev_getblk - Get a buffer_head in a block device's buffer cache.
14073ed65f04SMatthew Wilcox (Oracle)  * @bdev: The block device.
14083ed65f04SMatthew Wilcox (Oracle)  * @block: The block number.
14093ed65f04SMatthew Wilcox (Oracle)  * @size: The size of buffer_heads for this @bdev.
14103ed65f04SMatthew Wilcox (Oracle)  * @gfp: The memory allocation flags to use.
14113ed65f04SMatthew Wilcox (Oracle)  *
14123ed65f04SMatthew Wilcox (Oracle)  * Return: The buffer head, or NULL if memory could not be allocated.
14133ed65f04SMatthew Wilcox (Oracle)  */
14143ed65f04SMatthew Wilcox (Oracle) struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
14153ed65f04SMatthew Wilcox (Oracle) 		unsigned size, gfp_t gfp)
14163ed65f04SMatthew Wilcox (Oracle) {
14173ed65f04SMatthew Wilcox (Oracle) 	struct buffer_head *bh = __find_get_block(bdev, block, size);
14183ed65f04SMatthew Wilcox (Oracle) 
14193ed65f04SMatthew Wilcox (Oracle) 	might_alloc(gfp);
14203ed65f04SMatthew Wilcox (Oracle) 	if (bh)
14213ed65f04SMatthew Wilcox (Oracle) 		return bh;
14223ed65f04SMatthew Wilcox (Oracle) 
14233ed65f04SMatthew Wilcox (Oracle) 	return __getblk_slow(bdev, block, size, gfp);
14243ed65f04SMatthew Wilcox (Oracle) }
14253ed65f04SMatthew Wilcox (Oracle) EXPORT_SYMBOL(bdev_getblk);
14263ed65f04SMatthew Wilcox (Oracle) 
14271da177e4SLinus Torvalds /*
14281da177e4SLinus Torvalds  * Do async read-ahead on a buffer..
14291da177e4SLinus Torvalds  */
14303991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
14311da177e4SLinus Torvalds {
1432775d9b10SMatthew Wilcox (Oracle) 	struct buffer_head *bh = bdev_getblk(bdev, block, size,
1433775d9b10SMatthew Wilcox (Oracle) 			GFP_NOWAIT | __GFP_MOVABLE);
1434775d9b10SMatthew Wilcox (Oracle) 
1435a3e713b5SAndrew Morton 	if (likely(bh)) {
1436e7ea1129SZhang Yi 		bh_readahead(bh, REQ_RAHEAD);
14371da177e4SLinus Torvalds 		brelse(bh);
14381da177e4SLinus Torvalds 	}
1439a3e713b5SAndrew Morton }
14401da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead);
14411da177e4SLinus Torvalds 
14421da177e4SLinus Torvalds /**
14433b5e6454SGioh Kim  *  __bread_gfp() - reads a specified block and returns the bh
144467be2dd1SMartin Waitz  *  @bdev: the block_device to read from
14451da177e4SLinus Torvalds  *  @block: number of block
14461da177e4SLinus Torvalds  *  @size: size (in bytes) to read
14473b5e6454SGioh Kim  *  @gfp: page allocation flag
14481da177e4SLinus Torvalds  *
14491da177e4SLinus Torvalds  *  Reads a specified block, and returns buffer head that contains it.
14503b5e6454SGioh Kim  *  The page cache can be allocated from non-movable area
14513b5e6454SGioh Kim  *  not to prevent page migration if you set gfp to zero.
14521da177e4SLinus Torvalds  *  It returns NULL if the block was unreadable.
14531da177e4SLinus Torvalds  */
14541da177e4SLinus Torvalds struct buffer_head *
14553b5e6454SGioh Kim __bread_gfp(struct block_device *bdev, sector_t block,
14563b5e6454SGioh Kim 		   unsigned size, gfp_t gfp)
14571da177e4SLinus Torvalds {
145893b13ecaSMatthew Wilcox (Oracle) 	struct buffer_head *bh;
145993b13ecaSMatthew Wilcox (Oracle) 
146093b13ecaSMatthew Wilcox (Oracle) 	gfp |= mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);
146193b13ecaSMatthew Wilcox (Oracle) 
146293b13ecaSMatthew Wilcox (Oracle) 	/*
146393b13ecaSMatthew Wilcox (Oracle) 	 * Prefer looping in the allocator rather than here, at least that
146493b13ecaSMatthew Wilcox (Oracle) 	 * code knows what it's doing.
146593b13ecaSMatthew Wilcox (Oracle) 	 */
146693b13ecaSMatthew Wilcox (Oracle) 	gfp |= __GFP_NOFAIL;
146793b13ecaSMatthew Wilcox (Oracle) 
146893b13ecaSMatthew Wilcox (Oracle) 	bh = bdev_getblk(bdev, block, size, gfp);
14691da177e4SLinus Torvalds 
1470a3e713b5SAndrew Morton 	if (likely(bh) && !buffer_uptodate(bh))
14711da177e4SLinus Torvalds 		bh = __bread_slow(bh);
14721da177e4SLinus Torvalds 	return bh;
14731da177e4SLinus Torvalds }
14743b5e6454SGioh Kim EXPORT_SYMBOL(__bread_gfp);
14751da177e4SLinus Torvalds 
14768cc621d2SMinchan Kim static void __invalidate_bh_lrus(struct bh_lru *b)
14778cc621d2SMinchan Kim {
14788cc621d2SMinchan Kim 	int i;
14798cc621d2SMinchan Kim 
14808cc621d2SMinchan Kim 	for (i = 0; i < BH_LRU_SIZE; i++) {
14818cc621d2SMinchan Kim 		brelse(b->bhs[i]);
14828cc621d2SMinchan Kim 		b->bhs[i] = NULL;
14838cc621d2SMinchan Kim 	}
14848cc621d2SMinchan Kim }
14851da177e4SLinus Torvalds /*
14861da177e4SLinus Torvalds  * invalidate_bh_lrus() is called rarely - but not only at unmount.
14871da177e4SLinus Torvalds  * This doesn't race because it runs in each cpu either in irq
14881da177e4SLinus Torvalds  * or with preempt disabled.
14891da177e4SLinus Torvalds  */
14901da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg)
14911da177e4SLinus Torvalds {
14921da177e4SLinus Torvalds 	struct bh_lru *b = &get_cpu_var(bh_lrus);
14931da177e4SLinus Torvalds 
14948cc621d2SMinchan Kim 	__invalidate_bh_lrus(b);
14951da177e4SLinus Torvalds 	put_cpu_var(bh_lrus);
14961da177e4SLinus Torvalds }
14971da177e4SLinus Torvalds 
14988cc621d2SMinchan Kim bool has_bh_in_lru(int cpu, void *dummy)
149942be35d0SGilad Ben-Yossef {
150042be35d0SGilad Ben-Yossef 	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
150142be35d0SGilad Ben-Yossef 	int i;
150242be35d0SGilad Ben-Yossef 
150342be35d0SGilad Ben-Yossef 	for (i = 0; i < BH_LRU_SIZE; i++) {
150442be35d0SGilad Ben-Yossef 		if (b->bhs[i])
15051d706679SSaurav Girepunje 			return true;
150642be35d0SGilad Ben-Yossef 	}
150742be35d0SGilad Ben-Yossef 
15081d706679SSaurav Girepunje 	return false;
150942be35d0SGilad Ben-Yossef }
151042be35d0SGilad Ben-Yossef 
1511f9a14399SPeter Zijlstra void invalidate_bh_lrus(void)
15121da177e4SLinus Torvalds {
1513cb923159SSebastian Andrzej Siewior 	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
15141da177e4SLinus Torvalds }
15159db5579bSNick Piggin EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
15161da177e4SLinus Torvalds 
1517243418e3SMinchan Kim /*
1518243418e3SMinchan Kim  * It's called from workqueue context so we need a bh_lru_lock to close
1519243418e3SMinchan Kim  * the race with preemption/irq.
1520243418e3SMinchan Kim  */
1521243418e3SMinchan Kim void invalidate_bh_lrus_cpu(void)
15228cc621d2SMinchan Kim {
15238cc621d2SMinchan Kim 	struct bh_lru *b;
15248cc621d2SMinchan Kim 
15258cc621d2SMinchan Kim 	bh_lru_lock();
1526243418e3SMinchan Kim 	b = this_cpu_ptr(&bh_lrus);
15278cc621d2SMinchan Kim 	__invalidate_bh_lrus(b);
15288cc621d2SMinchan Kim 	bh_lru_unlock();
15298cc621d2SMinchan Kim }
15308cc621d2SMinchan Kim 
1531465e5e6aSPankaj Raghav void folio_set_bh(struct buffer_head *bh, struct folio *folio,
1532465e5e6aSPankaj Raghav 		  unsigned long offset)
1533465e5e6aSPankaj Raghav {
1534465e5e6aSPankaj Raghav 	bh->b_folio = folio;
1535465e5e6aSPankaj Raghav 	BUG_ON(offset >= folio_size(folio));
1536465e5e6aSPankaj Raghav 	if (folio_test_highmem(folio))
1537465e5e6aSPankaj Raghav 		/*
1538465e5e6aSPankaj Raghav 		 * This catches illegal uses and preserves the offset:
1539465e5e6aSPankaj Raghav 		 */
1540465e5e6aSPankaj Raghav 		bh->b_data = (char *)(0 + offset);
1541465e5e6aSPankaj Raghav 	else
1542465e5e6aSPankaj Raghav 		bh->b_data = folio_address(folio) + offset;
1543465e5e6aSPankaj Raghav }
1544465e5e6aSPankaj Raghav EXPORT_SYMBOL(folio_set_bh);
1545465e5e6aSPankaj Raghav 
15461da177e4SLinus Torvalds /*
15471da177e4SLinus Torvalds  * Called when truncating a buffer on a page completely.
15481da177e4SLinus Torvalds  */
1549e7470ee8SMel Gorman 
1550e7470ee8SMel Gorman /* Bits that are cleared during an invalidate */
1551e7470ee8SMel Gorman #define BUFFER_FLAGS_DISCARD \
1552e7470ee8SMel Gorman 	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
1553e7470ee8SMel Gorman 	 1 << BH_Delay | 1 << BH_Unwritten)
1554e7470ee8SMel Gorman 
1555858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh)
15561da177e4SLinus Torvalds {
1557b0192296SUros Bizjak 	unsigned long b_state;
1558e7470ee8SMel Gorman 
15591da177e4SLinus Torvalds 	lock_buffer(bh);
15601da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
15611da177e4SLinus Torvalds 	bh->b_bdev = NULL;
1562b0192296SUros Bizjak 	b_state = READ_ONCE(bh->b_state);
1563b0192296SUros Bizjak 	do {
1564b0192296SUros Bizjak 	} while (!try_cmpxchg(&bh->b_state, &b_state,
1565b0192296SUros Bizjak 			      b_state & ~BUFFER_FLAGS_DISCARD));
15661da177e4SLinus Torvalds 	unlock_buffer(bh);
15671da177e4SLinus Torvalds }
15681da177e4SLinus Torvalds 
15691da177e4SLinus Torvalds /**
15707ba13abbSMatthew Wilcox (Oracle)  * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
15717ba13abbSMatthew Wilcox (Oracle)  * @folio: The folio which is affected.
1572d47992f8SLukas Czerner  * @offset: start of the range to invalidate
1573d47992f8SLukas Czerner  * @length: length of the range to invalidate
15741da177e4SLinus Torvalds  *
15757ba13abbSMatthew Wilcox (Oracle)  * block_invalidate_folio() is called when all or part of the folio has been
15761da177e4SLinus Torvalds  * invalidated by a truncate operation.
15771da177e4SLinus Torvalds  *
15787ba13abbSMatthew Wilcox (Oracle)  * block_invalidate_folio() does not have to release all buffers, but it must
15791da177e4SLinus Torvalds  * ensure that no dirty buffer is left outside @offset and that no I/O
15801da177e4SLinus Torvalds  * is underway against any of the blocks which are outside the truncation
15811da177e4SLinus Torvalds  * point.  Because the caller is about to free (and possibly reuse) those
15821da177e4SLinus Torvalds  * blocks on-disk.
15831da177e4SLinus Torvalds  */
15847ba13abbSMatthew Wilcox (Oracle) void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
15851da177e4SLinus Torvalds {
15861da177e4SLinus Torvalds 	struct buffer_head *head, *bh, *next;
15877ba13abbSMatthew Wilcox (Oracle) 	size_t curr_off = 0;
15887ba13abbSMatthew Wilcox (Oracle) 	size_t stop = length + offset;
15891da177e4SLinus Torvalds 
15907ba13abbSMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
15911da177e4SLinus Torvalds 
1592d47992f8SLukas Czerner 	/*
1593d47992f8SLukas Czerner 	 * Check for overflow
1594d47992f8SLukas Czerner 	 */
15957ba13abbSMatthew Wilcox (Oracle) 	BUG_ON(stop > folio_size(folio) || stop < length);
1596d47992f8SLukas Czerner 
15977ba13abbSMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
15987ba13abbSMatthew Wilcox (Oracle) 	if (!head)
15997ba13abbSMatthew Wilcox (Oracle) 		return;
16007ba13abbSMatthew Wilcox (Oracle) 
16011da177e4SLinus Torvalds 	bh = head;
16021da177e4SLinus Torvalds 	do {
16037ba13abbSMatthew Wilcox (Oracle) 		size_t next_off = curr_off + bh->b_size;
16041da177e4SLinus Torvalds 		next = bh->b_this_page;
16051da177e4SLinus Torvalds 
16061da177e4SLinus Torvalds 		/*
1607d47992f8SLukas Czerner 		 * Are we still fully in range ?
1608d47992f8SLukas Czerner 		 */
1609d47992f8SLukas Czerner 		if (next_off > stop)
1610d47992f8SLukas Czerner 			goto out;
1611d47992f8SLukas Czerner 
1612d47992f8SLukas Czerner 		/*
16131da177e4SLinus Torvalds 		 * is this block fully invalidated?
16141da177e4SLinus Torvalds 		 */
16151da177e4SLinus Torvalds 		if (offset <= curr_off)
16161da177e4SLinus Torvalds 			discard_buffer(bh);
16171da177e4SLinus Torvalds 		curr_off = next_off;
16181da177e4SLinus Torvalds 		bh = next;
16191da177e4SLinus Torvalds 	} while (bh != head);
16201da177e4SLinus Torvalds 
16211da177e4SLinus Torvalds 	/*
16227ba13abbSMatthew Wilcox (Oracle) 	 * We release buffers only if the entire folio is being invalidated.
16231da177e4SLinus Torvalds 	 * The get_block cached value has been unconditionally invalidated,
16241da177e4SLinus Torvalds 	 * so real IO is not possible anymore.
16251da177e4SLinus Torvalds 	 */
16267ba13abbSMatthew Wilcox (Oracle) 	if (length == folio_size(folio))
16277ba13abbSMatthew Wilcox (Oracle) 		filemap_release_folio(folio, 0);
16281da177e4SLinus Torvalds out:
16292ff28e22SNeilBrown 	return;
16301da177e4SLinus Torvalds }
16317ba13abbSMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_invalidate_folio);
16321da177e4SLinus Torvalds 
16331da177e4SLinus Torvalds /*
16341da177e4SLinus Torvalds  * We attach and possibly dirty the buffers atomically wrt
1635e621900aSMatthew Wilcox (Oracle)  * block_dirty_folio() via private_lock.  try_to_free_buffers
16368e2e1756SPankaj Raghav  * is already excluded via the folio lock.
16371da177e4SLinus Torvalds  */
16380a88810dSMatthew Wilcox (Oracle) struct buffer_head *create_empty_buffers(struct folio *folio,
16393decb856SMatthew Wilcox (Oracle) 		unsigned long blocksize, unsigned long b_state)
16401da177e4SLinus Torvalds {
16411da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *tail;
16422a418157SMatthew Wilcox (Oracle) 	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;
16431da177e4SLinus Torvalds 
16442a418157SMatthew Wilcox (Oracle) 	head = folio_alloc_buffers(folio, blocksize, gfp);
16451da177e4SLinus Torvalds 	bh = head;
16461da177e4SLinus Torvalds 	do {
16471da177e4SLinus Torvalds 		bh->b_state |= b_state;
16481da177e4SLinus Torvalds 		tail = bh;
16491da177e4SLinus Torvalds 		bh = bh->b_this_page;
16501da177e4SLinus Torvalds 	} while (bh);
16511da177e4SLinus Torvalds 	tail->b_this_page = head;
16521da177e4SLinus Torvalds 
16538e2e1756SPankaj Raghav 	spin_lock(&folio->mapping->private_lock);
16548e2e1756SPankaj Raghav 	if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
16551da177e4SLinus Torvalds 		bh = head;
16561da177e4SLinus Torvalds 		do {
16578e2e1756SPankaj Raghav 			if (folio_test_dirty(folio))
16581da177e4SLinus Torvalds 				set_buffer_dirty(bh);
16598e2e1756SPankaj Raghav 			if (folio_test_uptodate(folio))
16601da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
16611da177e4SLinus Torvalds 			bh = bh->b_this_page;
16621da177e4SLinus Torvalds 		} while (bh != head);
16631da177e4SLinus Torvalds 	}
16648e2e1756SPankaj Raghav 	folio_attach_private(folio, head);
16658e2e1756SPankaj Raghav 	spin_unlock(&folio->mapping->private_lock);
16663decb856SMatthew Wilcox (Oracle) 
16673decb856SMatthew Wilcox (Oracle) 	return head;
16688e2e1756SPankaj Raghav }
16691da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers);
16701da177e4SLinus Torvalds 
167129f3ad7dSJan Kara /**
167229f3ad7dSJan Kara  * clean_bdev_aliases: clean a range of buffers in block device
167329f3ad7dSJan Kara  * @bdev: Block device to clean buffers in
167429f3ad7dSJan Kara  * @block: Start of a range of blocks to clean
167529f3ad7dSJan Kara  * @len: Number of blocks to clean
16761da177e4SLinus Torvalds  *
167729f3ad7dSJan Kara  * We are taking a range of blocks for data and we don't want writeback of any
167829f3ad7dSJan Kara  * buffer-cache aliases starting from return from this function and until the
167929f3ad7dSJan Kara  * moment when something will explicitly mark the buffer dirty (hopefully that
168029f3ad7dSJan Kara  * will not happen until we will free that block ;-) We don't even need to mark
168129f3ad7dSJan Kara  * it not-uptodate - nobody can expect anything from a newly allocated buffer
168229f3ad7dSJan Kara  * anyway. We used to use unmap_buffer() for such invalidation, but that was
168329f3ad7dSJan Kara  * wrong. We definitely don't want to mark the alias unmapped, for example - it
168429f3ad7dSJan Kara  * would confuse anyone who might pick it with bread() afterwards...
168529f3ad7dSJan Kara  *
168629f3ad7dSJan Kara  * Also..  Note that bforget() doesn't lock the buffer.  So there can be
168729f3ad7dSJan Kara  * writeout I/O going on against recently-freed buffers.  We don't wait on that
168829f3ad7dSJan Kara  * I/O in bforget() - it's more efficient to wait on the I/O only if we really
168929f3ad7dSJan Kara  * need to.  That happens here.
16901da177e4SLinus Torvalds  */
169129f3ad7dSJan Kara void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
16921da177e4SLinus Torvalds {
169329f3ad7dSJan Kara 	struct inode *bd_inode = bdev->bd_inode;
169429f3ad7dSJan Kara 	struct address_space *bd_mapping = bd_inode->i_mapping;
16959e0b6f31SMatthew Wilcox (Oracle) 	struct folio_batch fbatch;
169629f3ad7dSJan Kara 	pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
169729f3ad7dSJan Kara 	pgoff_t end;
1698c10f778dSJan Kara 	int i, count;
169929f3ad7dSJan Kara 	struct buffer_head *bh;
170029f3ad7dSJan Kara 	struct buffer_head *head;
17011da177e4SLinus Torvalds 
170229f3ad7dSJan Kara 	end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
17039e0b6f31SMatthew Wilcox (Oracle) 	folio_batch_init(&fbatch);
17049e0b6f31SMatthew Wilcox (Oracle) 	while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
17059e0b6f31SMatthew Wilcox (Oracle) 		count = folio_batch_count(&fbatch);
1706c10f778dSJan Kara 		for (i = 0; i < count; i++) {
17079e0b6f31SMatthew Wilcox (Oracle) 			struct folio *folio = fbatch.folios[i];
17081da177e4SLinus Torvalds 
17099e0b6f31SMatthew Wilcox (Oracle) 			if (!folio_buffers(folio))
171029f3ad7dSJan Kara 				continue;
171129f3ad7dSJan Kara 			/*
17129e0b6f31SMatthew Wilcox (Oracle) 			 * We use folio lock instead of bd_mapping->private_lock
171329f3ad7dSJan Kara 			 * to pin buffers here since we can afford to sleep and
171429f3ad7dSJan Kara 			 * it scales better than a global spinlock lock.
171529f3ad7dSJan Kara 			 */
17169e0b6f31SMatthew Wilcox (Oracle) 			folio_lock(folio);
17179e0b6f31SMatthew Wilcox (Oracle) 			/* Recheck when the folio is locked which pins bhs */
17189e0b6f31SMatthew Wilcox (Oracle) 			head = folio_buffers(folio);
17199e0b6f31SMatthew Wilcox (Oracle) 			if (!head)
172029f3ad7dSJan Kara 				goto unlock_page;
172129f3ad7dSJan Kara 			bh = head;
172229f3ad7dSJan Kara 			do {
17236c006a9dSChandan Rajendra 				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
172429f3ad7dSJan Kara 					goto next;
172529f3ad7dSJan Kara 				if (bh->b_blocknr >= block + len)
172629f3ad7dSJan Kara 					break;
172729f3ad7dSJan Kara 				clear_buffer_dirty(bh);
172829f3ad7dSJan Kara 				wait_on_buffer(bh);
172929f3ad7dSJan Kara 				clear_buffer_req(bh);
173029f3ad7dSJan Kara next:
173129f3ad7dSJan Kara 				bh = bh->b_this_page;
173229f3ad7dSJan Kara 			} while (bh != head);
173329f3ad7dSJan Kara unlock_page:
17349e0b6f31SMatthew Wilcox (Oracle) 			folio_unlock(folio);
173529f3ad7dSJan Kara 		}
17369e0b6f31SMatthew Wilcox (Oracle) 		folio_batch_release(&fbatch);
173729f3ad7dSJan Kara 		cond_resched();
1738c10f778dSJan Kara 		/* End of range already reached? */
1739c10f778dSJan Kara 		if (index > end || !index)
1740c10f778dSJan Kara 			break;
17411da177e4SLinus Torvalds 	}
17421da177e4SLinus Torvalds }
174329f3ad7dSJan Kara EXPORT_SYMBOL(clean_bdev_aliases);
17441da177e4SLinus Torvalds 
17451da177e4SLinus Torvalds /*
174645bce8f3SLinus Torvalds  * Size is a power-of-two in the range 512..PAGE_SIZE,
174745bce8f3SLinus Torvalds  * and the case we care about most is PAGE_SIZE.
174845bce8f3SLinus Torvalds  *
174945bce8f3SLinus Torvalds  * So this *could* possibly be written with those
175045bce8f3SLinus Torvalds  * constraints in mind (relevant mostly if some
175145bce8f3SLinus Torvalds  * architecture has a slow bit-scan instruction)
175245bce8f3SLinus Torvalds  */
175345bce8f3SLinus Torvalds static inline int block_size_bits(unsigned int blocksize)
175445bce8f3SLinus Torvalds {
175545bce8f3SLinus Torvalds 	return ilog2(blocksize);
175645bce8f3SLinus Torvalds }
175745bce8f3SLinus Torvalds 
1758c6c8c3e7SPankaj Raghav static struct buffer_head *folio_create_buffers(struct folio *folio,
1759c6c8c3e7SPankaj Raghav 						struct inode *inode,
1760c6c8c3e7SPankaj Raghav 						unsigned int b_state)
176145bce8f3SLinus Torvalds {
17623decb856SMatthew Wilcox (Oracle) 	struct buffer_head *bh;
17633decb856SMatthew Wilcox (Oracle) 
1764c6c8c3e7SPankaj Raghav 	BUG_ON(!folio_test_locked(folio));
176545bce8f3SLinus Torvalds 
17663decb856SMatthew Wilcox (Oracle) 	bh = folio_buffers(folio);
17673decb856SMatthew Wilcox (Oracle) 	if (!bh)
17680a88810dSMatthew Wilcox (Oracle) 		bh = create_empty_buffers(folio,
17693decb856SMatthew Wilcox (Oracle) 				1 << READ_ONCE(inode->i_blkbits), b_state);
17703decb856SMatthew Wilcox (Oracle) 	return bh;
177145bce8f3SLinus Torvalds }
177245bce8f3SLinus Torvalds 
177345bce8f3SLinus Torvalds /*
17741da177e4SLinus Torvalds  * NOTE! All mapped/uptodate combinations are valid:
17751da177e4SLinus Torvalds  *
17761da177e4SLinus Torvalds  *	Mapped	Uptodate	Meaning
17771da177e4SLinus Torvalds  *
17781da177e4SLinus Torvalds  *	No	No		"unknown" - must do get_block()
17791da177e4SLinus Torvalds  *	No	Yes		"hole" - zero-filled
17801da177e4SLinus Torvalds  *	Yes	No		"allocated" - allocated on disk, not read in
17811da177e4SLinus Torvalds  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
17821da177e4SLinus Torvalds  *
17831da177e4SLinus Torvalds  * "Dirty" is valid only with the last case (mapped+uptodate).
17841da177e4SLinus Torvalds  */
17851da177e4SLinus Torvalds 
17861da177e4SLinus Torvalds /*
17871da177e4SLinus Torvalds  * While block_write_full_page is writing back the dirty buffers under
17881da177e4SLinus Torvalds  * the page lock, whoever dirtied the buffers may decide to clean them
17891da177e4SLinus Torvalds  * again at any time.  We handle that by only looking at the buffer
17901da177e4SLinus Torvalds  * state inside lock_buffer().
17911da177e4SLinus Torvalds  *
17921da177e4SLinus Torvalds  * If block_write_full_page() is called for regular writeback
17931da177e4SLinus Torvalds  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
17941da177e4SLinus Torvalds  * locked buffer.   This only can happen if someone has written the buffer
17951da177e4SLinus Torvalds  * directly, with submit_bh().  At the address_space level PageWriteback
17961da177e4SLinus Torvalds  * prevents this contention from occurring.
17976e34eeddSTheodore Ts'o  *
17986e34eeddSTheodore Ts'o  * If block_write_full_page() is called with wbc->sync_mode ==
179970fd7614SChristoph Hellwig  * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1800721a9602SJens Axboe  * causes the writes to be flagged as synchronous writes.
18011da177e4SLinus Torvalds  */
180253418a18SMatthew Wilcox (Oracle) int __block_write_full_folio(struct inode *inode, struct folio *folio,
180335c80d5fSChris Mason 			get_block_t *get_block, struct writeback_control *wbc,
180435c80d5fSChris Mason 			bh_end_io_t *handler)
18051da177e4SLinus Torvalds {
18061da177e4SLinus Torvalds 	int err;
18071da177e4SLinus Torvalds 	sector_t block;
18081da177e4SLinus Torvalds 	sector_t last_block;
1809f0fbd5fcSAndrew Morton 	struct buffer_head *bh, *head;
181045bce8f3SLinus Torvalds 	unsigned int blocksize, bbits;
18111da177e4SLinus Torvalds 	int nr_underway = 0;
18123ae72869SBart Van Assche 	blk_opf_t write_flags = wbc_to_write_flags(wbc);
18131da177e4SLinus Torvalds 
181453418a18SMatthew Wilcox (Oracle) 	head = folio_create_buffers(folio, inode,
18151da177e4SLinus Torvalds 				    (1 << BH_Dirty) | (1 << BH_Uptodate));
18161da177e4SLinus Torvalds 
18171da177e4SLinus Torvalds 	/*
1818e621900aSMatthew Wilcox (Oracle) 	 * Be very careful.  We have no exclusion from block_dirty_folio
18191da177e4SLinus Torvalds 	 * here, and the (potentially unmapped) buffers may become dirty at
18201da177e4SLinus Torvalds 	 * any time.  If a buffer becomes dirty here after we've inspected it
182153418a18SMatthew Wilcox (Oracle) 	 * then we just miss that fact, and the folio stays dirty.
18221da177e4SLinus Torvalds 	 *
1823e621900aSMatthew Wilcox (Oracle) 	 * Buffers outside i_size may be dirtied by block_dirty_folio;
18241da177e4SLinus Torvalds 	 * handle that here by just cleaning them.
18251da177e4SLinus Torvalds 	 */
18261da177e4SLinus Torvalds 
18271da177e4SLinus Torvalds 	bh = head;
182845bce8f3SLinus Torvalds 	blocksize = bh->b_size;
182945bce8f3SLinus Torvalds 	bbits = block_size_bits(blocksize);
183045bce8f3SLinus Torvalds 
183153418a18SMatthew Wilcox (Oracle) 	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
183245bce8f3SLinus Torvalds 	last_block = (i_size_read(inode) - 1) >> bbits;
18331da177e4SLinus Torvalds 
18341da177e4SLinus Torvalds 	/*
18351da177e4SLinus Torvalds 	 * Get all the dirty buffers mapped to disk addresses and
18361da177e4SLinus Torvalds 	 * handle any aliases from the underlying blockdev's mapping.
18371da177e4SLinus Torvalds 	 */
18381da177e4SLinus Torvalds 	do {
18391da177e4SLinus Torvalds 		if (block > last_block) {
18401da177e4SLinus Torvalds 			/*
18411da177e4SLinus Torvalds 			 * mapped buffers outside i_size will occur, because
184253418a18SMatthew Wilcox (Oracle) 			 * this folio can be outside i_size when there is a
18431da177e4SLinus Torvalds 			 * truncate in progress.
18441da177e4SLinus Torvalds 			 */
18451da177e4SLinus Torvalds 			/*
18461da177e4SLinus Torvalds 			 * The buffer was zeroed by block_write_full_page()
18471da177e4SLinus Torvalds 			 */
18481da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
18491da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
185029a814d2SAlex Tomas 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
185129a814d2SAlex Tomas 			   buffer_dirty(bh)) {
1852b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
18531da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
18541da177e4SLinus Torvalds 			if (err)
18551da177e4SLinus Torvalds 				goto recover;
185629a814d2SAlex Tomas 			clear_buffer_delay(bh);
18571da177e4SLinus Torvalds 			if (buffer_new(bh)) {
18581da177e4SLinus Torvalds 				/* blockdev mappings never come here */
18591da177e4SLinus Torvalds 				clear_buffer_new(bh);
1860e64855c6SJan Kara 				clean_bdev_bh_alias(bh);
18611da177e4SLinus Torvalds 			}
18621da177e4SLinus Torvalds 		}
18631da177e4SLinus Torvalds 		bh = bh->b_this_page;
18641da177e4SLinus Torvalds 		block++;
18651da177e4SLinus Torvalds 	} while (bh != head);
18661da177e4SLinus Torvalds 
18671da177e4SLinus Torvalds 	do {
18681da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
18691da177e4SLinus Torvalds 			continue;
18701da177e4SLinus Torvalds 		/*
18711da177e4SLinus Torvalds 		 * If it's a fully non-blocking write attempt and we cannot
187253418a18SMatthew Wilcox (Oracle) 		 * lock the buffer then redirty the folio.  Note that this can
18735b0830cbSJens Axboe 		 * potentially cause a busy-wait loop from writeback threads
18745b0830cbSJens Axboe 		 * and kswapd activity, but those code paths have their own
18755b0830cbSJens Axboe 		 * higher-level throttling.
18761da177e4SLinus Torvalds 		 */
18771b430beeSWu Fengguang 		if (wbc->sync_mode != WB_SYNC_NONE) {
18781da177e4SLinus Torvalds 			lock_buffer(bh);
1879ca5de404SNick Piggin 		} else if (!trylock_buffer(bh)) {
188053418a18SMatthew Wilcox (Oracle) 			folio_redirty_for_writepage(wbc, folio);
18811da177e4SLinus Torvalds 			continue;
18821da177e4SLinus Torvalds 		}
18831da177e4SLinus Torvalds 		if (test_clear_buffer_dirty(bh)) {
188435c80d5fSChris Mason 			mark_buffer_async_write_endio(bh, handler);
18851da177e4SLinus Torvalds 		} else {
18861da177e4SLinus Torvalds 			unlock_buffer(bh);
18871da177e4SLinus Torvalds 		}
18881da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
18891da177e4SLinus Torvalds 
18901da177e4SLinus Torvalds 	/*
189153418a18SMatthew Wilcox (Oracle) 	 * The folio and its buffers are protected by the writeback flag,
189253418a18SMatthew Wilcox (Oracle) 	 * so we can drop the bh refcounts early.
18931da177e4SLinus Torvalds 	 */
189453418a18SMatthew Wilcox (Oracle) 	BUG_ON(folio_test_writeback(folio));
189553418a18SMatthew Wilcox (Oracle) 	folio_start_writeback(folio);
18961da177e4SLinus Torvalds 
18971da177e4SLinus Torvalds 	do {
18981da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
18991da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
19001420c4a5SBart Van Assche 			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
19011da177e4SLinus Torvalds 			nr_underway++;
1902ad576e63SNick Piggin 		}
19031da177e4SLinus Torvalds 		bh = next;
19041da177e4SLinus Torvalds 	} while (bh != head);
190553418a18SMatthew Wilcox (Oracle) 	folio_unlock(folio);
19061da177e4SLinus Torvalds 
19071da177e4SLinus Torvalds 	err = 0;
19081da177e4SLinus Torvalds done:
19091da177e4SLinus Torvalds 	if (nr_underway == 0) {
19101da177e4SLinus Torvalds 		/*
191153418a18SMatthew Wilcox (Oracle) 		 * The folio was marked dirty, but the buffers were
19121da177e4SLinus Torvalds 		 * clean.  Someone wrote them back by hand with
191379f59784SZhang Yi 		 * write_dirty_buffer/submit_bh.  A rare case.
19141da177e4SLinus Torvalds 		 */
191553418a18SMatthew Wilcox (Oracle) 		folio_end_writeback(folio);
19163d67f2d7SNick Piggin 
19171da177e4SLinus Torvalds 		/*
191853418a18SMatthew Wilcox (Oracle) 		 * The folio and buffer_heads can be released at any time from
19191da177e4SLinus Torvalds 		 * here on.
19201da177e4SLinus Torvalds 		 */
19211da177e4SLinus Torvalds 	}
19221da177e4SLinus Torvalds 	return err;
19231da177e4SLinus Torvalds 
19241da177e4SLinus Torvalds recover:
19251da177e4SLinus Torvalds 	/*
19261da177e4SLinus Torvalds 	 * ENOSPC, or some other error.  We may already have added some
19271da177e4SLinus Torvalds 	 * blocks to the file, so we need to write these out to avoid
19281da177e4SLinus Torvalds 	 * exposing stale data.
192953418a18SMatthew Wilcox (Oracle) 	 * The folio is currently locked and not marked for writeback
19301da177e4SLinus Torvalds 	 */
19311da177e4SLinus Torvalds 	bh = head;
19321da177e4SLinus Torvalds 	/* Recovery: lock and submit the mapped buffers */
19331da177e4SLinus Torvalds 	do {
193429a814d2SAlex Tomas 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
193529a814d2SAlex Tomas 		    !buffer_delay(bh)) {
19361da177e4SLinus Torvalds 			lock_buffer(bh);
193735c80d5fSChris Mason 			mark_buffer_async_write_endio(bh, handler);
19381da177e4SLinus Torvalds 		} else {
19391da177e4SLinus Torvalds 			/*
19401da177e4SLinus Torvalds 			 * The buffer may have been set dirty during
194153418a18SMatthew Wilcox (Oracle) 			 * attachment to a dirty folio.
19421da177e4SLinus Torvalds 			 */
19431da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
19441da177e4SLinus Torvalds 		}
19451da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
194653418a18SMatthew Wilcox (Oracle) 	folio_set_error(folio);
194753418a18SMatthew Wilcox (Oracle) 	BUG_ON(folio_test_writeback(folio));
194853418a18SMatthew Wilcox (Oracle) 	mapping_set_error(folio->mapping, err);
194953418a18SMatthew Wilcox (Oracle) 	folio_start_writeback(folio);
19501da177e4SLinus Torvalds 	do {
19511da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
19521da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
19531da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
19541420c4a5SBart Van Assche 			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
19551da177e4SLinus Torvalds 			nr_underway++;
1956ad576e63SNick Piggin 		}
19571da177e4SLinus Torvalds 		bh = next;
19581da177e4SLinus Torvalds 	} while (bh != head);
195953418a18SMatthew Wilcox (Oracle) 	folio_unlock(folio);
19601da177e4SLinus Torvalds 	goto done;
19611da177e4SLinus Torvalds }
196253418a18SMatthew Wilcox (Oracle) EXPORT_SYMBOL(__block_write_full_folio);
19631da177e4SLinus Torvalds 
1964afddba49SNick Piggin /*
19654a9622f2SMatthew Wilcox (Oracle)  * If a folio has any new buffers, zero them out here, and mark them uptodate
1966afddba49SNick Piggin  * and dirty so they'll be written out (in order to prevent uninitialised
1967afddba49SNick Piggin  * block data from leaking). And clear the new bit.
1968afddba49SNick Piggin  */
19694a9622f2SMatthew Wilcox (Oracle) void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
1970afddba49SNick Piggin {
19714a9622f2SMatthew Wilcox (Oracle) 	size_t block_start, block_end;
1972afddba49SNick Piggin 	struct buffer_head *head, *bh;
1973afddba49SNick Piggin 
19744a9622f2SMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
19754a9622f2SMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
19764a9622f2SMatthew Wilcox (Oracle) 	if (!head)
1977afddba49SNick Piggin 		return;
1978afddba49SNick Piggin 
19794a9622f2SMatthew Wilcox (Oracle) 	bh = head;
1980afddba49SNick Piggin 	block_start = 0;
1981afddba49SNick Piggin 	do {
1982afddba49SNick Piggin 		block_end = block_start + bh->b_size;
1983afddba49SNick Piggin 
1984afddba49SNick Piggin 		if (buffer_new(bh)) {
1985afddba49SNick Piggin 			if (block_end > from && block_start < to) {
19864a9622f2SMatthew Wilcox (Oracle) 				if (!folio_test_uptodate(folio)) {
19874a9622f2SMatthew Wilcox (Oracle) 					size_t start, xend;
1988afddba49SNick Piggin 
1989afddba49SNick Piggin 					start = max(from, block_start);
19904a9622f2SMatthew Wilcox (Oracle) 					xend = min(to, block_end);
1991afddba49SNick Piggin 
19924a9622f2SMatthew Wilcox (Oracle) 					folio_zero_segment(folio, start, xend);
1993afddba49SNick Piggin 					set_buffer_uptodate(bh);
1994afddba49SNick Piggin 				}
1995afddba49SNick Piggin 
1996afddba49SNick Piggin 				clear_buffer_new(bh);
1997afddba49SNick Piggin 				mark_buffer_dirty(bh);
1998afddba49SNick Piggin 			}
1999afddba49SNick Piggin 		}
2000afddba49SNick Piggin 
2001afddba49SNick Piggin 		block_start = block_end;
2002afddba49SNick Piggin 		bh = bh->b_this_page;
2003afddba49SNick Piggin 	} while (bh != head);
2004afddba49SNick Piggin }
20054a9622f2SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_zero_new_buffers);
2006afddba49SNick Piggin 
20074aa8cdd5SChristoph Hellwig static int
2008ae259a9cSChristoph Hellwig iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
20096d49cc85SChristoph Hellwig 		const struct iomap *iomap)
2010ae259a9cSChristoph Hellwig {
2011*80844194SMatthew Wilcox (Oracle) 	loff_t offset = (loff_t)block << inode->i_blkbits;
2012ae259a9cSChristoph Hellwig 
2013ae259a9cSChristoph Hellwig 	bh->b_bdev = iomap->bdev;
2014ae259a9cSChristoph Hellwig 
2015ae259a9cSChristoph Hellwig 	/*
2016ae259a9cSChristoph Hellwig 	 * Block points to offset in file we need to map, iomap contains
2017ae259a9cSChristoph Hellwig 	 * the offset at which the map starts. If the map ends before the
2018ae259a9cSChristoph Hellwig 	 * current block, then do not map the buffer and let the caller
2019ae259a9cSChristoph Hellwig 	 * handle it.
2020ae259a9cSChristoph Hellwig 	 */
20214aa8cdd5SChristoph Hellwig 	if (offset >= iomap->offset + iomap->length)
20224aa8cdd5SChristoph Hellwig 		return -EIO;
2023ae259a9cSChristoph Hellwig 
2024ae259a9cSChristoph Hellwig 	switch (iomap->type) {
2025ae259a9cSChristoph Hellwig 	case IOMAP_HOLE:
2026ae259a9cSChristoph Hellwig 		/*
2027ae259a9cSChristoph Hellwig 		 * If the buffer is not up to date or beyond the current EOF,
2028ae259a9cSChristoph Hellwig 		 * we need to mark it as new to ensure sub-block zeroing is
2029ae259a9cSChristoph Hellwig 		 * executed if necessary.
2030ae259a9cSChristoph Hellwig 		 */
2031ae259a9cSChristoph Hellwig 		if (!buffer_uptodate(bh) ||
2032ae259a9cSChristoph Hellwig 		    (offset >= i_size_read(inode)))
2033ae259a9cSChristoph Hellwig 			set_buffer_new(bh);
20344aa8cdd5SChristoph Hellwig 		return 0;
2035ae259a9cSChristoph Hellwig 	case IOMAP_DELALLOC:
2036ae259a9cSChristoph Hellwig 		if (!buffer_uptodate(bh) ||
2037ae259a9cSChristoph Hellwig 		    (offset >= i_size_read(inode)))
2038ae259a9cSChristoph Hellwig 			set_buffer_new(bh);
2039ae259a9cSChristoph Hellwig 		set_buffer_uptodate(bh);
2040ae259a9cSChristoph Hellwig 		set_buffer_mapped(bh);
2041ae259a9cSChristoph Hellwig 		set_buffer_delay(bh);
20424aa8cdd5SChristoph Hellwig 		return 0;
2043ae259a9cSChristoph Hellwig 	case IOMAP_UNWRITTEN:
2044ae259a9cSChristoph Hellwig 		/*
20453d7b6b21SAndreas Gruenbacher 		 * For unwritten regions, we always need to ensure that regions
20463d7b6b21SAndreas Gruenbacher 		 * in the block we are not writing to are zeroed. Mark the
20473d7b6b21SAndreas Gruenbacher 		 * buffer as new to ensure this.
2048ae259a9cSChristoph Hellwig 		 */
2049ae259a9cSChristoph Hellwig 		set_buffer_new(bh);
2050ae259a9cSChristoph Hellwig 		set_buffer_unwritten(bh);
2051df561f66SGustavo A. R. Silva 		fallthrough;
2052ae259a9cSChristoph Hellwig 	case IOMAP_MAPPED:
20533d7b6b21SAndreas Gruenbacher 		if ((iomap->flags & IOMAP_F_NEW) ||
2054381c0432SChristoph Hellwig 		    offset >= i_size_read(inode)) {
2055381c0432SChristoph Hellwig 			/*
2056381c0432SChristoph Hellwig 			 * This can happen if truncating the block device races
2057381c0432SChristoph Hellwig 			 * with the check in the caller as i_size updates on
2058381c0432SChristoph Hellwig 			 * block devices aren't synchronized by i_rwsem for
2059381c0432SChristoph Hellwig 			 * block devices.
2060381c0432SChristoph Hellwig 			 */
2061381c0432SChristoph Hellwig 			if (S_ISBLK(inode->i_mode))
2062381c0432SChristoph Hellwig 				return -EIO;
2063ae259a9cSChristoph Hellwig 			set_buffer_new(bh);
2064381c0432SChristoph Hellwig 		}
206519fe5f64SAndreas Gruenbacher 		bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
206619fe5f64SAndreas Gruenbacher 				inode->i_blkbits;
2067ae259a9cSChristoph Hellwig 		set_buffer_mapped(bh);
20684aa8cdd5SChristoph Hellwig 		return 0;
20694aa8cdd5SChristoph Hellwig 	default:
20704aa8cdd5SChristoph Hellwig 		WARN_ON_ONCE(1);
20714aa8cdd5SChristoph Hellwig 		return -EIO;
2072ae259a9cSChristoph Hellwig 	}
2073ae259a9cSChristoph Hellwig }
2074ae259a9cSChristoph Hellwig 
2075d1bd0b4eSMatthew Wilcox (Oracle) int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
20766d49cc85SChristoph Hellwig 		get_block_t *get_block, const struct iomap *iomap)
20771da177e4SLinus Torvalds {
207809cbfeafSKirill A. Shutemov 	unsigned from = pos & (PAGE_SIZE - 1);
2079ebdec241SChristoph Hellwig 	unsigned to = from + len;
2080d1bd0b4eSMatthew Wilcox (Oracle) 	struct inode *inode = folio->mapping->host;
20811da177e4SLinus Torvalds 	unsigned block_start, block_end;
20821da177e4SLinus Torvalds 	sector_t block;
20831da177e4SLinus Torvalds 	int err = 0;
20841da177e4SLinus Torvalds 	unsigned blocksize, bbits;
20851da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
20861da177e4SLinus Torvalds 
2087d1bd0b4eSMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
208809cbfeafSKirill A. Shutemov 	BUG_ON(from > PAGE_SIZE);
208909cbfeafSKirill A. Shutemov 	BUG_ON(to > PAGE_SIZE);
20901da177e4SLinus Torvalds 	BUG_ON(from > to);
20911da177e4SLinus Torvalds 
2092c6c8c3e7SPankaj Raghav 	head = folio_create_buffers(folio, inode, 0);
209345bce8f3SLinus Torvalds 	blocksize = head->b_size;
209445bce8f3SLinus Torvalds 	bbits = block_size_bits(blocksize);
20951da177e4SLinus Torvalds 
2096d1bd0b4eSMatthew Wilcox (Oracle) 	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
20971da177e4SLinus Torvalds 
20981da177e4SLinus Torvalds 	for(bh = head, block_start = 0; bh != head || !block_start;
20991da177e4SLinus Torvalds 	    block++, block_start=block_end, bh = bh->b_this_page) {
21001da177e4SLinus Torvalds 		block_end = block_start + blocksize;
21011da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
2102d1bd0b4eSMatthew Wilcox (Oracle) 			if (folio_test_uptodate(folio)) {
21031da177e4SLinus Torvalds 				if (!buffer_uptodate(bh))
21041da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
21051da177e4SLinus Torvalds 			}
21061da177e4SLinus Torvalds 			continue;
21071da177e4SLinus Torvalds 		}
21081da177e4SLinus Torvalds 		if (buffer_new(bh))
21091da177e4SLinus Torvalds 			clear_buffer_new(bh);
21101da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
2111b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
21124aa8cdd5SChristoph Hellwig 			if (get_block)
21131da177e4SLinus Torvalds 				err = get_block(inode, block, bh, 1);
21144aa8cdd5SChristoph Hellwig 			else
21154aa8cdd5SChristoph Hellwig 				err = iomap_to_bh(inode, block, bh, iomap);
21161da177e4SLinus Torvalds 			if (err)
2117f3ddbdc6SNick Piggin 				break;
2118ae259a9cSChristoph Hellwig 
21191da177e4SLinus Torvalds 			if (buffer_new(bh)) {
2120e64855c6SJan Kara 				clean_bdev_bh_alias(bh);
2121d1bd0b4eSMatthew Wilcox (Oracle) 				if (folio_test_uptodate(folio)) {
2122637aff46SNick Piggin 					clear_buffer_new(bh);
21231da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
2124637aff46SNick Piggin 					mark_buffer_dirty(bh);
21251da177e4SLinus Torvalds 					continue;
21261da177e4SLinus Torvalds 				}
2127eebd2aa3SChristoph Lameter 				if (block_end > to || block_start < from)
2128d1bd0b4eSMatthew Wilcox (Oracle) 					folio_zero_segments(folio,
2129eebd2aa3SChristoph Lameter 						to, block_end,
2130eebd2aa3SChristoph Lameter 						block_start, from);
21311da177e4SLinus Torvalds 				continue;
21321da177e4SLinus Torvalds 			}
21331da177e4SLinus Torvalds 		}
2134d1bd0b4eSMatthew Wilcox (Oracle) 		if (folio_test_uptodate(folio)) {
21351da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
21361da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
21371da177e4SLinus Torvalds 			continue;
21381da177e4SLinus Torvalds 		}
21391da177e4SLinus Torvalds 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
214033a266ddSDavid Chinner 		    !buffer_unwritten(bh) &&
21411da177e4SLinus Torvalds 		     (block_start < from || block_end > to)) {
2142e7ea1129SZhang Yi 			bh_read_nowait(bh, 0);
21431da177e4SLinus Torvalds 			*wait_bh++=bh;
21441da177e4SLinus Torvalds 		}
21451da177e4SLinus Torvalds 	}
21461da177e4SLinus Torvalds 	/*
21471da177e4SLinus Torvalds 	 * If we issued read requests - let them complete.
21481da177e4SLinus Torvalds 	 */
21491da177e4SLinus Torvalds 	while(wait_bh > wait) {
21501da177e4SLinus Torvalds 		wait_on_buffer(*--wait_bh);
21511da177e4SLinus Torvalds 		if (!buffer_uptodate(*wait_bh))
2152f3ddbdc6SNick Piggin 			err = -EIO;
21531da177e4SLinus Torvalds 	}
2154f9f07b6cSJan Kara 	if (unlikely(err))
21554a9622f2SMatthew Wilcox (Oracle) 		folio_zero_new_buffers(folio, from, to);
21561da177e4SLinus Torvalds 	return err;
21571da177e4SLinus Torvalds }
2158ae259a9cSChristoph Hellwig 
2159ae259a9cSChristoph Hellwig int __block_write_begin(struct page *page, loff_t pos, unsigned len,
2160ae259a9cSChristoph Hellwig 		get_block_t *get_block)
2161ae259a9cSChristoph Hellwig {
2162d1bd0b4eSMatthew Wilcox (Oracle) 	return __block_write_begin_int(page_folio(page), pos, len, get_block,
2163d1bd0b4eSMatthew Wilcox (Oracle) 				       NULL);
2164ae259a9cSChristoph Hellwig }
2165ebdec241SChristoph Hellwig EXPORT_SYMBOL(__block_write_begin);
21661da177e4SLinus Torvalds 
2167a524fcfeSBean Huo static void __block_commit_write(struct folio *folio, size_t from, size_t to)
21681da177e4SLinus Torvalds {
21698c6cb3e3SMatthew Wilcox (Oracle) 	size_t block_start, block_end;
21708c6cb3e3SMatthew Wilcox (Oracle) 	bool partial = false;
21711da177e4SLinus Torvalds 	unsigned blocksize;
21721da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
21731da177e4SLinus Torvalds 
21748c6cb3e3SMatthew Wilcox (Oracle) 	bh = head = folio_buffers(folio);
217545bce8f3SLinus Torvalds 	blocksize = bh->b_size;
21761da177e4SLinus Torvalds 
217745bce8f3SLinus Torvalds 	block_start = 0;
217845bce8f3SLinus Torvalds 	do {
21791da177e4SLinus Torvalds 		block_end = block_start + blocksize;
21801da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
21811da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
21828c6cb3e3SMatthew Wilcox (Oracle) 				partial = true;
21831da177e4SLinus Torvalds 		} else {
21841da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
21851da177e4SLinus Torvalds 			mark_buffer_dirty(bh);
21861da177e4SLinus Torvalds 		}
21874ebd3aecSYang Guo 		if (buffer_new(bh))
2188afddba49SNick Piggin 			clear_buffer_new(bh);
218945bce8f3SLinus Torvalds 
219045bce8f3SLinus Torvalds 		block_start = block_end;
219145bce8f3SLinus Torvalds 		bh = bh->b_this_page;
219245bce8f3SLinus Torvalds 	} while (bh != head);
21931da177e4SLinus Torvalds 
21941da177e4SLinus Torvalds 	/*
21951da177e4SLinus Torvalds 	 * If this is a partial write which happened to make all buffers
21962c69e205SMatthew Wilcox (Oracle) 	 * uptodate then we can optimize away a bogus read_folio() for
21978c6cb3e3SMatthew Wilcox (Oracle) 	 * the next read(). Here we 'discover' whether the folio went
21981da177e4SLinus Torvalds 	 * uptodate as a result of this (potentially partial) write.
21991da177e4SLinus Torvalds 	 */
22001da177e4SLinus Torvalds 	if (!partial)
22018c6cb3e3SMatthew Wilcox (Oracle) 		folio_mark_uptodate(folio);
22021da177e4SLinus Torvalds }
22031da177e4SLinus Torvalds 
22041da177e4SLinus Torvalds /*
2205155130a4SChristoph Hellwig  * block_write_begin takes care of the basic task of block allocation and
2206155130a4SChristoph Hellwig  * bringing partial write blocks uptodate first.
2207155130a4SChristoph Hellwig  *
22087bb46a67Snpiggin@suse.de  * The filesystem needs to handle block truncation upon failure.
2209afddba49SNick Piggin  */
2210155130a4SChristoph Hellwig int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2211b3992d1eSMatthew Wilcox (Oracle) 		struct page **pagep, get_block_t *get_block)
2212afddba49SNick Piggin {
221309cbfeafSKirill A. Shutemov 	pgoff_t index = pos >> PAGE_SHIFT;
2214afddba49SNick Piggin 	struct page *page;
22156e1db88dSChristoph Hellwig 	int status;
2216afddba49SNick Piggin 
2217b7446e7cSMatthew Wilcox (Oracle) 	page = grab_cache_page_write_begin(mapping, index);
22186e1db88dSChristoph Hellwig 	if (!page)
22196e1db88dSChristoph Hellwig 		return -ENOMEM;
2220afddba49SNick Piggin 
22216e1db88dSChristoph Hellwig 	status = __block_write_begin(page, pos, len, get_block);
2222afddba49SNick Piggin 	if (unlikely(status)) {
2223afddba49SNick Piggin 		unlock_page(page);
222409cbfeafSKirill A. Shutemov 		put_page(page);
22256e1db88dSChristoph Hellwig 		page = NULL;
2226afddba49SNick Piggin 	}
2227afddba49SNick Piggin 
22286e1db88dSChristoph Hellwig 	*pagep = page;
2229afddba49SNick Piggin 	return status;
2230afddba49SNick Piggin }
2231afddba49SNick Piggin EXPORT_SYMBOL(block_write_begin);
2232afddba49SNick Piggin 
2233afddba49SNick Piggin int block_write_end(struct file *file, struct address_space *mapping,
2234afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2235afddba49SNick Piggin 			struct page *page, void *fsdata)
2236afddba49SNick Piggin {
22378c6cb3e3SMatthew Wilcox (Oracle) 	struct folio *folio = page_folio(page);
22388c6cb3e3SMatthew Wilcox (Oracle) 	size_t start = pos - folio_pos(folio);
2239afddba49SNick Piggin 
2240afddba49SNick Piggin 	if (unlikely(copied < len)) {
2241afddba49SNick Piggin 		/*
22422c69e205SMatthew Wilcox (Oracle) 		 * The buffers that were written will now be uptodate, so
22432c69e205SMatthew Wilcox (Oracle) 		 * we don't have to worry about a read_folio reading them
22442c69e205SMatthew Wilcox (Oracle) 		 * and overwriting a partial write. However if we have
22452c69e205SMatthew Wilcox (Oracle) 		 * encountered a short write and only partially written
22462c69e205SMatthew Wilcox (Oracle) 		 * into a buffer, it will not be marked uptodate, so a
22472c69e205SMatthew Wilcox (Oracle) 		 * read_folio might come in and destroy our partial write.
2248afddba49SNick Piggin 		 *
2249afddba49SNick Piggin 		 * Do the simplest thing, and just treat any short write to a
22508c6cb3e3SMatthew Wilcox (Oracle) 		 * non uptodate folio as a zero-length write, and force the
2251afddba49SNick Piggin 		 * caller to redo the whole thing.
2252afddba49SNick Piggin 		 */
22538c6cb3e3SMatthew Wilcox (Oracle) 		if (!folio_test_uptodate(folio))
2254afddba49SNick Piggin 			copied = 0;
2255afddba49SNick Piggin 
22564a9622f2SMatthew Wilcox (Oracle) 		folio_zero_new_buffers(folio, start+copied, start+len);
2257afddba49SNick Piggin 	}
22588c6cb3e3SMatthew Wilcox (Oracle) 	flush_dcache_folio(folio);
2259afddba49SNick Piggin 
2260afddba49SNick Piggin 	/* This could be a short (even 0-length) commit */
2261489b7e72SBean Huo 	__block_commit_write(folio, start, start + copied);
2262afddba49SNick Piggin 
2263afddba49SNick Piggin 	return copied;
2264afddba49SNick Piggin }
2265afddba49SNick Piggin EXPORT_SYMBOL(block_write_end);
2266afddba49SNick Piggin 
2267afddba49SNick Piggin int generic_write_end(struct file *file, struct address_space *mapping,
2268afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2269afddba49SNick Piggin 			struct page *page, void *fsdata)
2270afddba49SNick Piggin {
22718af54f29SChristoph Hellwig 	struct inode *inode = mapping->host;
22728af54f29SChristoph Hellwig 	loff_t old_size = inode->i_size;
22738af54f29SChristoph Hellwig 	bool i_size_changed = false;
22748af54f29SChristoph Hellwig 
2275afddba49SNick Piggin 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
22768af54f29SChristoph Hellwig 
22778af54f29SChristoph Hellwig 	/*
22788af54f29SChristoph Hellwig 	 * No need to use i_size_read() here, the i_size cannot change under us
22798af54f29SChristoph Hellwig 	 * because we hold i_rwsem.
22808af54f29SChristoph Hellwig 	 *
22818af54f29SChristoph Hellwig 	 * But it's important to update i_size while still holding page lock:
22828af54f29SChristoph Hellwig 	 * page writeout could otherwise come in and zero beyond i_size.
22838af54f29SChristoph Hellwig 	 */
22848af54f29SChristoph Hellwig 	if (pos + copied > inode->i_size) {
22858af54f29SChristoph Hellwig 		i_size_write(inode, pos + copied);
22868af54f29SChristoph Hellwig 		i_size_changed = true;
22878af54f29SChristoph Hellwig 	}
22888af54f29SChristoph Hellwig 
22898af54f29SChristoph Hellwig 	unlock_page(page);
22907a77dad7SAndreas Gruenbacher 	put_page(page);
22918af54f29SChristoph Hellwig 
22928af54f29SChristoph Hellwig 	if (old_size < pos)
22938af54f29SChristoph Hellwig 		pagecache_isize_extended(inode, old_size, pos);
22948af54f29SChristoph Hellwig 	/*
22958af54f29SChristoph Hellwig 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
22968af54f29SChristoph Hellwig 	 * makes the holding time of page lock longer. Second, it forces lock
22978af54f29SChristoph Hellwig 	 * ordering of page lock and transaction start for journaling
22988af54f29SChristoph Hellwig 	 * filesystems.
22998af54f29SChristoph Hellwig 	 */
23008af54f29SChristoph Hellwig 	if (i_size_changed)
23018af54f29SChristoph Hellwig 		mark_inode_dirty(inode);
230226ddb1f4SAndreas Gruenbacher 	return copied;
2303afddba49SNick Piggin }
2304afddba49SNick Piggin EXPORT_SYMBOL(generic_write_end);
2305afddba49SNick Piggin 
2306afddba49SNick Piggin /*
23072e7e80f7SMatthew Wilcox (Oracle)  * block_is_partially_uptodate checks whether buffers within a folio are
23088ab22b9aSHisashi Hifumi  * uptodate or not.
23098ab22b9aSHisashi Hifumi  *
23102e7e80f7SMatthew Wilcox (Oracle)  * Returns true if all buffers which correspond to the specified part
23112e7e80f7SMatthew Wilcox (Oracle)  * of the folio are uptodate.
23128ab22b9aSHisashi Hifumi  */
23132e7e80f7SMatthew Wilcox (Oracle) bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
23148ab22b9aSHisashi Hifumi {
23158ab22b9aSHisashi Hifumi 	unsigned block_start, block_end, blocksize;
23168ab22b9aSHisashi Hifumi 	unsigned to;
23178ab22b9aSHisashi Hifumi 	struct buffer_head *bh, *head;
23182e7e80f7SMatthew Wilcox (Oracle) 	bool ret = true;
23198ab22b9aSHisashi Hifumi 
23202e7e80f7SMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
23212e7e80f7SMatthew Wilcox (Oracle) 	if (!head)
23222e7e80f7SMatthew Wilcox (Oracle) 		return false;
232345bce8f3SLinus Torvalds 	blocksize = head->b_size;
23242e7e80f7SMatthew Wilcox (Oracle) 	to = min_t(unsigned, folio_size(folio) - from, count);
23258ab22b9aSHisashi Hifumi 	to = from + to;
23262e7e80f7SMatthew Wilcox (Oracle) 	if (from < blocksize && to > folio_size(folio) - blocksize)
23272e7e80f7SMatthew Wilcox (Oracle) 		return false;
23288ab22b9aSHisashi Hifumi 
23298ab22b9aSHisashi Hifumi 	bh = head;
23308ab22b9aSHisashi Hifumi 	block_start = 0;
23318ab22b9aSHisashi Hifumi 	do {
23328ab22b9aSHisashi Hifumi 		block_end = block_start + blocksize;
23338ab22b9aSHisashi Hifumi 		if (block_end > from && block_start < to) {
23348ab22b9aSHisashi Hifumi 			if (!buffer_uptodate(bh)) {
23352e7e80f7SMatthew Wilcox (Oracle) 				ret = false;
23368ab22b9aSHisashi Hifumi 				break;
23378ab22b9aSHisashi Hifumi 			}
23388ab22b9aSHisashi Hifumi 			if (block_end >= to)
23398ab22b9aSHisashi Hifumi 				break;
23408ab22b9aSHisashi Hifumi 		}
23418ab22b9aSHisashi Hifumi 		block_start = block_end;
23428ab22b9aSHisashi Hifumi 		bh = bh->b_this_page;
23438ab22b9aSHisashi Hifumi 	} while (bh != head);
23448ab22b9aSHisashi Hifumi 
23458ab22b9aSHisashi Hifumi 	return ret;
23468ab22b9aSHisashi Hifumi }
23478ab22b9aSHisashi Hifumi EXPORT_SYMBOL(block_is_partially_uptodate);
23488ab22b9aSHisashi Hifumi 
23498ab22b9aSHisashi Hifumi /*
23502c69e205SMatthew Wilcox (Oracle)  * Generic "read_folio" function for block devices that have the normal
23511da177e4SLinus Torvalds  * get_block functionality. This is most of the block device filesystems.
23522c69e205SMatthew Wilcox (Oracle)  * Reads the folio asynchronously --- the unlock_buffer() and
23531da177e4SLinus Torvalds  * set/clear_buffer_uptodate() functions propagate buffer state into the
23542c69e205SMatthew Wilcox (Oracle)  * folio once IO has completed.
23551da177e4SLinus Torvalds  */
23562c69e205SMatthew Wilcox (Oracle) int block_read_full_folio(struct folio *folio, get_block_t *get_block)
23571da177e4SLinus Torvalds {
23582c69e205SMatthew Wilcox (Oracle) 	struct inode *inode = folio->mapping->host;
23591da177e4SLinus Torvalds 	sector_t iblock, lblock;
23601da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
236145bce8f3SLinus Torvalds 	unsigned int blocksize, bbits;
23621da177e4SLinus Torvalds 	int nr, i;
23631da177e4SLinus Torvalds 	int fully_mapped = 1;
2364b7a6eb22SMatthew Wilcox (Oracle) 	bool page_error = false;
23654fa512ceSEric Biggers 	loff_t limit = i_size_read(inode);
23664fa512ceSEric Biggers 
23674fa512ceSEric Biggers 	/* This is needed for ext4. */
23684fa512ceSEric Biggers 	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
23694fa512ceSEric Biggers 		limit = inode->i_sb->s_maxbytes;
23701da177e4SLinus Torvalds 
23712c69e205SMatthew Wilcox (Oracle) 	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
23722c69e205SMatthew Wilcox (Oracle) 
2373c6c8c3e7SPankaj Raghav 	head = folio_create_buffers(folio, inode, 0);
237445bce8f3SLinus Torvalds 	blocksize = head->b_size;
237545bce8f3SLinus Torvalds 	bbits = block_size_bits(blocksize);
23761da177e4SLinus Torvalds 
23772c69e205SMatthew Wilcox (Oracle) 	iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits);
23784fa512ceSEric Biggers 	lblock = (limit+blocksize-1) >> bbits;
23791da177e4SLinus Torvalds 	bh = head;
23801da177e4SLinus Torvalds 	nr = 0;
23811da177e4SLinus Torvalds 	i = 0;
23821da177e4SLinus Torvalds 
23831da177e4SLinus Torvalds 	do {
23841da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
23851da177e4SLinus Torvalds 			continue;
23861da177e4SLinus Torvalds 
23871da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
2388c64610baSAndrew Morton 			int err = 0;
2389c64610baSAndrew Morton 
23901da177e4SLinus Torvalds 			fully_mapped = 0;
23911da177e4SLinus Torvalds 			if (iblock < lblock) {
2392b0cf2321SBadari Pulavarty 				WARN_ON(bh->b_size != blocksize);
2393c64610baSAndrew Morton 				err = get_block(inode, iblock, bh, 0);
2394b7a6eb22SMatthew Wilcox (Oracle) 				if (err) {
23952c69e205SMatthew Wilcox (Oracle) 					folio_set_error(folio);
2396b7a6eb22SMatthew Wilcox (Oracle) 					page_error = true;
2397b7a6eb22SMatthew Wilcox (Oracle) 				}
23981da177e4SLinus Torvalds 			}
23991da177e4SLinus Torvalds 			if (!buffer_mapped(bh)) {
24002c69e205SMatthew Wilcox (Oracle) 				folio_zero_range(folio, i * blocksize,
24012c69e205SMatthew Wilcox (Oracle) 						blocksize);
2402c64610baSAndrew Morton 				if (!err)
24031da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
24041da177e4SLinus Torvalds 				continue;
24051da177e4SLinus Torvalds 			}
24061da177e4SLinus Torvalds 			/*
24071da177e4SLinus Torvalds 			 * get_block() might have updated the buffer
24081da177e4SLinus Torvalds 			 * synchronously
24091da177e4SLinus Torvalds 			 */
24101da177e4SLinus Torvalds 			if (buffer_uptodate(bh))
24111da177e4SLinus Torvalds 				continue;
24121da177e4SLinus Torvalds 		}
24131da177e4SLinus Torvalds 		arr[nr++] = bh;
24141da177e4SLinus Torvalds 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
24151da177e4SLinus Torvalds 
24161da177e4SLinus Torvalds 	if (fully_mapped)
24172c69e205SMatthew Wilcox (Oracle) 		folio_set_mappedtodisk(folio);
24181da177e4SLinus Torvalds 
24191da177e4SLinus Torvalds 	if (!nr) {
24201da177e4SLinus Torvalds 		/*
24216ba924d3SMatthew Wilcox (Oracle) 		 * All buffers are uptodate or get_block() returned an
24226ba924d3SMatthew Wilcox (Oracle) 		 * error when trying to map them - we can finish the read.
24231da177e4SLinus Torvalds 		 */
24246ba924d3SMatthew Wilcox (Oracle) 		folio_end_read(folio, !page_error);
24251da177e4SLinus Torvalds 		return 0;
24261da177e4SLinus Torvalds 	}
24271da177e4SLinus Torvalds 
24281da177e4SLinus Torvalds 	/* Stage two: lock the buffers */
24291da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
24301da177e4SLinus Torvalds 		bh = arr[i];
24311da177e4SLinus Torvalds 		lock_buffer(bh);
24321da177e4SLinus Torvalds 		mark_buffer_async_read(bh);
24331da177e4SLinus Torvalds 	}
24341da177e4SLinus Torvalds 
24351da177e4SLinus Torvalds 	/*
24361da177e4SLinus Torvalds 	 * Stage 3: start the IO.  Check for uptodateness
24371da177e4SLinus Torvalds 	 * inside the buffer lock in case another process reading
24381da177e4SLinus Torvalds 	 * the underlying blockdev brought it uptodate (the sct fix).
24391da177e4SLinus Torvalds 	 */
24401da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
24411da177e4SLinus Torvalds 		bh = arr[i];
24421da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
24431da177e4SLinus Torvalds 			end_buffer_async_read(bh, 1);
24441da177e4SLinus Torvalds 		else
24451420c4a5SBart Van Assche 			submit_bh(REQ_OP_READ, bh);
24461da177e4SLinus Torvalds 	}
24471da177e4SLinus Torvalds 	return 0;
24481da177e4SLinus Torvalds }
24492c69e205SMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_read_full_folio);
24501da177e4SLinus Torvalds 
24511da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding
245289e10787SNick Piggin  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
24531da177e4SLinus Torvalds  * deal with the hole.
24541da177e4SLinus Torvalds  */
245589e10787SNick Piggin int generic_cont_expand_simple(struct inode *inode, loff_t size)
24561da177e4SLinus Torvalds {
24571da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
245853b524b8SMatthew Wilcox (Oracle) 	const struct address_space_operations *aops = mapping->a_ops;
24591da177e4SLinus Torvalds 	struct page *page;
24601468c6f4SAlexander Potapenko 	void *fsdata = NULL;
24611da177e4SLinus Torvalds 	int err;
24621da177e4SLinus Torvalds 
2463c08d3b0eSnpiggin@suse.de 	err = inode_newsize_ok(inode, size);
2464c08d3b0eSnpiggin@suse.de 	if (err)
24651da177e4SLinus Torvalds 		goto out;
24661da177e4SLinus Torvalds 
246753b524b8SMatthew Wilcox (Oracle) 	err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
246889e10787SNick Piggin 	if (err)
246905eb0b51SOGAWA Hirofumi 		goto out;
247005eb0b51SOGAWA Hirofumi 
247153b524b8SMatthew Wilcox (Oracle) 	err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
247289e10787SNick Piggin 	BUG_ON(err > 0);
247305eb0b51SOGAWA Hirofumi 
247405eb0b51SOGAWA Hirofumi out:
247505eb0b51SOGAWA Hirofumi 	return err;
247605eb0b51SOGAWA Hirofumi }
24771fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_cont_expand_simple);
247805eb0b51SOGAWA Hirofumi 
2479f1e3af72SAdrian Bunk static int cont_expand_zero(struct file *file, struct address_space *mapping,
248089e10787SNick Piggin 			    loff_t pos, loff_t *bytes)
248105eb0b51SOGAWA Hirofumi {
248289e10787SNick Piggin 	struct inode *inode = mapping->host;
248353b524b8SMatthew Wilcox (Oracle) 	const struct address_space_operations *aops = mapping->a_ops;
248493407472SFabian Frederick 	unsigned int blocksize = i_blocksize(inode);
248589e10787SNick Piggin 	struct page *page;
24861468c6f4SAlexander Potapenko 	void *fsdata = NULL;
248789e10787SNick Piggin 	pgoff_t index, curidx;
248889e10787SNick Piggin 	loff_t curpos;
248989e10787SNick Piggin 	unsigned zerofrom, offset, len;
249089e10787SNick Piggin 	int err = 0;
249105eb0b51SOGAWA Hirofumi 
249209cbfeafSKirill A. Shutemov 	index = pos >> PAGE_SHIFT;
249309cbfeafSKirill A. Shutemov 	offset = pos & ~PAGE_MASK;
249489e10787SNick Piggin 
249509cbfeafSKirill A. Shutemov 	while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
249609cbfeafSKirill A. Shutemov 		zerofrom = curpos & ~PAGE_MASK;
249789e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
249889e10787SNick Piggin 			*bytes |= (blocksize-1);
249989e10787SNick Piggin 			(*bytes)++;
250089e10787SNick Piggin 		}
250109cbfeafSKirill A. Shutemov 		len = PAGE_SIZE - zerofrom;
250289e10787SNick Piggin 
250353b524b8SMatthew Wilcox (Oracle) 		err = aops->write_begin(file, mapping, curpos, len,
250489e10787SNick Piggin 					    &page, &fsdata);
250589e10787SNick Piggin 		if (err)
250689e10787SNick Piggin 			goto out;
2507eebd2aa3SChristoph Lameter 		zero_user(page, zerofrom, len);
250853b524b8SMatthew Wilcox (Oracle) 		err = aops->write_end(file, mapping, curpos, len, len,
250989e10787SNick Piggin 						page, fsdata);
251089e10787SNick Piggin 		if (err < 0)
251189e10787SNick Piggin 			goto out;
251289e10787SNick Piggin 		BUG_ON(err != len);
251389e10787SNick Piggin 		err = 0;
2514061e9746SOGAWA Hirofumi 
2515061e9746SOGAWA Hirofumi 		balance_dirty_pages_ratelimited(mapping);
2516c2ca0fcdSMikulas Patocka 
251708d405c8SDavidlohr Bueso 		if (fatal_signal_pending(current)) {
2518c2ca0fcdSMikulas Patocka 			err = -EINTR;
2519c2ca0fcdSMikulas Patocka 			goto out;
2520c2ca0fcdSMikulas Patocka 		}
252189e10787SNick Piggin 	}
252289e10787SNick Piggin 
252389e10787SNick Piggin 	/* page covers the boundary, find the boundary offset */
252489e10787SNick Piggin 	if (index == curidx) {
252509cbfeafSKirill A. Shutemov 		zerofrom = curpos & ~PAGE_MASK;
252689e10787SNick Piggin 		/* if we will expand the thing last block will be filled */
252789e10787SNick Piggin 		if (offset <= zerofrom) {
252889e10787SNick Piggin 			goto out;
252989e10787SNick Piggin 		}
253089e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
253189e10787SNick Piggin 			*bytes |= (blocksize-1);
253289e10787SNick Piggin 			(*bytes)++;
253389e10787SNick Piggin 		}
253489e10787SNick Piggin 		len = offset - zerofrom;
253589e10787SNick Piggin 
253653b524b8SMatthew Wilcox (Oracle) 		err = aops->write_begin(file, mapping, curpos, len,
253789e10787SNick Piggin 					    &page, &fsdata);
253889e10787SNick Piggin 		if (err)
253989e10787SNick Piggin 			goto out;
2540eebd2aa3SChristoph Lameter 		zero_user(page, zerofrom, len);
254153b524b8SMatthew Wilcox (Oracle) 		err = aops->write_end(file, mapping, curpos, len, len,
254289e10787SNick Piggin 						page, fsdata);
254389e10787SNick Piggin 		if (err < 0)
254489e10787SNick Piggin 			goto out;
254589e10787SNick Piggin 		BUG_ON(err != len);
254689e10787SNick Piggin 		err = 0;
254789e10787SNick Piggin 	}
254889e10787SNick Piggin out:
254989e10787SNick Piggin 	return err;
25501da177e4SLinus Torvalds }
25511da177e4SLinus Torvalds 
25521da177e4SLinus Torvalds /*
25531da177e4SLinus Torvalds  * For moronic filesystems that do not allow holes in file.
25541da177e4SLinus Torvalds  * We may have to extend the file.
25551da177e4SLinus Torvalds  */
2556282dc178SChristoph Hellwig int cont_write_begin(struct file *file, struct address_space *mapping,
2557be3bbbc5SMatthew Wilcox (Oracle) 			loff_t pos, unsigned len,
255889e10787SNick Piggin 			struct page **pagep, void **fsdata,
255989e10787SNick Piggin 			get_block_t *get_block, loff_t *bytes)
25601da177e4SLinus Torvalds {
25611da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
256293407472SFabian Frederick 	unsigned int blocksize = i_blocksize(inode);
256393407472SFabian Frederick 	unsigned int zerofrom;
256489e10787SNick Piggin 	int err;
25651da177e4SLinus Torvalds 
256689e10787SNick Piggin 	err = cont_expand_zero(file, mapping, pos, bytes);
256789e10787SNick Piggin 	if (err)
2568155130a4SChristoph Hellwig 		return err;
25691da177e4SLinus Torvalds 
257009cbfeafSKirill A. Shutemov 	zerofrom = *bytes & ~PAGE_MASK;
257189e10787SNick Piggin 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
25721da177e4SLinus Torvalds 		*bytes |= (blocksize-1);
25731da177e4SLinus Torvalds 		(*bytes)++;
25741da177e4SLinus Torvalds 	}
25751da177e4SLinus Torvalds 
2576b3992d1eSMatthew Wilcox (Oracle) 	return block_write_begin(mapping, pos, len, pagep, get_block);
25771da177e4SLinus Torvalds }
25781fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(cont_write_begin);
25791da177e4SLinus Torvalds 
2580a524fcfeSBean Huo void block_commit_write(struct page *page, unsigned from, unsigned to)
25811da177e4SLinus Torvalds {
25828c6cb3e3SMatthew Wilcox (Oracle) 	struct folio *folio = page_folio(page);
2583489b7e72SBean Huo 	__block_commit_write(folio, from, to);
25841da177e4SLinus Torvalds }
25851fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_commit_write);
25861da177e4SLinus Torvalds 
258754171690SDavid Chinner /*
258854171690SDavid Chinner  * block_page_mkwrite() is not allowed to change the file size as it gets
258954171690SDavid Chinner  * called from a page fault handler when a page is first dirtied. Hence we must
259054171690SDavid Chinner  * be careful to check for EOF conditions here. We set the page up correctly
259154171690SDavid Chinner  * for a written page which means we get ENOSPC checking when writing into
259254171690SDavid Chinner  * holes and correct delalloc and unwritten extent mapping on filesystems that
259354171690SDavid Chinner  * support these features.
259454171690SDavid Chinner  *
259554171690SDavid Chinner  * We are not allowed to take the i_mutex here so we have to play games to
259654171690SDavid Chinner  * protect against truncate races as the page could now be beyond EOF.  Because
25977bb46a67Snpiggin@suse.de  * truncate writes the inode size before removing pages, once we have the
259854171690SDavid Chinner  * page lock we can determine safely if the page is beyond EOF. If it is not
259954171690SDavid Chinner  * beyond EOF, then the page is guaranteed safe against truncation until we
260054171690SDavid Chinner  * unlock the page.
2601ea13a864SJan Kara  *
260214da9200SJan Kara  * Direct callers of this function should protect against filesystem freezing
26035c500029SRoss Zwisler  * using sb_start_pagefault() - sb_end_pagefault() functions.
260454171690SDavid Chinner  */
26055c500029SRoss Zwisler int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
260654171690SDavid Chinner 			 get_block_t get_block)
260754171690SDavid Chinner {
2608fe181377SMatthew Wilcox (Oracle) 	struct folio *folio = page_folio(vmf->page);
2609496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
261054171690SDavid Chinner 	unsigned long end;
261154171690SDavid Chinner 	loff_t size;
261224da4fabSJan Kara 	int ret;
261354171690SDavid Chinner 
2614fe181377SMatthew Wilcox (Oracle) 	folio_lock(folio);
261554171690SDavid Chinner 	size = i_size_read(inode);
2616fe181377SMatthew Wilcox (Oracle) 	if ((folio->mapping != inode->i_mapping) ||
2617fe181377SMatthew Wilcox (Oracle) 	    (folio_pos(folio) >= size)) {
261824da4fabSJan Kara 		/* We overload EFAULT to mean page got truncated */
261924da4fabSJan Kara 		ret = -EFAULT;
262024da4fabSJan Kara 		goto out_unlock;
262154171690SDavid Chinner 	}
262254171690SDavid Chinner 
2623fe181377SMatthew Wilcox (Oracle) 	end = folio_size(folio);
2624fe181377SMatthew Wilcox (Oracle) 	/* folio is wholly or partially inside EOF */
2625fe181377SMatthew Wilcox (Oracle) 	if (folio_pos(folio) + end > size)
2626fe181377SMatthew Wilcox (Oracle) 		end = size - folio_pos(folio);
262754171690SDavid Chinner 
2628fe181377SMatthew Wilcox (Oracle) 	ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2629a524fcfeSBean Huo 	if (unlikely(ret))
263024da4fabSJan Kara 		goto out_unlock;
2631a524fcfeSBean Huo 
2632a524fcfeSBean Huo 	__block_commit_write(folio, 0, end);
2633a524fcfeSBean Huo 
2634fe181377SMatthew Wilcox (Oracle) 	folio_mark_dirty(folio);
2635fe181377SMatthew Wilcox (Oracle) 	folio_wait_stable(folio);
263624da4fabSJan Kara 	return 0;
263724da4fabSJan Kara out_unlock:
2638fe181377SMatthew Wilcox (Oracle) 	folio_unlock(folio);
263954171690SDavid Chinner 	return ret;
264054171690SDavid Chinner }
26411fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_page_mkwrite);
26421da177e4SLinus Torvalds 
26431da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping,
26441da177e4SLinus Torvalds 			loff_t from, get_block_t *get_block)
26451da177e4SLinus Torvalds {
264609cbfeafSKirill A. Shutemov 	pgoff_t index = from >> PAGE_SHIFT;
26471da177e4SLinus Torvalds 	unsigned blocksize;
264854b21a79SAndrew Morton 	sector_t iblock;
26496d68f644SMatthew Wilcox (Oracle) 	size_t offset, length, pos;
26501da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
26516d68f644SMatthew Wilcox (Oracle) 	struct folio *folio;
26521da177e4SLinus Torvalds 	struct buffer_head *bh;
2653dc7cb2d2SJiapeng Chong 	int err = 0;
26541da177e4SLinus Torvalds 
265593407472SFabian Frederick 	blocksize = i_blocksize(inode);
26566d68f644SMatthew Wilcox (Oracle) 	length = from & (blocksize - 1);
26571da177e4SLinus Torvalds 
26581da177e4SLinus Torvalds 	/* Block boundary? Nothing to do */
26591da177e4SLinus Torvalds 	if (!length)
26601da177e4SLinus Torvalds 		return 0;
26611da177e4SLinus Torvalds 
26621da177e4SLinus Torvalds 	length = blocksize - length;
266309cbfeafSKirill A. Shutemov 	iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
26641da177e4SLinus Torvalds 
26656d68f644SMatthew Wilcox (Oracle) 	folio = filemap_grab_folio(mapping, index);
26666d68f644SMatthew Wilcox (Oracle) 	if (IS_ERR(folio))
26676d68f644SMatthew Wilcox (Oracle) 		return PTR_ERR(folio);
26681da177e4SLinus Torvalds 
26696d68f644SMatthew Wilcox (Oracle) 	bh = folio_buffers(folio);
26703decb856SMatthew Wilcox (Oracle) 	if (!bh)
26710a88810dSMatthew Wilcox (Oracle) 		bh = create_empty_buffers(folio, blocksize, 0);
26721da177e4SLinus Torvalds 
26731da177e4SLinus Torvalds 	/* Find the buffer that contains "offset" */
26746d68f644SMatthew Wilcox (Oracle) 	offset = offset_in_folio(folio, from);
26751da177e4SLinus Torvalds 	pos = blocksize;
26761da177e4SLinus Torvalds 	while (offset >= pos) {
26771da177e4SLinus Torvalds 		bh = bh->b_this_page;
26781da177e4SLinus Torvalds 		iblock++;
26791da177e4SLinus Torvalds 		pos += blocksize;
26801da177e4SLinus Torvalds 	}
26811da177e4SLinus Torvalds 
26821da177e4SLinus Torvalds 	if (!buffer_mapped(bh)) {
2683b0cf2321SBadari Pulavarty 		WARN_ON(bh->b_size != blocksize);
26841da177e4SLinus Torvalds 		err = get_block(inode, iblock, bh, 0);
26851da177e4SLinus Torvalds 		if (err)
26861da177e4SLinus Torvalds 			goto unlock;
26871da177e4SLinus Torvalds 		/* unmapped? It's a hole - nothing to do */
26881da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
26891da177e4SLinus Torvalds 			goto unlock;
26901da177e4SLinus Torvalds 	}
26911da177e4SLinus Torvalds 
26921da177e4SLinus Torvalds 	/* Ok, it's mapped. Make sure it's up-to-date */
26936d68f644SMatthew Wilcox (Oracle) 	if (folio_test_uptodate(folio))
26941da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
26951da177e4SLinus Torvalds 
269633a266ddSDavid Chinner 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2697e7ea1129SZhang Yi 		err = bh_read(bh, 0);
26981da177e4SLinus Torvalds 		/* Uhhuh. Read error. Complain and punt. */
2699e7ea1129SZhang Yi 		if (err < 0)
27001da177e4SLinus Torvalds 			goto unlock;
27011da177e4SLinus Torvalds 	}
27021da177e4SLinus Torvalds 
27036d68f644SMatthew Wilcox (Oracle) 	folio_zero_range(folio, offset, length);
27041da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
27051da177e4SLinus Torvalds 
27061da177e4SLinus Torvalds unlock:
27076d68f644SMatthew Wilcox (Oracle) 	folio_unlock(folio);
27086d68f644SMatthew Wilcox (Oracle) 	folio_put(folio);
2709dc7cb2d2SJiapeng Chong 
27101da177e4SLinus Torvalds 	return err;
27111da177e4SLinus Torvalds }
27121fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_truncate_page);
27131da177e4SLinus Torvalds 
27141da177e4SLinus Torvalds /*
27151da177e4SLinus Torvalds  * The generic ->writepage function for buffer-backed address_spaces
27161da177e4SLinus Torvalds  */
27171b938c08SMatthew Wilcox int block_write_full_page(struct page *page, get_block_t *get_block,
27181b938c08SMatthew Wilcox 			struct writeback_control *wbc)
27191da177e4SLinus Torvalds {
272053418a18SMatthew Wilcox (Oracle) 	struct folio *folio = page_folio(page);
2721bb0ea598SMatthew Wilcox (Oracle) 	struct inode * const inode = folio->mapping->host;
27221da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
27231da177e4SLinus Torvalds 
2724bb0ea598SMatthew Wilcox (Oracle) 	/* Is the folio fully inside i_size? */
2725bb0ea598SMatthew Wilcox (Oracle) 	if (folio_pos(folio) + folio_size(folio) <= i_size)
272653418a18SMatthew Wilcox (Oracle) 		return __block_write_full_folio(inode, folio, get_block, wbc,
27271b938c08SMatthew Wilcox 					       end_buffer_async_write);
27281da177e4SLinus Torvalds 
2729bb0ea598SMatthew Wilcox (Oracle) 	/* Is the folio fully outside i_size? (truncate in progress) */
2730bb0ea598SMatthew Wilcox (Oracle) 	if (folio_pos(folio) >= i_size) {
273153418a18SMatthew Wilcox (Oracle) 		folio_unlock(folio);
27321da177e4SLinus Torvalds 		return 0; /* don't care */
27331da177e4SLinus Torvalds 	}
27341da177e4SLinus Torvalds 
27351da177e4SLinus Torvalds 	/*
2736bb0ea598SMatthew Wilcox (Oracle) 	 * The folio straddles i_size.  It must be zeroed out on each and every
27372a61aa40SAdam Buchbinder 	 * writepage invocation because it may be mmapped.  "A file is mapped
27381da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
27391da177e4SLinus Torvalds 	 * the page size, the remaining memory is zeroed when mapped, and
27401da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
27411da177e4SLinus Torvalds 	 */
2742bb0ea598SMatthew Wilcox (Oracle) 	folio_zero_segment(folio, offset_in_folio(folio, i_size),
2743bb0ea598SMatthew Wilcox (Oracle) 			folio_size(folio));
274453418a18SMatthew Wilcox (Oracle) 	return __block_write_full_folio(inode, folio, get_block, wbc,
274535c80d5fSChris Mason 			end_buffer_async_write);
274635c80d5fSChris Mason }
27471fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_write_full_page);
274835c80d5fSChris Mason 
27491da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
27501da177e4SLinus Torvalds 			    get_block_t *get_block)
27511da177e4SLinus Torvalds {
27521da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
27532a527d68SAlexander Potapenko 	struct buffer_head tmp = {
27542a527d68SAlexander Potapenko 		.b_size = i_blocksize(inode),
27552a527d68SAlexander Potapenko 	};
27562a527d68SAlexander Potapenko 
27571da177e4SLinus Torvalds 	get_block(inode, block, &tmp, 0);
27581da177e4SLinus Torvalds 	return tmp.b_blocknr;
27591da177e4SLinus Torvalds }
27601fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_block_bmap);
27611da177e4SLinus Torvalds 
27624246a0b6SChristoph Hellwig static void end_bio_bh_io_sync(struct bio *bio)
27631da177e4SLinus Torvalds {
27641da177e4SLinus Torvalds 	struct buffer_head *bh = bio->bi_private;
27651da177e4SLinus Torvalds 
2766b7c44ed9SJens Axboe 	if (unlikely(bio_flagged(bio, BIO_QUIET)))
276708bafc03SKeith Mannthey 		set_bit(BH_Quiet, &bh->b_state);
276808bafc03SKeith Mannthey 
27694e4cbee9SChristoph Hellwig 	bh->b_end_io(bh, !bio->bi_status);
27701da177e4SLinus Torvalds 	bio_put(bio);
27711da177e4SLinus Torvalds }
27721da177e4SLinus Torvalds 
27735bdf402aSRitesh Harjani (IBM) static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
27741420c4a5SBart Van Assche 			  struct writeback_control *wbc)
27751da177e4SLinus Torvalds {
27761420c4a5SBart Van Assche 	const enum req_op op = opf & REQ_OP_MASK;
27771da177e4SLinus Torvalds 	struct bio *bio;
27781da177e4SLinus Torvalds 
27791da177e4SLinus Torvalds 	BUG_ON(!buffer_locked(bh));
27801da177e4SLinus Torvalds 	BUG_ON(!buffer_mapped(bh));
27811da177e4SLinus Torvalds 	BUG_ON(!bh->b_end_io);
27828fb0e342SAneesh Kumar K.V 	BUG_ON(buffer_delay(bh));
27838fb0e342SAneesh Kumar K.V 	BUG_ON(buffer_unwritten(bh));
27841da177e4SLinus Torvalds 
278548fd4f93SJens Axboe 	/*
278648fd4f93SJens Axboe 	 * Only clear out a write error when rewriting
27871da177e4SLinus Torvalds 	 */
27882a222ca9SMike Christie 	if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
27891da177e4SLinus Torvalds 		clear_buffer_write_io_error(bh);
27901da177e4SLinus Torvalds 
279107888c66SChristoph Hellwig 	if (buffer_meta(bh))
27921420c4a5SBart Van Assche 		opf |= REQ_META;
279307888c66SChristoph Hellwig 	if (buffer_prio(bh))
27941420c4a5SBart Van Assche 		opf |= REQ_PRIO;
279507888c66SChristoph Hellwig 
27961420c4a5SBart Van Assche 	bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
27971da177e4SLinus Torvalds 
27984f74d15fSEric Biggers 	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
27994f74d15fSEric Biggers 
28004f024f37SKent Overstreet 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
28011da177e4SLinus Torvalds 
2802741af75dSJohannes Thumshirn 	__bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
28031da177e4SLinus Torvalds 
28041da177e4SLinus Torvalds 	bio->bi_end_io = end_bio_bh_io_sync;
28051da177e4SLinus Torvalds 	bio->bi_private = bh;
28061da177e4SLinus Torvalds 
280783c9c547SMing Lei 	/* Take care of bh's that straddle the end of the device */
280883c9c547SMing Lei 	guard_bio_eod(bio);
280983c9c547SMing Lei 
2810fd42df30SDennis Zhou 	if (wbc) {
2811fd42df30SDennis Zhou 		wbc_init_bio(wbc, bio);
281234e51a5eSTejun Heo 		wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
2813fd42df30SDennis Zhou 	}
2814fd42df30SDennis Zhou 
28154e49ea4aSMike Christie 	submit_bio(bio);
28161da177e4SLinus Torvalds }
2817bafc0dbaSTejun Heo 
28185bdf402aSRitesh Harjani (IBM) void submit_bh(blk_opf_t opf, struct buffer_head *bh)
281971368511SDarrick J. Wong {
28205bdf402aSRitesh Harjani (IBM) 	submit_bh_wbc(opf, bh, NULL);
282171368511SDarrick J. Wong }
28221fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(submit_bh);
28231da177e4SLinus Torvalds 
28243ae72869SBart Van Assche void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
28259cb569d6SChristoph Hellwig {
28269cb569d6SChristoph Hellwig 	lock_buffer(bh);
28279cb569d6SChristoph Hellwig 	if (!test_clear_buffer_dirty(bh)) {
28289cb569d6SChristoph Hellwig 		unlock_buffer(bh);
28299cb569d6SChristoph Hellwig 		return;
28309cb569d6SChristoph Hellwig 	}
28319cb569d6SChristoph Hellwig 	bh->b_end_io = end_buffer_write_sync;
28329cb569d6SChristoph Hellwig 	get_bh(bh);
28331420c4a5SBart Van Assche 	submit_bh(REQ_OP_WRITE | op_flags, bh);
28349cb569d6SChristoph Hellwig }
28359cb569d6SChristoph Hellwig EXPORT_SYMBOL(write_dirty_buffer);
28369cb569d6SChristoph Hellwig 
28371da177e4SLinus Torvalds /*
28381da177e4SLinus Torvalds  * For a data-integrity writeout, we need to wait upon any in-progress I/O
28391da177e4SLinus Torvalds  * and then start new I/O and then wait upon it.  The caller must have a ref on
28401da177e4SLinus Torvalds  * the buffer_head.
28411da177e4SLinus Torvalds  */
28423ae72869SBart Van Assche int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
28431da177e4SLinus Torvalds {
28441da177e4SLinus Torvalds 	WARN_ON(atomic_read(&bh->b_count) < 1);
28451da177e4SLinus Torvalds 	lock_buffer(bh);
28461da177e4SLinus Torvalds 	if (test_clear_buffer_dirty(bh)) {
2847377254b2SXianting Tian 		/*
2848377254b2SXianting Tian 		 * The bh should be mapped, but it might not be if the
2849377254b2SXianting Tian 		 * device was hot-removed. Not much we can do but fail the I/O.
2850377254b2SXianting Tian 		 */
2851377254b2SXianting Tian 		if (!buffer_mapped(bh)) {
2852377254b2SXianting Tian 			unlock_buffer(bh);
2853377254b2SXianting Tian 			return -EIO;
2854377254b2SXianting Tian 		}
2855377254b2SXianting Tian 
28561da177e4SLinus Torvalds 		get_bh(bh);
28571da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_write_sync;
2858ab620620SRitesh Harjani (IBM) 		submit_bh(REQ_OP_WRITE | op_flags, bh);
28591da177e4SLinus Torvalds 		wait_on_buffer(bh);
2860ab620620SRitesh Harjani (IBM) 		if (!buffer_uptodate(bh))
2861ab620620SRitesh Harjani (IBM) 			return -EIO;
28621da177e4SLinus Torvalds 	} else {
28631da177e4SLinus Torvalds 		unlock_buffer(bh);
28641da177e4SLinus Torvalds 	}
2865ab620620SRitesh Harjani (IBM) 	return 0;
28661da177e4SLinus Torvalds }
286787e99511SChristoph Hellwig EXPORT_SYMBOL(__sync_dirty_buffer);
286887e99511SChristoph Hellwig 
286987e99511SChristoph Hellwig int sync_dirty_buffer(struct buffer_head *bh)
287087e99511SChristoph Hellwig {
287170fd7614SChristoph Hellwig 	return __sync_dirty_buffer(bh, REQ_SYNC);
287287e99511SChristoph Hellwig }
28731fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(sync_dirty_buffer);
28741da177e4SLinus Torvalds 
28751da177e4SLinus Torvalds /*
287668189fefSMatthew Wilcox (Oracle)  * try_to_free_buffers() checks if all the buffers on this particular folio
28771da177e4SLinus Torvalds  * are unused, and releases them if so.
28781da177e4SLinus Torvalds  *
28791da177e4SLinus Torvalds  * Exclusion against try_to_free_buffers may be obtained by either
288068189fefSMatthew Wilcox (Oracle)  * locking the folio or by holding its mapping's private_lock.
28811da177e4SLinus Torvalds  *
288268189fefSMatthew Wilcox (Oracle)  * If the folio is dirty but all the buffers are clean then we need to
288368189fefSMatthew Wilcox (Oracle)  * be sure to mark the folio clean as well.  This is because the folio
28841da177e4SLinus Torvalds  * may be against a block device, and a later reattachment of buffers
288568189fefSMatthew Wilcox (Oracle)  * to a dirty folio will set *all* buffers dirty.  Which would corrupt
28861da177e4SLinus Torvalds  * filesystem data on the same device.
28871da177e4SLinus Torvalds  *
288868189fefSMatthew Wilcox (Oracle)  * The same applies to regular filesystem folios: if all the buffers are
288968189fefSMatthew Wilcox (Oracle)  * clean then we set the folio clean and proceed.  To do that, we require
2890e621900aSMatthew Wilcox (Oracle)  * total exclusion from block_dirty_folio().  That is obtained with
28911da177e4SLinus Torvalds  * private_lock.
28921da177e4SLinus Torvalds  *
28931da177e4SLinus Torvalds  * try_to_free_buffers() is non-blocking.
28941da177e4SLinus Torvalds  */
28951da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh)
28961da177e4SLinus Torvalds {
28971da177e4SLinus Torvalds 	return atomic_read(&bh->b_count) |
28981da177e4SLinus Torvalds 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
28991da177e4SLinus Torvalds }
29001da177e4SLinus Torvalds 
290164394763SMatthew Wilcox (Oracle) static bool
290264394763SMatthew Wilcox (Oracle) drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
29031da177e4SLinus Torvalds {
290464394763SMatthew Wilcox (Oracle) 	struct buffer_head *head = folio_buffers(folio);
29051da177e4SLinus Torvalds 	struct buffer_head *bh;
29061da177e4SLinus Torvalds 
29071da177e4SLinus Torvalds 	bh = head;
29081da177e4SLinus Torvalds 	do {
29091da177e4SLinus Torvalds 		if (buffer_busy(bh))
29101da177e4SLinus Torvalds 			goto failed;
29111da177e4SLinus Torvalds 		bh = bh->b_this_page;
29121da177e4SLinus Torvalds 	} while (bh != head);
29131da177e4SLinus Torvalds 
29141da177e4SLinus Torvalds 	do {
29151da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
29161da177e4SLinus Torvalds 
2917535ee2fbSJan Kara 		if (bh->b_assoc_map)
29181da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
29191da177e4SLinus Torvalds 		bh = next;
29201da177e4SLinus Torvalds 	} while (bh != head);
29211da177e4SLinus Torvalds 	*buffers_to_free = head;
292264394763SMatthew Wilcox (Oracle) 	folio_detach_private(folio);
292364394763SMatthew Wilcox (Oracle) 	return true;
29241da177e4SLinus Torvalds failed:
292564394763SMatthew Wilcox (Oracle) 	return false;
29261da177e4SLinus Torvalds }
29271da177e4SLinus Torvalds 
292868189fefSMatthew Wilcox (Oracle) bool try_to_free_buffers(struct folio *folio)
29291da177e4SLinus Torvalds {
293068189fefSMatthew Wilcox (Oracle) 	struct address_space * const mapping = folio->mapping;
29311da177e4SLinus Torvalds 	struct buffer_head *buffers_to_free = NULL;
293268189fefSMatthew Wilcox (Oracle) 	bool ret = 0;
29331da177e4SLinus Torvalds 
293468189fefSMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
293568189fefSMatthew Wilcox (Oracle) 	if (folio_test_writeback(folio))
293668189fefSMatthew Wilcox (Oracle) 		return false;
29371da177e4SLinus Torvalds 
29381da177e4SLinus Torvalds 	if (mapping == NULL) {		/* can this still happen? */
293964394763SMatthew Wilcox (Oracle) 		ret = drop_buffers(folio, &buffers_to_free);
29401da177e4SLinus Torvalds 		goto out;
29411da177e4SLinus Torvalds 	}
29421da177e4SLinus Torvalds 
29431da177e4SLinus Torvalds 	spin_lock(&mapping->private_lock);
294464394763SMatthew Wilcox (Oracle) 	ret = drop_buffers(folio, &buffers_to_free);
2945ecdfc978SLinus Torvalds 
2946ecdfc978SLinus Torvalds 	/*
2947ecdfc978SLinus Torvalds 	 * If the filesystem writes its buffers by hand (eg ext3)
294868189fefSMatthew Wilcox (Oracle) 	 * then we can have clean buffers against a dirty folio.  We
294968189fefSMatthew Wilcox (Oracle) 	 * clean the folio here; otherwise the VM will never notice
2950ecdfc978SLinus Torvalds 	 * that the filesystem did any IO at all.
2951ecdfc978SLinus Torvalds 	 *
2952ecdfc978SLinus Torvalds 	 * Also, during truncate, discard_buffer will have marked all
295368189fefSMatthew Wilcox (Oracle) 	 * the folio's buffers clean.  We discover that here and clean
295468189fefSMatthew Wilcox (Oracle) 	 * the folio also.
295587df7241SNick Piggin 	 *
295687df7241SNick Piggin 	 * private_lock must be held over this entire operation in order
2957e621900aSMatthew Wilcox (Oracle) 	 * to synchronise against block_dirty_folio and prevent the
295887df7241SNick Piggin 	 * dirty bit from being lost.
2959ecdfc978SLinus Torvalds 	 */
296011f81becSTejun Heo 	if (ret)
296168189fefSMatthew Wilcox (Oracle) 		folio_cancel_dirty(folio);
296287df7241SNick Piggin 	spin_unlock(&mapping->private_lock);
29631da177e4SLinus Torvalds out:
29641da177e4SLinus Torvalds 	if (buffers_to_free) {
29651da177e4SLinus Torvalds 		struct buffer_head *bh = buffers_to_free;
29661da177e4SLinus Torvalds 
29671da177e4SLinus Torvalds 		do {
29681da177e4SLinus Torvalds 			struct buffer_head *next = bh->b_this_page;
29691da177e4SLinus Torvalds 			free_buffer_head(bh);
29701da177e4SLinus Torvalds 			bh = next;
29711da177e4SLinus Torvalds 		} while (bh != buffers_to_free);
29721da177e4SLinus Torvalds 	}
29731da177e4SLinus Torvalds 	return ret;
29741da177e4SLinus Torvalds }
29751da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers);
29761da177e4SLinus Torvalds 
29771da177e4SLinus Torvalds /*
29781da177e4SLinus Torvalds  * Buffer-head allocation
29791da177e4SLinus Torvalds  */
298068279f9cSAlexey Dobriyan static struct kmem_cache *bh_cachep __ro_after_init;
29811da177e4SLinus Torvalds 
29821da177e4SLinus Torvalds /*
29831da177e4SLinus Torvalds  * Once the number of bh's in the machine exceeds this level, we start
29841da177e4SLinus Torvalds  * stripping them in writeback.
29851da177e4SLinus Torvalds  */
298668279f9cSAlexey Dobriyan static unsigned long max_buffer_heads __ro_after_init;
29871da177e4SLinus Torvalds 
29881da177e4SLinus Torvalds int buffer_heads_over_limit;
29891da177e4SLinus Torvalds 
29901da177e4SLinus Torvalds struct bh_accounting {
29911da177e4SLinus Torvalds 	int nr;			/* Number of live bh's */
29921da177e4SLinus Torvalds 	int ratelimit;		/* Limit cacheline bouncing */
29931da177e4SLinus Torvalds };
29941da177e4SLinus Torvalds 
29951da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
29961da177e4SLinus Torvalds 
29971da177e4SLinus Torvalds static void recalc_bh_state(void)
29981da177e4SLinus Torvalds {
29991da177e4SLinus Torvalds 	int i;
30001da177e4SLinus Torvalds 	int tot = 0;
30011da177e4SLinus Torvalds 
3002ee1be862SChristoph Lameter 	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
30031da177e4SLinus Torvalds 		return;
3004c7b92516SChristoph Lameter 	__this_cpu_write(bh_accounting.ratelimit, 0);
30058a143426SEric Dumazet 	for_each_online_cpu(i)
30061da177e4SLinus Torvalds 		tot += per_cpu(bh_accounting, i).nr;
30071da177e4SLinus Torvalds 	buffer_heads_over_limit = (tot > max_buffer_heads);
30081da177e4SLinus Torvalds }
30091da177e4SLinus Torvalds 
3010dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
30111da177e4SLinus Torvalds {
3012019b4d12SRichard Kennedy 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
30131da177e4SLinus Torvalds 	if (ret) {
3014a35afb83SChristoph Lameter 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3015f1e67e35SThomas Gleixner 		spin_lock_init(&ret->b_uptodate_lock);
3016c7b92516SChristoph Lameter 		preempt_disable();
3017c7b92516SChristoph Lameter 		__this_cpu_inc(bh_accounting.nr);
30181da177e4SLinus Torvalds 		recalc_bh_state();
3019c7b92516SChristoph Lameter 		preempt_enable();
30201da177e4SLinus Torvalds 	}
30211da177e4SLinus Torvalds 	return ret;
30221da177e4SLinus Torvalds }
30231da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head);
30241da177e4SLinus Torvalds 
30251da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh)
30261da177e4SLinus Torvalds {
30271da177e4SLinus Torvalds 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
30281da177e4SLinus Torvalds 	kmem_cache_free(bh_cachep, bh);
3029c7b92516SChristoph Lameter 	preempt_disable();
3030c7b92516SChristoph Lameter 	__this_cpu_dec(bh_accounting.nr);
30311da177e4SLinus Torvalds 	recalc_bh_state();
3032c7b92516SChristoph Lameter 	preempt_enable();
30331da177e4SLinus Torvalds }
30341da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head);
30351da177e4SLinus Torvalds 
3036fc4d24c9SSebastian Andrzej Siewior static int buffer_exit_cpu_dead(unsigned int cpu)
30371da177e4SLinus Torvalds {
30381da177e4SLinus Torvalds 	int i;
30391da177e4SLinus Torvalds 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
30401da177e4SLinus Torvalds 
30411da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
30421da177e4SLinus Torvalds 		brelse(b->bhs[i]);
30431da177e4SLinus Torvalds 		b->bhs[i] = NULL;
30441da177e4SLinus Torvalds 	}
3045c7b92516SChristoph Lameter 	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
30468a143426SEric Dumazet 	per_cpu(bh_accounting, cpu).nr = 0;
3047fc4d24c9SSebastian Andrzej Siewior 	return 0;
30481da177e4SLinus Torvalds }
30491da177e4SLinus Torvalds 
3050389d1b08SAneesh Kumar K.V /**
3051a6b91919SRandy Dunlap  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3052389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head
3053389d1b08SAneesh Kumar K.V  *
3054389d1b08SAneesh Kumar K.V  * Return true if the buffer is up-to-date and false,
3055389d1b08SAneesh Kumar K.V  * with the buffer locked, if not.
3056389d1b08SAneesh Kumar K.V  */
3057389d1b08SAneesh Kumar K.V int bh_uptodate_or_lock(struct buffer_head *bh)
3058389d1b08SAneesh Kumar K.V {
3059389d1b08SAneesh Kumar K.V 	if (!buffer_uptodate(bh)) {
3060389d1b08SAneesh Kumar K.V 		lock_buffer(bh);
3061389d1b08SAneesh Kumar K.V 		if (!buffer_uptodate(bh))
3062389d1b08SAneesh Kumar K.V 			return 0;
3063389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3064389d1b08SAneesh Kumar K.V 	}
3065389d1b08SAneesh Kumar K.V 	return 1;
3066389d1b08SAneesh Kumar K.V }
3067389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_uptodate_or_lock);
3068389d1b08SAneesh Kumar K.V 
3069389d1b08SAneesh Kumar K.V /**
3070fdee117eSZhang Yi  * __bh_read - Submit read for a locked buffer
3071389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head
3072fdee117eSZhang Yi  * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
3073fdee117eSZhang Yi  * @wait: wait until reading finish
3074389d1b08SAneesh Kumar K.V  *
3075fdee117eSZhang Yi  * Returns zero on success or don't wait, and -EIO on error.
3076389d1b08SAneesh Kumar K.V  */
3077fdee117eSZhang Yi int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3078389d1b08SAneesh Kumar K.V {
3079fdee117eSZhang Yi 	int ret = 0;
3080389d1b08SAneesh Kumar K.V 
3081fdee117eSZhang Yi 	BUG_ON(!buffer_locked(bh));
3082389d1b08SAneesh Kumar K.V 
3083389d1b08SAneesh Kumar K.V 	get_bh(bh);
3084389d1b08SAneesh Kumar K.V 	bh->b_end_io = end_buffer_read_sync;
3085fdee117eSZhang Yi 	submit_bh(REQ_OP_READ | op_flags, bh);
3086fdee117eSZhang Yi 	if (wait) {
3087389d1b08SAneesh Kumar K.V 		wait_on_buffer(bh);
3088fdee117eSZhang Yi 		if (!buffer_uptodate(bh))
3089fdee117eSZhang Yi 			ret = -EIO;
3090389d1b08SAneesh Kumar K.V 	}
3091fdee117eSZhang Yi 	return ret;
3092fdee117eSZhang Yi }
3093fdee117eSZhang Yi EXPORT_SYMBOL(__bh_read);
3094fdee117eSZhang Yi 
3095fdee117eSZhang Yi /**
3096fdee117eSZhang Yi  * __bh_read_batch - Submit read for a batch of unlocked buffers
3097fdee117eSZhang Yi  * @nr: entry number of the buffer batch
3098fdee117eSZhang Yi  * @bhs: a batch of struct buffer_head
3099fdee117eSZhang Yi  * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
3100fdee117eSZhang Yi  * @force_lock: force to get a lock on the buffer if set, otherwise drops any
3101fdee117eSZhang Yi  *              buffer that cannot lock.
3102fdee117eSZhang Yi  *
3103fdee117eSZhang Yi  * Returns zero on success or don't wait, and -EIO on error.
3104fdee117eSZhang Yi  */
3105fdee117eSZhang Yi void __bh_read_batch(int nr, struct buffer_head *bhs[],
3106fdee117eSZhang Yi 		     blk_opf_t op_flags, bool force_lock)
3107fdee117eSZhang Yi {
3108fdee117eSZhang Yi 	int i;
3109fdee117eSZhang Yi 
3110fdee117eSZhang Yi 	for (i = 0; i < nr; i++) {
3111fdee117eSZhang Yi 		struct buffer_head *bh = bhs[i];
3112fdee117eSZhang Yi 
3113fdee117eSZhang Yi 		if (buffer_uptodate(bh))
3114fdee117eSZhang Yi 			continue;
3115fdee117eSZhang Yi 
3116fdee117eSZhang Yi 		if (force_lock)
3117fdee117eSZhang Yi 			lock_buffer(bh);
3118fdee117eSZhang Yi 		else
3119fdee117eSZhang Yi 			if (!trylock_buffer(bh))
3120fdee117eSZhang Yi 				continue;
3121fdee117eSZhang Yi 
3122fdee117eSZhang Yi 		if (buffer_uptodate(bh)) {
3123fdee117eSZhang Yi 			unlock_buffer(bh);
3124fdee117eSZhang Yi 			continue;
3125fdee117eSZhang Yi 		}
3126fdee117eSZhang Yi 
3127fdee117eSZhang Yi 		bh->b_end_io = end_buffer_read_sync;
3128fdee117eSZhang Yi 		get_bh(bh);
3129fdee117eSZhang Yi 		submit_bh(REQ_OP_READ | op_flags, bh);
3130fdee117eSZhang Yi 	}
3131fdee117eSZhang Yi }
3132fdee117eSZhang Yi EXPORT_SYMBOL(__bh_read_batch);
3133389d1b08SAneesh Kumar K.V 
31341da177e4SLinus Torvalds void __init buffer_init(void)
31351da177e4SLinus Torvalds {
313643be594aSZhang Yanfei 	unsigned long nrpages;
3137fc4d24c9SSebastian Andrzej Siewior 	int ret;
31381da177e4SLinus Torvalds 
3139b98938c3SChristoph Lameter 	bh_cachep = kmem_cache_create("buffer_head",
3140b98938c3SChristoph Lameter 			sizeof(struct buffer_head), 0,
3141b98938c3SChristoph Lameter 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3142b98938c3SChristoph Lameter 				SLAB_MEM_SPREAD),
3143019b4d12SRichard Kennedy 				NULL);
31441da177e4SLinus Torvalds 
31451da177e4SLinus Torvalds 	/*
31461da177e4SLinus Torvalds 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
31471da177e4SLinus Torvalds 	 */
31481da177e4SLinus Torvalds 	nrpages = (nr_free_buffer_pages() * 10) / 100;
31491da177e4SLinus Torvalds 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3150fc4d24c9SSebastian Andrzej Siewior 	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3151fc4d24c9SSebastian Andrzej Siewior 					NULL, buffer_exit_cpu_dead);
3152fc4d24c9SSebastian Andrzej Siewior 	WARN_ON(ret < 0);
31531da177e4SLinus Torvalds }
3154