xref: /linux/fs/buffer.c (revision 1da86618bdce301d23e89ecce92161f9d3b3c5e7)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  *  linux/fs/buffer.c
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
61da177e4SLinus Torvalds  */
71da177e4SLinus Torvalds 
81da177e4SLinus Torvalds /*
91da177e4SLinus Torvalds  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Removed a lot of unnecessary code and simplified things now that
121da177e4SLinus Torvalds  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
131da177e4SLinus Torvalds  *
141da177e4SLinus Torvalds  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
151da177e4SLinus Torvalds  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
161da177e4SLinus Torvalds  *
171da177e4SLinus Torvalds  * Added 32k buffer block sizes - these are required older ARM systems. - RMK
181da177e4SLinus Torvalds  *
191da177e4SLinus Torvalds  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
201da177e4SLinus Torvalds  */
211da177e4SLinus Torvalds 
221da177e4SLinus Torvalds #include <linux/kernel.h>
23f361bf4aSIngo Molnar #include <linux/sched/signal.h>
241da177e4SLinus Torvalds #include <linux/syscalls.h>
251da177e4SLinus Torvalds #include <linux/fs.h>
26ae259a9cSChristoph Hellwig #include <linux/iomap.h>
271da177e4SLinus Torvalds #include <linux/mm.h>
281da177e4SLinus Torvalds #include <linux/percpu.h>
291da177e4SLinus Torvalds #include <linux/slab.h>
3016f7e0feSRandy Dunlap #include <linux/capability.h>
311da177e4SLinus Torvalds #include <linux/blkdev.h>
321da177e4SLinus Torvalds #include <linux/file.h>
331da177e4SLinus Torvalds #include <linux/quotaops.h>
341da177e4SLinus Torvalds #include <linux/highmem.h>
35630d9c47SPaul Gortmaker #include <linux/export.h>
36bafc0dbaSTejun Heo #include <linux/backing-dev.h>
371da177e4SLinus Torvalds #include <linux/writeback.h>
381da177e4SLinus Torvalds #include <linux/hash.h>
391da177e4SLinus Torvalds #include <linux/suspend.h>
401da177e4SLinus Torvalds #include <linux/buffer_head.h>
4155e829afSAndrew Morton #include <linux/task_io_accounting_ops.h>
421da177e4SLinus Torvalds #include <linux/bio.h>
431da177e4SLinus Torvalds #include <linux/cpu.h>
441da177e4SLinus Torvalds #include <linux/bitops.h>
451da177e4SLinus Torvalds #include <linux/mpage.h>
46fb1c8f93SIngo Molnar #include <linux/bit_spinlock.h>
4729f3ad7dSJan Kara #include <linux/pagevec.h>
48f745c6f5SShakeel Butt #include <linux/sched/mm.h>
495305cb83STejun Heo #include <trace/events/block.h>
5031fb992cSEric Biggers #include <linux/fscrypt.h>
514fa512ceSEric Biggers #include <linux/fsverity.h>
528a237adfSMarcelo Tosatti #include <linux/sched/isolation.h>
531da177e4SLinus Torvalds 
542b211dc0SBen Dooks #include "internal.h"
552b211dc0SBen Dooks 
561da177e4SLinus Torvalds static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
575bdf402aSRitesh Harjani (IBM) static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
5844981351SBart Van Assche 			  enum rw_hint hint, struct writeback_control *wbc);
591da177e4SLinus Torvalds 
601da177e4SLinus Torvalds #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
611da177e4SLinus Torvalds 
62f0059afdSTejun Heo inline void touch_buffer(struct buffer_head *bh)
63f0059afdSTejun Heo {
645305cb83STejun Heo 	trace_block_touch_buffer(bh);
6503c5f331SMatthew Wilcox (Oracle) 	folio_mark_accessed(bh->b_folio);
66f0059afdSTejun Heo }
67f0059afdSTejun Heo EXPORT_SYMBOL(touch_buffer);
68f0059afdSTejun Heo 
69fc9b52cdSHarvey Harrison void __lock_buffer(struct buffer_head *bh)
701da177e4SLinus Torvalds {
7174316201SNeilBrown 	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
721da177e4SLinus Torvalds }
731da177e4SLinus Torvalds EXPORT_SYMBOL(__lock_buffer);
741da177e4SLinus Torvalds 
75fc9b52cdSHarvey Harrison void unlock_buffer(struct buffer_head *bh)
761da177e4SLinus Torvalds {
7751b07fc3SNick Piggin 	clear_bit_unlock(BH_Lock, &bh->b_state);
784e857c58SPeter Zijlstra 	smp_mb__after_atomic();
791da177e4SLinus Torvalds 	wake_up_bit(&bh->b_state, BH_Lock);
801da177e4SLinus Torvalds }
811fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(unlock_buffer);
821da177e4SLinus Torvalds 
831da177e4SLinus Torvalds /*
84520f301cSMatthew Wilcox (Oracle)  * Returns if the folio has dirty or writeback buffers. If all the buffers
85520f301cSMatthew Wilcox (Oracle)  * are unlocked and clean then the folio_test_dirty information is stale. If
86520f301cSMatthew Wilcox (Oracle)  * any of the buffers are locked, it is assumed they are locked for IO.
87b4597226SMel Gorman  */
88520f301cSMatthew Wilcox (Oracle) void buffer_check_dirty_writeback(struct folio *folio,
89b4597226SMel Gorman 				     bool *dirty, bool *writeback)
90b4597226SMel Gorman {
91b4597226SMel Gorman 	struct buffer_head *head, *bh;
92b4597226SMel Gorman 	*dirty = false;
93b4597226SMel Gorman 	*writeback = false;
94b4597226SMel Gorman 
95520f301cSMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
96b4597226SMel Gorman 
97520f301cSMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
98520f301cSMatthew Wilcox (Oracle) 	if (!head)
99b4597226SMel Gorman 		return;
100b4597226SMel Gorman 
101520f301cSMatthew Wilcox (Oracle) 	if (folio_test_writeback(folio))
102b4597226SMel Gorman 		*writeback = true;
103b4597226SMel Gorman 
104b4597226SMel Gorman 	bh = head;
105b4597226SMel Gorman 	do {
106b4597226SMel Gorman 		if (buffer_locked(bh))
107b4597226SMel Gorman 			*writeback = true;
108b4597226SMel Gorman 
109b4597226SMel Gorman 		if (buffer_dirty(bh))
110b4597226SMel Gorman 			*dirty = true;
111b4597226SMel Gorman 
112b4597226SMel Gorman 		bh = bh->b_this_page;
113b4597226SMel Gorman 	} while (bh != head);
114b4597226SMel Gorman }
115b4597226SMel Gorman 
116b4597226SMel Gorman /*
1171da177e4SLinus Torvalds  * Block until a buffer comes unlocked.  This doesn't stop it
1181da177e4SLinus Torvalds  * from becoming locked again - you have to lock it yourself
1191da177e4SLinus Torvalds  * if you want to preserve its state.
1201da177e4SLinus Torvalds  */
1211da177e4SLinus Torvalds void __wait_on_buffer(struct buffer_head * bh)
1221da177e4SLinus Torvalds {
12374316201SNeilBrown 	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
1241da177e4SLinus Torvalds }
1251fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__wait_on_buffer);
1261da177e4SLinus Torvalds 
127b744c2acSRobert Elliott static void buffer_io_error(struct buffer_head *bh, char *msg)
1281da177e4SLinus Torvalds {
129432f16e6SRobert Elliott 	if (!test_bit(BH_Quiet, &bh->b_state))
130432f16e6SRobert Elliott 		printk_ratelimited(KERN_ERR
131a1c6f057SDmitry Monakhov 			"Buffer I/O error on dev %pg, logical block %llu%s\n",
132a1c6f057SDmitry Monakhov 			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
1331da177e4SLinus Torvalds }
1341da177e4SLinus Torvalds 
1351da177e4SLinus Torvalds /*
13668671f35SDmitry Monakhov  * End-of-IO handler helper function which does not touch the bh after
13768671f35SDmitry Monakhov  * unlocking it.
13868671f35SDmitry Monakhov  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
13968671f35SDmitry Monakhov  * a race there is benign: unlock_buffer() only use the bh's address for
14068671f35SDmitry Monakhov  * hashing after unlocking the buffer, so it doesn't actually touch the bh
14168671f35SDmitry Monakhov  * itself.
1421da177e4SLinus Torvalds  */
14368671f35SDmitry Monakhov static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
1441da177e4SLinus Torvalds {
1451da177e4SLinus Torvalds 	if (uptodate) {
1461da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
1471da177e4SLinus Torvalds 	} else {
14870246286SChristoph Hellwig 		/* This happens, due to failed read-ahead attempts. */
1491da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
1501da177e4SLinus Torvalds 	}
1511da177e4SLinus Torvalds 	unlock_buffer(bh);
15268671f35SDmitry Monakhov }
15368671f35SDmitry Monakhov 
15468671f35SDmitry Monakhov /*
15568671f35SDmitry Monakhov  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
15679f59784SZhang Yi  * unlock the buffer.
15768671f35SDmitry Monakhov  */
15868671f35SDmitry Monakhov void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
15968671f35SDmitry Monakhov {
16068671f35SDmitry Monakhov 	__end_buffer_read_notouch(bh, uptodate);
1611da177e4SLinus Torvalds 	put_bh(bh);
1621da177e4SLinus Torvalds }
1631fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(end_buffer_read_sync);
1641da177e4SLinus Torvalds 
1651da177e4SLinus Torvalds void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
1661da177e4SLinus Torvalds {
1671da177e4SLinus Torvalds 	if (uptodate) {
1681da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
1691da177e4SLinus Torvalds 	} else {
170b744c2acSRobert Elliott 		buffer_io_error(bh, ", lost sync page write");
17187354e5dSJeff Layton 		mark_buffer_write_io_error(bh);
1721da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
1731da177e4SLinus Torvalds 	}
1741da177e4SLinus Torvalds 	unlock_buffer(bh);
1751da177e4SLinus Torvalds 	put_bh(bh);
1761da177e4SLinus Torvalds }
1771fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(end_buffer_write_sync);
1781da177e4SLinus Torvalds 
1791da177e4SLinus Torvalds /*
1801da177e4SLinus Torvalds  * Various filesystems appear to want __find_get_block to be non-blocking.
1811da177e4SLinus Torvalds  * But it's the page lock which protects the buffers.  To get around this,
1821da177e4SLinus Torvalds  * we get exclusion from try_to_free_buffers with the blockdev mapping's
183600f111eSMatthew Wilcox (Oracle)  * i_private_lock.
1841da177e4SLinus Torvalds  *
185600f111eSMatthew Wilcox (Oracle)  * Hack idea: for the blockdev mapping, i_private_lock contention
1861da177e4SLinus Torvalds  * may be quite high.  This code could TryLock the page, and if that
187600f111eSMatthew Wilcox (Oracle)  * succeeds, there is no need to take i_private_lock.
1881da177e4SLinus Torvalds  */
1891da177e4SLinus Torvalds static struct buffer_head *
190385fd4c5SCoywolf Qi Hunt __find_get_block_slow(struct block_device *bdev, sector_t block)
1911da177e4SLinus Torvalds {
19253cd4cd3SAl Viro 	struct address_space *bd_mapping = bdev->bd_mapping;
19353cd4cd3SAl Viro 	const int blkbits = bd_mapping->host->i_blkbits;
1941da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
1951da177e4SLinus Torvalds 	pgoff_t index;
1961da177e4SLinus Torvalds 	struct buffer_head *bh;
1971da177e4SLinus Torvalds 	struct buffer_head *head;
198eee25182SMatthew Wilcox (Oracle) 	struct folio *folio;
1991da177e4SLinus Torvalds 	int all_mapped = 1;
20043636c80STetsuo Handa 	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
2011da177e4SLinus Torvalds 
20253cd4cd3SAl Viro 	index = ((loff_t)block << blkbits) / PAGE_SIZE;
203eee25182SMatthew Wilcox (Oracle) 	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
204eee25182SMatthew Wilcox (Oracle) 	if (IS_ERR(folio))
2051da177e4SLinus Torvalds 		goto out;
2061da177e4SLinus Torvalds 
207600f111eSMatthew Wilcox (Oracle) 	spin_lock(&bd_mapping->i_private_lock);
208eee25182SMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
209eee25182SMatthew Wilcox (Oracle) 	if (!head)
2101da177e4SLinus Torvalds 		goto out_unlock;
2111da177e4SLinus Torvalds 	bh = head;
2121da177e4SLinus Torvalds 	do {
21397f76d3dSNikanth Karthikesan 		if (!buffer_mapped(bh))
21497f76d3dSNikanth Karthikesan 			all_mapped = 0;
21597f76d3dSNikanth Karthikesan 		else if (bh->b_blocknr == block) {
2161da177e4SLinus Torvalds 			ret = bh;
2171da177e4SLinus Torvalds 			get_bh(bh);
2181da177e4SLinus Torvalds 			goto out_unlock;
2191da177e4SLinus Torvalds 		}
2201da177e4SLinus Torvalds 		bh = bh->b_this_page;
2211da177e4SLinus Torvalds 	} while (bh != head);
2221da177e4SLinus Torvalds 
2231da177e4SLinus Torvalds 	/* we might be here because some of the buffers on this page are
2241da177e4SLinus Torvalds 	 * not mapped.  This is due to various races between
2251da177e4SLinus Torvalds 	 * file io on the block device and getblk.  It gets dealt with
2261da177e4SLinus Torvalds 	 * elsewhere, don't buffer_error if we had some unmapped buffers
2271da177e4SLinus Torvalds 	 */
22843636c80STetsuo Handa 	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
22943636c80STetsuo Handa 	if (all_mapped && __ratelimit(&last_warned)) {
23043636c80STetsuo Handa 		printk("__find_get_block_slow() failed. block=%llu, "
23143636c80STetsuo Handa 		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
23243636c80STetsuo Handa 		       "device %pg blocksize: %d\n",
233205f87f6SBadari Pulavarty 		       (unsigned long long)block,
23443636c80STetsuo Handa 		       (unsigned long long)bh->b_blocknr,
23543636c80STetsuo Handa 		       bh->b_state, bh->b_size, bdev,
23653cd4cd3SAl Viro 		       1 << blkbits);
2371da177e4SLinus Torvalds 	}
2381da177e4SLinus Torvalds out_unlock:
239600f111eSMatthew Wilcox (Oracle) 	spin_unlock(&bd_mapping->i_private_lock);
240eee25182SMatthew Wilcox (Oracle) 	folio_put(folio);
2411da177e4SLinus Torvalds out:
2421da177e4SLinus Torvalds 	return ret;
2431da177e4SLinus Torvalds }
2441da177e4SLinus Torvalds 
2451da177e4SLinus Torvalds static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
2461da177e4SLinus Torvalds {
2471da177e4SLinus Torvalds 	unsigned long flags;
248a3972203SNick Piggin 	struct buffer_head *first;
2491da177e4SLinus Torvalds 	struct buffer_head *tmp;
2502e2dba15SMatthew Wilcox (Oracle) 	struct folio *folio;
2512e2dba15SMatthew Wilcox (Oracle) 	int folio_uptodate = 1;
2521da177e4SLinus Torvalds 
2531da177e4SLinus Torvalds 	BUG_ON(!buffer_async_read(bh));
2541da177e4SLinus Torvalds 
2552e2dba15SMatthew Wilcox (Oracle) 	folio = bh->b_folio;
2561da177e4SLinus Torvalds 	if (uptodate) {
2571da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
2581da177e4SLinus Torvalds 	} else {
2591da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
260b744c2acSRobert Elliott 		buffer_io_error(bh, ", async page read");
2611da177e4SLinus Torvalds 	}
2621da177e4SLinus Torvalds 
2631da177e4SLinus Torvalds 	/*
2641da177e4SLinus Torvalds 	 * Be _very_ careful from here on. Bad things can happen if
2651da177e4SLinus Torvalds 	 * two buffer heads end IO at almost the same time and both
2661da177e4SLinus Torvalds 	 * decide that the page is now completely done.
2671da177e4SLinus Torvalds 	 */
2682e2dba15SMatthew Wilcox (Oracle) 	first = folio_buffers(folio);
269f1e67e35SThomas Gleixner 	spin_lock_irqsave(&first->b_uptodate_lock, flags);
2701da177e4SLinus Torvalds 	clear_buffer_async_read(bh);
2711da177e4SLinus Torvalds 	unlock_buffer(bh);
2721da177e4SLinus Torvalds 	tmp = bh;
2731da177e4SLinus Torvalds 	do {
2741da177e4SLinus Torvalds 		if (!buffer_uptodate(tmp))
2752e2dba15SMatthew Wilcox (Oracle) 			folio_uptodate = 0;
2761da177e4SLinus Torvalds 		if (buffer_async_read(tmp)) {
2771da177e4SLinus Torvalds 			BUG_ON(!buffer_locked(tmp));
2781da177e4SLinus Torvalds 			goto still_busy;
2791da177e4SLinus Torvalds 		}
2801da177e4SLinus Torvalds 		tmp = tmp->b_this_page;
2811da177e4SLinus Torvalds 	} while (tmp != bh);
282f1e67e35SThomas Gleixner 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
2831da177e4SLinus Torvalds 
2846ba924d3SMatthew Wilcox (Oracle) 	folio_end_read(folio, folio_uptodate);
2851da177e4SLinus Torvalds 	return;
2861da177e4SLinus Torvalds 
2871da177e4SLinus Torvalds still_busy:
288f1e67e35SThomas Gleixner 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
2891da177e4SLinus Torvalds 	return;
2901da177e4SLinus Torvalds }
2911da177e4SLinus Torvalds 
2924fa512ceSEric Biggers struct postprocess_bh_ctx {
29331fb992cSEric Biggers 	struct work_struct work;
29431fb992cSEric Biggers 	struct buffer_head *bh;
29531fb992cSEric Biggers };
29631fb992cSEric Biggers 
2974fa512ceSEric Biggers static void verify_bh(struct work_struct *work)
2984fa512ceSEric Biggers {
2994fa512ceSEric Biggers 	struct postprocess_bh_ctx *ctx =
3004fa512ceSEric Biggers 		container_of(work, struct postprocess_bh_ctx, work);
3014fa512ceSEric Biggers 	struct buffer_head *bh = ctx->bh;
3024fa512ceSEric Biggers 	bool valid;
3034fa512ceSEric Biggers 
3048b7d3fe9SEric Biggers 	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
3054fa512ceSEric Biggers 	end_buffer_async_read(bh, valid);
3064fa512ceSEric Biggers 	kfree(ctx);
3074fa512ceSEric Biggers }
3084fa512ceSEric Biggers 
3094fa512ceSEric Biggers static bool need_fsverity(struct buffer_head *bh)
3104fa512ceSEric Biggers {
3118b7d3fe9SEric Biggers 	struct folio *folio = bh->b_folio;
3128b7d3fe9SEric Biggers 	struct inode *inode = folio->mapping->host;
3134fa512ceSEric Biggers 
3144fa512ceSEric Biggers 	return fsverity_active(inode) &&
3154fa512ceSEric Biggers 		/* needed by ext4 */
3168b7d3fe9SEric Biggers 		folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
3174fa512ceSEric Biggers }
3184fa512ceSEric Biggers 
31931fb992cSEric Biggers static void decrypt_bh(struct work_struct *work)
32031fb992cSEric Biggers {
3214fa512ceSEric Biggers 	struct postprocess_bh_ctx *ctx =
3224fa512ceSEric Biggers 		container_of(work, struct postprocess_bh_ctx, work);
32331fb992cSEric Biggers 	struct buffer_head *bh = ctx->bh;
32431fb992cSEric Biggers 	int err;
32531fb992cSEric Biggers 
3269c7fb7f7SEric Biggers 	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
3279c7fb7f7SEric Biggers 					       bh_offset(bh));
3284fa512ceSEric Biggers 	if (err == 0 && need_fsverity(bh)) {
3294fa512ceSEric Biggers 		/*
3304fa512ceSEric Biggers 		 * We use different work queues for decryption and for verity
3314fa512ceSEric Biggers 		 * because verity may require reading metadata pages that need
3324fa512ceSEric Biggers 		 * decryption, and we shouldn't recurse to the same workqueue.
3334fa512ceSEric Biggers 		 */
3344fa512ceSEric Biggers 		INIT_WORK(&ctx->work, verify_bh);
3354fa512ceSEric Biggers 		fsverity_enqueue_verify_work(&ctx->work);
3364fa512ceSEric Biggers 		return;
3374fa512ceSEric Biggers 	}
33831fb992cSEric Biggers 	end_buffer_async_read(bh, err == 0);
33931fb992cSEric Biggers 	kfree(ctx);
34031fb992cSEric Biggers }
34131fb992cSEric Biggers 
34231fb992cSEric Biggers /*
3432c69e205SMatthew Wilcox (Oracle)  * I/O completion handler for block_read_full_folio() - pages
34431fb992cSEric Biggers  * which come unlocked at the end of I/O.
34531fb992cSEric Biggers  */
34631fb992cSEric Biggers static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
34731fb992cSEric Biggers {
3483822a7c4SLinus Torvalds 	struct inode *inode = bh->b_folio->mapping->host;
3494fa512ceSEric Biggers 	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
3504fa512ceSEric Biggers 	bool verify = need_fsverity(bh);
3514fa512ceSEric Biggers 
3524fa512ceSEric Biggers 	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
3534fa512ceSEric Biggers 	if (uptodate && (decrypt || verify)) {
3544fa512ceSEric Biggers 		struct postprocess_bh_ctx *ctx =
3554fa512ceSEric Biggers 			kmalloc(sizeof(*ctx), GFP_ATOMIC);
35631fb992cSEric Biggers 
35731fb992cSEric Biggers 		if (ctx) {
35831fb992cSEric Biggers 			ctx->bh = bh;
3594fa512ceSEric Biggers 			if (decrypt) {
3604fa512ceSEric Biggers 				INIT_WORK(&ctx->work, decrypt_bh);
36131fb992cSEric Biggers 				fscrypt_enqueue_decrypt_work(&ctx->work);
3624fa512ceSEric Biggers 			} else {
3634fa512ceSEric Biggers 				INIT_WORK(&ctx->work, verify_bh);
3644fa512ceSEric Biggers 				fsverity_enqueue_verify_work(&ctx->work);
3654fa512ceSEric Biggers 			}
36631fb992cSEric Biggers 			return;
36731fb992cSEric Biggers 		}
36831fb992cSEric Biggers 		uptodate = 0;
36931fb992cSEric Biggers 	}
37031fb992cSEric Biggers 	end_buffer_async_read(bh, uptodate);
37131fb992cSEric Biggers }
37231fb992cSEric Biggers 
3731da177e4SLinus Torvalds /*
37414059f66SMatthew Wilcox (Oracle)  * Completion handler for block_write_full_folio() - folios which are unlocked
37514059f66SMatthew Wilcox (Oracle)  * during I/O, and which have the writeback flag cleared upon I/O completion.
3761da177e4SLinus Torvalds  */
37714059f66SMatthew Wilcox (Oracle) static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
3781da177e4SLinus Torvalds {
3791da177e4SLinus Torvalds 	unsigned long flags;
380a3972203SNick Piggin 	struct buffer_head *first;
3811da177e4SLinus Torvalds 	struct buffer_head *tmp;
382743ed81eSMatthew Wilcox (Oracle) 	struct folio *folio;
3831da177e4SLinus Torvalds 
3841da177e4SLinus Torvalds 	BUG_ON(!buffer_async_write(bh));
3851da177e4SLinus Torvalds 
386743ed81eSMatthew Wilcox (Oracle) 	folio = bh->b_folio;
3871da177e4SLinus Torvalds 	if (uptodate) {
3881da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
3891da177e4SLinus Torvalds 	} else {
390b744c2acSRobert Elliott 		buffer_io_error(bh, ", lost async page write");
39187354e5dSJeff Layton 		mark_buffer_write_io_error(bh);
3921da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
3931da177e4SLinus Torvalds 	}
3941da177e4SLinus Torvalds 
395743ed81eSMatthew Wilcox (Oracle) 	first = folio_buffers(folio);
396f1e67e35SThomas Gleixner 	spin_lock_irqsave(&first->b_uptodate_lock, flags);
397a3972203SNick Piggin 
3981da177e4SLinus Torvalds 	clear_buffer_async_write(bh);
3991da177e4SLinus Torvalds 	unlock_buffer(bh);
4001da177e4SLinus Torvalds 	tmp = bh->b_this_page;
4011da177e4SLinus Torvalds 	while (tmp != bh) {
4021da177e4SLinus Torvalds 		if (buffer_async_write(tmp)) {
4031da177e4SLinus Torvalds 			BUG_ON(!buffer_locked(tmp));
4041da177e4SLinus Torvalds 			goto still_busy;
4051da177e4SLinus Torvalds 		}
4061da177e4SLinus Torvalds 		tmp = tmp->b_this_page;
4071da177e4SLinus Torvalds 	}
408f1e67e35SThomas Gleixner 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
409743ed81eSMatthew Wilcox (Oracle) 	folio_end_writeback(folio);
4101da177e4SLinus Torvalds 	return;
4111da177e4SLinus Torvalds 
4121da177e4SLinus Torvalds still_busy:
413f1e67e35SThomas Gleixner 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
4141da177e4SLinus Torvalds 	return;
4151da177e4SLinus Torvalds }
4161da177e4SLinus Torvalds 
4171da177e4SLinus Torvalds /*
4181da177e4SLinus Torvalds  * If a page's buffers are under async readin (end_buffer_async_read
4191da177e4SLinus Torvalds  * completion) then there is a possibility that another thread of
4201da177e4SLinus Torvalds  * control could lock one of the buffers after it has completed
4211da177e4SLinus Torvalds  * but while some of the other buffers have not completed.  This
4221da177e4SLinus Torvalds  * locked buffer would confuse end_buffer_async_read() into not unlocking
4231da177e4SLinus Torvalds  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
4241da177e4SLinus Torvalds  * that this buffer is not under async I/O.
4251da177e4SLinus Torvalds  *
4261da177e4SLinus Torvalds  * The page comes unlocked when it has no locked buffer_async buffers
4271da177e4SLinus Torvalds  * left.
4281da177e4SLinus Torvalds  *
4291da177e4SLinus Torvalds  * PageLocked prevents anyone starting new async I/O reads any of
4301da177e4SLinus Torvalds  * the buffers.
4311da177e4SLinus Torvalds  *
4321da177e4SLinus Torvalds  * PageWriteback is used to prevent simultaneous writeout of the same
4331da177e4SLinus Torvalds  * page.
4341da177e4SLinus Torvalds  *
4351da177e4SLinus Torvalds  * PageLocked prevents anyone from starting writeback of a page which is
4361da177e4SLinus Torvalds  * under read I/O (PageWriteback is only ever set against a locked page).
4371da177e4SLinus Torvalds  */
4381da177e4SLinus Torvalds static void mark_buffer_async_read(struct buffer_head *bh)
4391da177e4SLinus Torvalds {
44031fb992cSEric Biggers 	bh->b_end_io = end_buffer_async_read_io;
4411da177e4SLinus Torvalds 	set_buffer_async_read(bh);
4421da177e4SLinus Torvalds }
4431da177e4SLinus Torvalds 
4441fe72eaaSH Hartley Sweeten static void mark_buffer_async_write_endio(struct buffer_head *bh,
44535c80d5fSChris Mason 					  bh_end_io_t *handler)
44635c80d5fSChris Mason {
44735c80d5fSChris Mason 	bh->b_end_io = handler;
44835c80d5fSChris Mason 	set_buffer_async_write(bh);
44935c80d5fSChris Mason }
45035c80d5fSChris Mason 
4511da177e4SLinus Torvalds void mark_buffer_async_write(struct buffer_head *bh)
4521da177e4SLinus Torvalds {
45335c80d5fSChris Mason 	mark_buffer_async_write_endio(bh, end_buffer_async_write);
4541da177e4SLinus Torvalds }
4551da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_async_write);
4561da177e4SLinus Torvalds 
4571da177e4SLinus Torvalds 
4581da177e4SLinus Torvalds /*
4591da177e4SLinus Torvalds  * fs/buffer.c contains helper functions for buffer-backed address space's
4601da177e4SLinus Torvalds  * fsync functions.  A common requirement for buffer-based filesystems is
4611da177e4SLinus Torvalds  * that certain data from the backing blockdev needs to be written out for
4621da177e4SLinus Torvalds  * a successful fsync().  For example, ext2 indirect blocks need to be
4631da177e4SLinus Torvalds  * written back and waited upon before fsync() returns.
4641da177e4SLinus Torvalds  *
46573f65b8bSAndreas Gruenbacher  * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
4661da177e4SLinus Torvalds  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
467600f111eSMatthew Wilcox (Oracle)  * management of a list of dependent buffers at ->i_mapping->i_private_list.
4681da177e4SLinus Torvalds  *
4691da177e4SLinus Torvalds  * Locking is a little subtle: try_to_free_buffers() will remove buffers
4701da177e4SLinus Torvalds  * from their controlling inode's queue when they are being freed.  But
4711da177e4SLinus Torvalds  * try_to_free_buffers() will be operating against the *blockdev* mapping
4721da177e4SLinus Torvalds  * at the time, not against the S_ISREG file which depends on those buffers.
473600f111eSMatthew Wilcox (Oracle)  * So the locking for i_private_list is via the i_private_lock in the address_space
4741da177e4SLinus Torvalds  * which backs the buffers.  Which is different from the address_space
4751da177e4SLinus Torvalds  * against which the buffers are listed.  So for a particular address_space,
476600f111eSMatthew Wilcox (Oracle)  * mapping->i_private_lock does *not* protect mapping->i_private_list!  In fact,
477600f111eSMatthew Wilcox (Oracle)  * mapping->i_private_list will always be protected by the backing blockdev's
478600f111eSMatthew Wilcox (Oracle)  * ->i_private_lock.
4791da177e4SLinus Torvalds  *
4801da177e4SLinus Torvalds  * Which introduces a requirement: all buffers on an address_space's
481600f111eSMatthew Wilcox (Oracle)  * ->i_private_list must be from the same address_space: the blockdev's.
4821da177e4SLinus Torvalds  *
483600f111eSMatthew Wilcox (Oracle)  * address_spaces which do not place buffers at ->i_private_list via these
484600f111eSMatthew Wilcox (Oracle)  * utility functions are free to use i_private_lock and i_private_list for
485600f111eSMatthew Wilcox (Oracle)  * whatever they want.  The only requirement is that list_empty(i_private_list)
4861da177e4SLinus Torvalds  * be true at clear_inode() time.
4871da177e4SLinus Torvalds  *
4881da177e4SLinus Torvalds  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
4891da177e4SLinus Torvalds  * filesystems should do that.  invalidate_inode_buffers() should just go
4901da177e4SLinus Torvalds  * BUG_ON(!list_empty).
4911da177e4SLinus Torvalds  *
4921da177e4SLinus Torvalds  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
4931da177e4SLinus Torvalds  * take an address_space, not an inode.  And it should be called
4941da177e4SLinus Torvalds  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
4951da177e4SLinus Torvalds  * queued up.
4961da177e4SLinus Torvalds  *
4971da177e4SLinus Torvalds  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
4981da177e4SLinus Torvalds  * list if it is already on a list.  Because if the buffer is on a list,
4991da177e4SLinus Torvalds  * it *must* already be on the right one.  If not, the filesystem is being
5001da177e4SLinus Torvalds  * silly.  This will save a ton of locking.  But first we have to ensure
5011da177e4SLinus Torvalds  * that buffers are taken *off* the old inode's list when they are freed
5021da177e4SLinus Torvalds  * (presumably in truncate).  That requires careful auditing of all
5031da177e4SLinus Torvalds  * filesystems (do it inside bforget()).  It could also be done by bringing
5041da177e4SLinus Torvalds  * b_inode back.
5051da177e4SLinus Torvalds  */
5061da177e4SLinus Torvalds 
5071da177e4SLinus Torvalds /*
508600f111eSMatthew Wilcox (Oracle)  * The buffer's backing address_space's i_private_lock must be held
5091da177e4SLinus Torvalds  */
510dbacefc9SThomas Petazzoni static void __remove_assoc_queue(struct buffer_head *bh)
5111da177e4SLinus Torvalds {
5121da177e4SLinus Torvalds 	list_del_init(&bh->b_assoc_buffers);
51358ff407bSJan Kara 	WARN_ON(!bh->b_assoc_map);
51458ff407bSJan Kara 	bh->b_assoc_map = NULL;
5151da177e4SLinus Torvalds }
5161da177e4SLinus Torvalds 
5171da177e4SLinus Torvalds int inode_has_buffers(struct inode *inode)
5181da177e4SLinus Torvalds {
519600f111eSMatthew Wilcox (Oracle) 	return !list_empty(&inode->i_data.i_private_list);
5201da177e4SLinus Torvalds }
5211da177e4SLinus Torvalds 
5221da177e4SLinus Torvalds /*
5231da177e4SLinus Torvalds  * osync is designed to support O_SYNC io.  It waits synchronously for
5241da177e4SLinus Torvalds  * all already-submitted IO to complete, but does not queue any new
5251da177e4SLinus Torvalds  * writes to the disk.
5261da177e4SLinus Torvalds  *
52779f59784SZhang Yi  * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
52879f59784SZhang Yi  * as you dirty the buffers, and then use osync_inode_buffers to wait for
5291da177e4SLinus Torvalds  * completion.  Any other dirty buffers which are not yet queued for
5301da177e4SLinus Torvalds  * write will not be flushed to disk by the osync.
5311da177e4SLinus Torvalds  */
5321da177e4SLinus Torvalds static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
5331da177e4SLinus Torvalds {
5341da177e4SLinus Torvalds 	struct buffer_head *bh;
5351da177e4SLinus Torvalds 	struct list_head *p;
5361da177e4SLinus Torvalds 	int err = 0;
5371da177e4SLinus Torvalds 
5381da177e4SLinus Torvalds 	spin_lock(lock);
5391da177e4SLinus Torvalds repeat:
5401da177e4SLinus Torvalds 	list_for_each_prev(p, list) {
5411da177e4SLinus Torvalds 		bh = BH_ENTRY(p);
5421da177e4SLinus Torvalds 		if (buffer_locked(bh)) {
5431da177e4SLinus Torvalds 			get_bh(bh);
5441da177e4SLinus Torvalds 			spin_unlock(lock);
5451da177e4SLinus Torvalds 			wait_on_buffer(bh);
5461da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
5471da177e4SLinus Torvalds 				err = -EIO;
5481da177e4SLinus Torvalds 			brelse(bh);
5491da177e4SLinus Torvalds 			spin_lock(lock);
5501da177e4SLinus Torvalds 			goto repeat;
5511da177e4SLinus Torvalds 		}
5521da177e4SLinus Torvalds 	}
5531da177e4SLinus Torvalds 	spin_unlock(lock);
5541da177e4SLinus Torvalds 	return err;
5551da177e4SLinus Torvalds }
5561da177e4SLinus Torvalds 
5571da177e4SLinus Torvalds /**
55878a4a50aSRandy Dunlap  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
55967be2dd1SMartin Waitz  * @mapping: the mapping which wants those buffers written
5601da177e4SLinus Torvalds  *
561600f111eSMatthew Wilcox (Oracle)  * Starts I/O against the buffers at mapping->i_private_list, and waits upon
5621da177e4SLinus Torvalds  * that I/O.
5631da177e4SLinus Torvalds  *
56467be2dd1SMartin Waitz  * Basically, this is a convenience function for fsync().
56567be2dd1SMartin Waitz  * @mapping is a file or directory which needs those buffers to be written for
56667be2dd1SMartin Waitz  * a successful fsync().
5671da177e4SLinus Torvalds  */
5681da177e4SLinus Torvalds int sync_mapping_buffers(struct address_space *mapping)
5691da177e4SLinus Torvalds {
570600f111eSMatthew Wilcox (Oracle) 	struct address_space *buffer_mapping = mapping->i_private_data;
5711da177e4SLinus Torvalds 
572600f111eSMatthew Wilcox (Oracle) 	if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
5731da177e4SLinus Torvalds 		return 0;
5741da177e4SLinus Torvalds 
575600f111eSMatthew Wilcox (Oracle) 	return fsync_buffers_list(&buffer_mapping->i_private_lock,
576600f111eSMatthew Wilcox (Oracle) 					&mapping->i_private_list);
5771da177e4SLinus Torvalds }
5781da177e4SLinus Torvalds EXPORT_SYMBOL(sync_mapping_buffers);
5791da177e4SLinus Torvalds 
58031b2ebc0SRitesh Harjani (IBM) /**
58131b2ebc0SRitesh Harjani (IBM)  * generic_buffers_fsync_noflush - generic buffer fsync implementation
58231b2ebc0SRitesh Harjani (IBM)  * for simple filesystems with no inode lock
58331b2ebc0SRitesh Harjani (IBM)  *
58431b2ebc0SRitesh Harjani (IBM)  * @file:	file to synchronize
58531b2ebc0SRitesh Harjani (IBM)  * @start:	start offset in bytes
58631b2ebc0SRitesh Harjani (IBM)  * @end:	end offset in bytes (inclusive)
58731b2ebc0SRitesh Harjani (IBM)  * @datasync:	only synchronize essential metadata if true
58831b2ebc0SRitesh Harjani (IBM)  *
58931b2ebc0SRitesh Harjani (IBM)  * This is a generic implementation of the fsync method for simple
59031b2ebc0SRitesh Harjani (IBM)  * filesystems which track all non-inode metadata in the buffers list
59131b2ebc0SRitesh Harjani (IBM)  * hanging off the address_space structure.
59231b2ebc0SRitesh Harjani (IBM)  */
59331b2ebc0SRitesh Harjani (IBM) int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
59431b2ebc0SRitesh Harjani (IBM) 				  bool datasync)
59531b2ebc0SRitesh Harjani (IBM) {
59631b2ebc0SRitesh Harjani (IBM) 	struct inode *inode = file->f_mapping->host;
59731b2ebc0SRitesh Harjani (IBM) 	int err;
59831b2ebc0SRitesh Harjani (IBM) 	int ret;
59931b2ebc0SRitesh Harjani (IBM) 
60031b2ebc0SRitesh Harjani (IBM) 	err = file_write_and_wait_range(file, start, end);
60131b2ebc0SRitesh Harjani (IBM) 	if (err)
60231b2ebc0SRitesh Harjani (IBM) 		return err;
60331b2ebc0SRitesh Harjani (IBM) 
60431b2ebc0SRitesh Harjani (IBM) 	ret = sync_mapping_buffers(inode->i_mapping);
60531b2ebc0SRitesh Harjani (IBM) 	if (!(inode->i_state & I_DIRTY_ALL))
60631b2ebc0SRitesh Harjani (IBM) 		goto out;
60731b2ebc0SRitesh Harjani (IBM) 	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
60831b2ebc0SRitesh Harjani (IBM) 		goto out;
60931b2ebc0SRitesh Harjani (IBM) 
61031b2ebc0SRitesh Harjani (IBM) 	err = sync_inode_metadata(inode, 1);
61131b2ebc0SRitesh Harjani (IBM) 	if (ret == 0)
61231b2ebc0SRitesh Harjani (IBM) 		ret = err;
61331b2ebc0SRitesh Harjani (IBM) 
61431b2ebc0SRitesh Harjani (IBM) out:
61531b2ebc0SRitesh Harjani (IBM) 	/* check and advance again to catch errors after syncing out buffers */
61631b2ebc0SRitesh Harjani (IBM) 	err = file_check_and_advance_wb_err(file);
61731b2ebc0SRitesh Harjani (IBM) 	if (ret == 0)
61831b2ebc0SRitesh Harjani (IBM) 		ret = err;
61931b2ebc0SRitesh Harjani (IBM) 	return ret;
62031b2ebc0SRitesh Harjani (IBM) }
62131b2ebc0SRitesh Harjani (IBM) EXPORT_SYMBOL(generic_buffers_fsync_noflush);
62231b2ebc0SRitesh Harjani (IBM) 
62331b2ebc0SRitesh Harjani (IBM) /**
62431b2ebc0SRitesh Harjani (IBM)  * generic_buffers_fsync - generic buffer fsync implementation
62531b2ebc0SRitesh Harjani (IBM)  * for simple filesystems with no inode lock
62631b2ebc0SRitesh Harjani (IBM)  *
62731b2ebc0SRitesh Harjani (IBM)  * @file:	file to synchronize
62831b2ebc0SRitesh Harjani (IBM)  * @start:	start offset in bytes
62931b2ebc0SRitesh Harjani (IBM)  * @end:	end offset in bytes (inclusive)
63031b2ebc0SRitesh Harjani (IBM)  * @datasync:	only synchronize essential metadata if true
63131b2ebc0SRitesh Harjani (IBM)  *
63231b2ebc0SRitesh Harjani (IBM)  * This is a generic implementation of the fsync method for simple
63331b2ebc0SRitesh Harjani (IBM)  * filesystems which track all non-inode metadata in the buffers list
63431b2ebc0SRitesh Harjani (IBM)  * hanging off the address_space structure. This also makes sure that
63531b2ebc0SRitesh Harjani (IBM)  * a device cache flush operation is called at the end.
63631b2ebc0SRitesh Harjani (IBM)  */
63731b2ebc0SRitesh Harjani (IBM) int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
63831b2ebc0SRitesh Harjani (IBM) 			  bool datasync)
63931b2ebc0SRitesh Harjani (IBM) {
64031b2ebc0SRitesh Harjani (IBM) 	struct inode *inode = file->f_mapping->host;
64131b2ebc0SRitesh Harjani (IBM) 	int ret;
64231b2ebc0SRitesh Harjani (IBM) 
64331b2ebc0SRitesh Harjani (IBM) 	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
64431b2ebc0SRitesh Harjani (IBM) 	if (!ret)
64531b2ebc0SRitesh Harjani (IBM) 		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
64631b2ebc0SRitesh Harjani (IBM) 	return ret;
64731b2ebc0SRitesh Harjani (IBM) }
64831b2ebc0SRitesh Harjani (IBM) EXPORT_SYMBOL(generic_buffers_fsync);
64931b2ebc0SRitesh Harjani (IBM) 
6501da177e4SLinus Torvalds /*
6511da177e4SLinus Torvalds  * Called when we've recently written block `bblock', and it is known that
6521da177e4SLinus Torvalds  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
6531da177e4SLinus Torvalds  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
6541da177e4SLinus Torvalds  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
6551da177e4SLinus Torvalds  */
6561da177e4SLinus Torvalds void write_boundary_block(struct block_device *bdev,
6571da177e4SLinus Torvalds 			sector_t bblock, unsigned blocksize)
6581da177e4SLinus Torvalds {
6591da177e4SLinus Torvalds 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
6601da177e4SLinus Torvalds 	if (bh) {
6611da177e4SLinus Torvalds 		if (buffer_dirty(bh))
662e7ea1129SZhang Yi 			write_dirty_buffer(bh, 0);
6631da177e4SLinus Torvalds 		put_bh(bh);
6641da177e4SLinus Torvalds 	}
6651da177e4SLinus Torvalds }
6661da177e4SLinus Torvalds 
6671da177e4SLinus Torvalds void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
6681da177e4SLinus Torvalds {
6691da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
670abc8a8a2SMatthew Wilcox (Oracle) 	struct address_space *buffer_mapping = bh->b_folio->mapping;
6711da177e4SLinus Torvalds 
6721da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
673600f111eSMatthew Wilcox (Oracle) 	if (!mapping->i_private_data) {
674600f111eSMatthew Wilcox (Oracle) 		mapping->i_private_data = buffer_mapping;
6751da177e4SLinus Torvalds 	} else {
676600f111eSMatthew Wilcox (Oracle) 		BUG_ON(mapping->i_private_data != buffer_mapping);
6771da177e4SLinus Torvalds 	}
678535ee2fbSJan Kara 	if (!bh->b_assoc_map) {
679600f111eSMatthew Wilcox (Oracle) 		spin_lock(&buffer_mapping->i_private_lock);
6801da177e4SLinus Torvalds 		list_move_tail(&bh->b_assoc_buffers,
681600f111eSMatthew Wilcox (Oracle) 				&mapping->i_private_list);
68258ff407bSJan Kara 		bh->b_assoc_map = mapping;
683600f111eSMatthew Wilcox (Oracle) 		spin_unlock(&buffer_mapping->i_private_lock);
6841da177e4SLinus Torvalds 	}
6851da177e4SLinus Torvalds }
6861da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_dirty_inode);
6871da177e4SLinus Torvalds 
6883814ec89SMatthew Wilcox (Oracle) /**
6893814ec89SMatthew Wilcox (Oracle)  * block_dirty_folio - Mark a folio as dirty.
6903814ec89SMatthew Wilcox (Oracle)  * @mapping: The address space containing this folio.
6913814ec89SMatthew Wilcox (Oracle)  * @folio: The folio to mark dirty.
6921da177e4SLinus Torvalds  *
6933814ec89SMatthew Wilcox (Oracle)  * Filesystems which use buffer_heads can use this function as their
6943814ec89SMatthew Wilcox (Oracle)  * ->dirty_folio implementation.  Some filesystems need to do a little
6953814ec89SMatthew Wilcox (Oracle)  * work before calling this function.  Filesystems which do not use
6963814ec89SMatthew Wilcox (Oracle)  * buffer_heads should call filemap_dirty_folio() instead.
6971da177e4SLinus Torvalds  *
6983814ec89SMatthew Wilcox (Oracle)  * If the folio has buffers, the uptodate buffers are set dirty, to
6993814ec89SMatthew Wilcox (Oracle)  * preserve dirty-state coherency between the folio and the buffers.
7003814ec89SMatthew Wilcox (Oracle)  * Buffers added to a dirty folio are created dirty.
7011da177e4SLinus Torvalds  *
7023814ec89SMatthew Wilcox (Oracle)  * The buffers are dirtied before the folio is dirtied.  There's a small
7033814ec89SMatthew Wilcox (Oracle)  * race window in which writeback may see the folio cleanness but not the
7043814ec89SMatthew Wilcox (Oracle)  * buffer dirtiness.  That's fine.  If this code were to set the folio
7053814ec89SMatthew Wilcox (Oracle)  * dirty before the buffers, writeback could clear the folio dirty flag,
7063814ec89SMatthew Wilcox (Oracle)  * see a bunch of clean buffers and we'd end up with dirty buffers/clean
7073814ec89SMatthew Wilcox (Oracle)  * folio on the dirty folio list.
7081da177e4SLinus Torvalds  *
7093814ec89SMatthew Wilcox (Oracle)  * We use i_private_lock to lock against try_to_free_buffers() while
7103814ec89SMatthew Wilcox (Oracle)  * using the folio's buffer list.  This also prevents clean buffers
7113814ec89SMatthew Wilcox (Oracle)  * being added to the folio after it was set dirty.
7121da177e4SLinus Torvalds  *
7133814ec89SMatthew Wilcox (Oracle)  * Context: May only be called from process context.  Does not sleep.
7143814ec89SMatthew Wilcox (Oracle)  * Caller must ensure that @folio cannot be truncated during this call,
7153814ec89SMatthew Wilcox (Oracle)  * typically by holding the folio lock or having a page in the folio
7163814ec89SMatthew Wilcox (Oracle)  * mapped and holding the page table lock.
7173814ec89SMatthew Wilcox (Oracle)  *
7183814ec89SMatthew Wilcox (Oracle)  * Return: True if the folio was dirtied; false if it was already dirtied.
7191da177e4SLinus Torvalds  */
720e621900aSMatthew Wilcox (Oracle) bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
7211da177e4SLinus Torvalds {
722e621900aSMatthew Wilcox (Oracle) 	struct buffer_head *head;
723e621900aSMatthew Wilcox (Oracle) 	bool newly_dirty;
7241da177e4SLinus Torvalds 
725600f111eSMatthew Wilcox (Oracle) 	spin_lock(&mapping->i_private_lock);
726e621900aSMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
727e621900aSMatthew Wilcox (Oracle) 	if (head) {
7281da177e4SLinus Torvalds 		struct buffer_head *bh = head;
7291da177e4SLinus Torvalds 
7301da177e4SLinus Torvalds 		do {
7311da177e4SLinus Torvalds 			set_buffer_dirty(bh);
7321da177e4SLinus Torvalds 			bh = bh->b_this_page;
7331da177e4SLinus Torvalds 		} while (bh != head);
7341da177e4SLinus Torvalds 	}
735c4843a75SGreg Thelen 	/*
736bcfe06bfSRoman Gushchin 	 * Lock out page's memcg migration to keep PageDirty
73781f8c3a4SJohannes Weiner 	 * synchronized with per-memcg dirty page counters.
738c4843a75SGreg Thelen 	 */
739e621900aSMatthew Wilcox (Oracle) 	folio_memcg_lock(folio);
740e621900aSMatthew Wilcox (Oracle) 	newly_dirty = !folio_test_set_dirty(folio);
741600f111eSMatthew Wilcox (Oracle) 	spin_unlock(&mapping->i_private_lock);
7421da177e4SLinus Torvalds 
743a8e7d49aSLinus Torvalds 	if (newly_dirty)
744e621900aSMatthew Wilcox (Oracle) 		__folio_mark_dirty(folio, mapping, 1);
745c4843a75SGreg Thelen 
746e621900aSMatthew Wilcox (Oracle) 	folio_memcg_unlock(folio);
747c4843a75SGreg Thelen 
748c4843a75SGreg Thelen 	if (newly_dirty)
749c4843a75SGreg Thelen 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
750c4843a75SGreg Thelen 
751a8e7d49aSLinus Torvalds 	return newly_dirty;
7521da177e4SLinus Torvalds }
753e621900aSMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_dirty_folio);
7541da177e4SLinus Torvalds 
7551da177e4SLinus Torvalds /*
7561da177e4SLinus Torvalds  * Write out and wait upon a list of buffers.
7571da177e4SLinus Torvalds  *
7581da177e4SLinus Torvalds  * We have conflicting pressures: we want to make sure that all
7591da177e4SLinus Torvalds  * initially dirty buffers get waited on, but that any subsequently
7601da177e4SLinus Torvalds  * dirtied buffers don't.  After all, we don't want fsync to last
7611da177e4SLinus Torvalds  * forever if somebody is actively writing to the file.
7621da177e4SLinus Torvalds  *
7631da177e4SLinus Torvalds  * Do this in two main stages: first we copy dirty buffers to a
7641da177e4SLinus Torvalds  * temporary inode list, queueing the writes as we go.  Then we clean
7651da177e4SLinus Torvalds  * up, waiting for those writes to complete.
7661da177e4SLinus Torvalds  *
7671da177e4SLinus Torvalds  * During this second stage, any subsequent updates to the file may end
7681da177e4SLinus Torvalds  * up refiling the buffer on the original inode's dirty list again, so
7691da177e4SLinus Torvalds  * there is a chance we will end up with a buffer queued for write but
7701da177e4SLinus Torvalds  * not yet completed on that list.  So, as a final cleanup we go through
7711da177e4SLinus Torvalds  * the osync code to catch these locked, dirty buffers without requeuing
7721da177e4SLinus Torvalds  * any newly dirty buffers for write.
7731da177e4SLinus Torvalds  */
7741da177e4SLinus Torvalds static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
7751da177e4SLinus Torvalds {
7761da177e4SLinus Torvalds 	struct buffer_head *bh;
7771da177e4SLinus Torvalds 	struct list_head tmp;
7787eaceaccSJens Axboe 	struct address_space *mapping;
7791da177e4SLinus Torvalds 	int err = 0, err2;
7804ee2491eSJens Axboe 	struct blk_plug plug;
7811da177e4SLinus Torvalds 
7821da177e4SLinus Torvalds 	INIT_LIST_HEAD(&tmp);
7834ee2491eSJens Axboe 	blk_start_plug(&plug);
7841da177e4SLinus Torvalds 
7851da177e4SLinus Torvalds 	spin_lock(lock);
7861da177e4SLinus Torvalds 	while (!list_empty(list)) {
7871da177e4SLinus Torvalds 		bh = BH_ENTRY(list->next);
788535ee2fbSJan Kara 		mapping = bh->b_assoc_map;
78958ff407bSJan Kara 		__remove_assoc_queue(bh);
790535ee2fbSJan Kara 		/* Avoid race with mark_buffer_dirty_inode() which does
791535ee2fbSJan Kara 		 * a lockless check and we rely on seeing the dirty bit */
792535ee2fbSJan Kara 		smp_mb();
7931da177e4SLinus Torvalds 		if (buffer_dirty(bh) || buffer_locked(bh)) {
7941da177e4SLinus Torvalds 			list_add(&bh->b_assoc_buffers, &tmp);
795535ee2fbSJan Kara 			bh->b_assoc_map = mapping;
7961da177e4SLinus Torvalds 			if (buffer_dirty(bh)) {
7971da177e4SLinus Torvalds 				get_bh(bh);
7981da177e4SLinus Torvalds 				spin_unlock(lock);
7991da177e4SLinus Torvalds 				/*
8001da177e4SLinus Torvalds 				 * Ensure any pending I/O completes so that
8019cb569d6SChristoph Hellwig 				 * write_dirty_buffer() actually writes the
8029cb569d6SChristoph Hellwig 				 * current contents - it is a noop if I/O is
8039cb569d6SChristoph Hellwig 				 * still in flight on potentially older
8049cb569d6SChristoph Hellwig 				 * contents.
8051da177e4SLinus Torvalds 				 */
80670fd7614SChristoph Hellwig 				write_dirty_buffer(bh, REQ_SYNC);
8079cf6b720SJens Axboe 
8089cf6b720SJens Axboe 				/*
8099cf6b720SJens Axboe 				 * Kick off IO for the previous mapping. Note
8109cf6b720SJens Axboe 				 * that we will not run the very last mapping,
8119cf6b720SJens Axboe 				 * wait_on_buffer() will do that for us
8129cf6b720SJens Axboe 				 * through sync_buffer().
8139cf6b720SJens Axboe 				 */
8141da177e4SLinus Torvalds 				brelse(bh);
8151da177e4SLinus Torvalds 				spin_lock(lock);
8161da177e4SLinus Torvalds 			}
8171da177e4SLinus Torvalds 		}
8181da177e4SLinus Torvalds 	}
8191da177e4SLinus Torvalds 
8204ee2491eSJens Axboe 	spin_unlock(lock);
8214ee2491eSJens Axboe 	blk_finish_plug(&plug);
8224ee2491eSJens Axboe 	spin_lock(lock);
8234ee2491eSJens Axboe 
8241da177e4SLinus Torvalds 	while (!list_empty(&tmp)) {
8251da177e4SLinus Torvalds 		bh = BH_ENTRY(tmp.prev);
8261da177e4SLinus Torvalds 		get_bh(bh);
827535ee2fbSJan Kara 		mapping = bh->b_assoc_map;
828535ee2fbSJan Kara 		__remove_assoc_queue(bh);
829535ee2fbSJan Kara 		/* Avoid race with mark_buffer_dirty_inode() which does
830535ee2fbSJan Kara 		 * a lockless check and we rely on seeing the dirty bit */
831535ee2fbSJan Kara 		smp_mb();
832535ee2fbSJan Kara 		if (buffer_dirty(bh)) {
833535ee2fbSJan Kara 			list_add(&bh->b_assoc_buffers,
834600f111eSMatthew Wilcox (Oracle) 				 &mapping->i_private_list);
835535ee2fbSJan Kara 			bh->b_assoc_map = mapping;
836535ee2fbSJan Kara 		}
8371da177e4SLinus Torvalds 		spin_unlock(lock);
8381da177e4SLinus Torvalds 		wait_on_buffer(bh);
8391da177e4SLinus Torvalds 		if (!buffer_uptodate(bh))
8401da177e4SLinus Torvalds 			err = -EIO;
8411da177e4SLinus Torvalds 		brelse(bh);
8421da177e4SLinus Torvalds 		spin_lock(lock);
8431da177e4SLinus Torvalds 	}
8441da177e4SLinus Torvalds 
8451da177e4SLinus Torvalds 	spin_unlock(lock);
8461da177e4SLinus Torvalds 	err2 = osync_buffers_list(lock, list);
8471da177e4SLinus Torvalds 	if (err)
8481da177e4SLinus Torvalds 		return err;
8491da177e4SLinus Torvalds 	else
8501da177e4SLinus Torvalds 		return err2;
8511da177e4SLinus Torvalds }
8521da177e4SLinus Torvalds 
8531da177e4SLinus Torvalds /*
8541da177e4SLinus Torvalds  * Invalidate any and all dirty buffers on a given inode.  We are
8551da177e4SLinus Torvalds  * probably unmounting the fs, but that doesn't mean we have already
8561da177e4SLinus Torvalds  * done a sync().  Just drop the buffers from the inode list.
8571da177e4SLinus Torvalds  *
858600f111eSMatthew Wilcox (Oracle)  * NOTE: we take the inode's blockdev's mapping's i_private_lock.  Which
8591da177e4SLinus Torvalds  * assumes that all the buffers are against the blockdev.  Not true
8601da177e4SLinus Torvalds  * for reiserfs.
8611da177e4SLinus Torvalds  */
8621da177e4SLinus Torvalds void invalidate_inode_buffers(struct inode *inode)
8631da177e4SLinus Torvalds {
8641da177e4SLinus Torvalds 	if (inode_has_buffers(inode)) {
8651da177e4SLinus Torvalds 		struct address_space *mapping = &inode->i_data;
866600f111eSMatthew Wilcox (Oracle) 		struct list_head *list = &mapping->i_private_list;
867600f111eSMatthew Wilcox (Oracle) 		struct address_space *buffer_mapping = mapping->i_private_data;
8681da177e4SLinus Torvalds 
869600f111eSMatthew Wilcox (Oracle) 		spin_lock(&buffer_mapping->i_private_lock);
8701da177e4SLinus Torvalds 		while (!list_empty(list))
8711da177e4SLinus Torvalds 			__remove_assoc_queue(BH_ENTRY(list->next));
872600f111eSMatthew Wilcox (Oracle) 		spin_unlock(&buffer_mapping->i_private_lock);
8731da177e4SLinus Torvalds 	}
8741da177e4SLinus Torvalds }
87552b19ac9SJan Kara EXPORT_SYMBOL(invalidate_inode_buffers);
8761da177e4SLinus Torvalds 
8771da177e4SLinus Torvalds /*
8781da177e4SLinus Torvalds  * Remove any clean buffers from the inode's buffer list.  This is called
8791da177e4SLinus Torvalds  * when we're trying to free the inode itself.  Those buffers can pin it.
8801da177e4SLinus Torvalds  *
8811da177e4SLinus Torvalds  * Returns true if all buffers were removed.
8821da177e4SLinus Torvalds  */
8831da177e4SLinus Torvalds int remove_inode_buffers(struct inode *inode)
8841da177e4SLinus Torvalds {
8851da177e4SLinus Torvalds 	int ret = 1;
8861da177e4SLinus Torvalds 
8871da177e4SLinus Torvalds 	if (inode_has_buffers(inode)) {
8881da177e4SLinus Torvalds 		struct address_space *mapping = &inode->i_data;
889600f111eSMatthew Wilcox (Oracle) 		struct list_head *list = &mapping->i_private_list;
890600f111eSMatthew Wilcox (Oracle) 		struct address_space *buffer_mapping = mapping->i_private_data;
8911da177e4SLinus Torvalds 
892600f111eSMatthew Wilcox (Oracle) 		spin_lock(&buffer_mapping->i_private_lock);
8931da177e4SLinus Torvalds 		while (!list_empty(list)) {
8941da177e4SLinus Torvalds 			struct buffer_head *bh = BH_ENTRY(list->next);
8951da177e4SLinus Torvalds 			if (buffer_dirty(bh)) {
8961da177e4SLinus Torvalds 				ret = 0;
8971da177e4SLinus Torvalds 				break;
8981da177e4SLinus Torvalds 			}
8991da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
9001da177e4SLinus Torvalds 		}
901600f111eSMatthew Wilcox (Oracle) 		spin_unlock(&buffer_mapping->i_private_lock);
9021da177e4SLinus Torvalds 	}
9031da177e4SLinus Torvalds 	return ret;
9041da177e4SLinus Torvalds }
9051da177e4SLinus Torvalds 
9061da177e4SLinus Torvalds /*
907c71124a8SPankaj Raghav  * Create the appropriate buffers when given a folio for data area and
9081da177e4SLinus Torvalds  * the size of each buffer.. Use the bh->b_this_page linked list to
9091da177e4SLinus Torvalds  * follow the buffers created.  Return NULL if unable to create more
9101da177e4SLinus Torvalds  * buffers.
9111da177e4SLinus Torvalds  *
9121da177e4SLinus Torvalds  * The retry flag is used to differentiate async IO (paging, swapping)
9131da177e4SLinus Torvalds  * which may not fail from ordinary buffer allocations.
9141da177e4SLinus Torvalds  */
915c71124a8SPankaj Raghav struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
9162a418157SMatthew Wilcox (Oracle) 					gfp_t gfp)
9171da177e4SLinus Torvalds {
9181da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
9191da177e4SLinus Torvalds 	long offset;
920b87d8cefSRoman Gushchin 	struct mem_cgroup *memcg, *old_memcg;
9211da177e4SLinus Torvalds 
922c71124a8SPankaj Raghav 	/* The folio lock pins the memcg */
923c71124a8SPankaj Raghav 	memcg = folio_memcg(folio);
924b87d8cefSRoman Gushchin 	old_memcg = set_active_memcg(memcg);
925f745c6f5SShakeel Butt 
9261da177e4SLinus Torvalds 	head = NULL;
927c71124a8SPankaj Raghav 	offset = folio_size(folio);
9281da177e4SLinus Torvalds 	while ((offset -= size) >= 0) {
929640ab98fSJens Axboe 		bh = alloc_buffer_head(gfp);
9301da177e4SLinus Torvalds 		if (!bh)
9311da177e4SLinus Torvalds 			goto no_grow;
9321da177e4SLinus Torvalds 
9331da177e4SLinus Torvalds 		bh->b_this_page = head;
9341da177e4SLinus Torvalds 		bh->b_blocknr = -1;
9351da177e4SLinus Torvalds 		head = bh;
9361da177e4SLinus Torvalds 
9371da177e4SLinus Torvalds 		bh->b_size = size;
9381da177e4SLinus Torvalds 
939c71124a8SPankaj Raghav 		/* Link the buffer to its folio */
940c71124a8SPankaj Raghav 		folio_set_bh(bh, folio, offset);
9411da177e4SLinus Torvalds 	}
942f745c6f5SShakeel Butt out:
943b87d8cefSRoman Gushchin 	set_active_memcg(old_memcg);
9441da177e4SLinus Torvalds 	return head;
9451da177e4SLinus Torvalds /*
9461da177e4SLinus Torvalds  * In case anything failed, we just free everything we got.
9471da177e4SLinus Torvalds  */
9481da177e4SLinus Torvalds no_grow:
9491da177e4SLinus Torvalds 	if (head) {
9501da177e4SLinus Torvalds 		do {
9511da177e4SLinus Torvalds 			bh = head;
9521da177e4SLinus Torvalds 			head = head->b_this_page;
9531da177e4SLinus Torvalds 			free_buffer_head(bh);
9541da177e4SLinus Torvalds 		} while (head);
9551da177e4SLinus Torvalds 	}
9561da177e4SLinus Torvalds 
957f745c6f5SShakeel Butt 	goto out;
9581da177e4SLinus Torvalds }
959c71124a8SPankaj Raghav EXPORT_SYMBOL_GPL(folio_alloc_buffers);
960c71124a8SPankaj Raghav 
961c71124a8SPankaj Raghav struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
962c71124a8SPankaj Raghav 				       bool retry)
963c71124a8SPankaj Raghav {
9642a418157SMatthew Wilcox (Oracle) 	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
9652a418157SMatthew Wilcox (Oracle) 	if (retry)
9662a418157SMatthew Wilcox (Oracle) 		gfp |= __GFP_NOFAIL;
9672a418157SMatthew Wilcox (Oracle) 
9682a418157SMatthew Wilcox (Oracle) 	return folio_alloc_buffers(page_folio(page), size, gfp);
969c71124a8SPankaj Raghav }
9701da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(alloc_page_buffers);
9711da177e4SLinus Torvalds 
97208d84addSMatthew Wilcox (Oracle) static inline void link_dev_buffers(struct folio *folio,
97308d84addSMatthew Wilcox (Oracle) 		struct buffer_head *head)
9741da177e4SLinus Torvalds {
9751da177e4SLinus Torvalds 	struct buffer_head *bh, *tail;
9761da177e4SLinus Torvalds 
9771da177e4SLinus Torvalds 	bh = head;
9781da177e4SLinus Torvalds 	do {
9791da177e4SLinus Torvalds 		tail = bh;
9801da177e4SLinus Torvalds 		bh = bh->b_this_page;
9811da177e4SLinus Torvalds 	} while (bh);
9821da177e4SLinus Torvalds 	tail->b_this_page = head;
98308d84addSMatthew Wilcox (Oracle) 	folio_attach_private(folio, head);
9841da177e4SLinus Torvalds }
9851da177e4SLinus Torvalds 
986bbec0270SLinus Torvalds static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
987bbec0270SLinus Torvalds {
988bbec0270SLinus Torvalds 	sector_t retval = ~((sector_t)0);
989b86058f9SChristoph Hellwig 	loff_t sz = bdev_nr_bytes(bdev);
990bbec0270SLinus Torvalds 
991bbec0270SLinus Torvalds 	if (sz) {
992bbec0270SLinus Torvalds 		unsigned int sizebits = blksize_bits(size);
993bbec0270SLinus Torvalds 		retval = (sz >> sizebits);
994bbec0270SLinus Torvalds 	}
995bbec0270SLinus Torvalds 	return retval;
996bbec0270SLinus Torvalds }
997bbec0270SLinus Torvalds 
9981da177e4SLinus Torvalds /*
9996f24ce6bSMatthew Wilcox (Oracle)  * Initialise the state of a blockdev folio's buffers.
10001da177e4SLinus Torvalds  */
10016f24ce6bSMatthew Wilcox (Oracle) static sector_t folio_init_buffers(struct folio *folio,
1002382497adSMatthew Wilcox (Oracle) 		struct block_device *bdev, unsigned size)
10031da177e4SLinus Torvalds {
10046f24ce6bSMatthew Wilcox (Oracle) 	struct buffer_head *head = folio_buffers(folio);
10051da177e4SLinus Torvalds 	struct buffer_head *bh = head;
10066f24ce6bSMatthew Wilcox (Oracle) 	bool uptodate = folio_test_uptodate(folio);
1007382497adSMatthew Wilcox (Oracle) 	sector_t block = div_u64(folio_pos(folio), size);
1008bcd1d063SChristoph Hellwig 	sector_t end_block = blkdev_max_block(bdev, size);
10091da177e4SLinus Torvalds 
10101da177e4SLinus Torvalds 	do {
10111da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
101201950a34SEric Biggers 			bh->b_end_io = NULL;
101301950a34SEric Biggers 			bh->b_private = NULL;
10141da177e4SLinus Torvalds 			bh->b_bdev = bdev;
10151da177e4SLinus Torvalds 			bh->b_blocknr = block;
10161da177e4SLinus Torvalds 			if (uptodate)
10171da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
1018080399aaSJeff Moyer 			if (block < end_block)
10191da177e4SLinus Torvalds 				set_buffer_mapped(bh);
10201da177e4SLinus Torvalds 		}
10211da177e4SLinus Torvalds 		block++;
10221da177e4SLinus Torvalds 		bh = bh->b_this_page;
10231da177e4SLinus Torvalds 	} while (bh != head);
1024676ce6d5SHugh Dickins 
1025676ce6d5SHugh Dickins 	/*
1026676ce6d5SHugh Dickins 	 * Caller needs to validate requested block against end of device.
1027676ce6d5SHugh Dickins 	 */
1028676ce6d5SHugh Dickins 	return end_block;
10291da177e4SLinus Torvalds }
10301da177e4SLinus Torvalds 
10311da177e4SLinus Torvalds /*
10326d840a18SMatthew Wilcox (Oracle)  * Create the page-cache folio that contains the requested block.
10331da177e4SLinus Torvalds  *
1034676ce6d5SHugh Dickins  * This is used purely for blockdev mappings.
10356d840a18SMatthew Wilcox (Oracle)  *
1036bcd30d4cSMatthew Wilcox (Oracle)  * Returns false if we have a failure which cannot be cured by retrying
1037bcd30d4cSMatthew Wilcox (Oracle)  * without sleeping.  Returns true if we succeeded, or the caller should retry.
10381da177e4SLinus Torvalds  */
10396d840a18SMatthew Wilcox (Oracle) static bool grow_dev_folio(struct block_device *bdev, sector_t block,
1040382497adSMatthew Wilcox (Oracle) 		pgoff_t index, unsigned size, gfp_t gfp)
10411da177e4SLinus Torvalds {
104222f89a4fSAl Viro 	struct address_space *mapping = bdev->bd_mapping;
10433c98a41cSMatthew Wilcox (Oracle) 	struct folio *folio;
10441da177e4SLinus Torvalds 	struct buffer_head *bh;
10456d840a18SMatthew Wilcox (Oracle) 	sector_t end_block = 0;
104684235de3SJohannes Weiner 
104722f89a4fSAl Viro 	folio = __filemap_get_folio(mapping, index,
10483ed65f04SMatthew Wilcox (Oracle) 			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
10493ed65f04SMatthew Wilcox (Oracle) 	if (IS_ERR(folio))
10506d840a18SMatthew Wilcox (Oracle) 		return false;
10511da177e4SLinus Torvalds 
10523c98a41cSMatthew Wilcox (Oracle) 	bh = folio_buffers(folio);
10533c98a41cSMatthew Wilcox (Oracle) 	if (bh) {
10541da177e4SLinus Torvalds 		if (bh->b_size == size) {
1055382497adSMatthew Wilcox (Oracle) 			end_block = folio_init_buffers(folio, bdev, size);
10566d840a18SMatthew Wilcox (Oracle) 			goto unlock;
10571da177e4SLinus Torvalds 		}
10581da177e4SLinus Torvalds 
1059bcd30d4cSMatthew Wilcox (Oracle) 		/*
1060bcd30d4cSMatthew Wilcox (Oracle) 		 * Retrying may succeed; for example the folio may finish
1061bcd30d4cSMatthew Wilcox (Oracle) 		 * writeback, or buffers may be cleaned.  This should not
1062bcd30d4cSMatthew Wilcox (Oracle) 		 * happen very often; maybe we have old buffers attached to
1063bcd30d4cSMatthew Wilcox (Oracle) 		 * this blockdev's page cache and we're trying to change
1064bcd30d4cSMatthew Wilcox (Oracle) 		 * the block size?
1065bcd30d4cSMatthew Wilcox (Oracle) 		 */
1066bcd30d4cSMatthew Wilcox (Oracle) 		if (!try_to_free_buffers(folio)) {
10676d840a18SMatthew Wilcox (Oracle) 			end_block = ~0ULL;
10686d840a18SMatthew Wilcox (Oracle) 			goto unlock;
10696d840a18SMatthew Wilcox (Oracle) 		}
1070bcd30d4cSMatthew Wilcox (Oracle) 	}
10716d840a18SMatthew Wilcox (Oracle) 
10723ed65f04SMatthew Wilcox (Oracle) 	bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
10733ed65f04SMatthew Wilcox (Oracle) 	if (!bh)
10746d840a18SMatthew Wilcox (Oracle) 		goto unlock;
10751da177e4SLinus Torvalds 
10761da177e4SLinus Torvalds 	/*
10773c98a41cSMatthew Wilcox (Oracle) 	 * Link the folio to the buffers and initialise them.  Take the
10781da177e4SLinus Torvalds 	 * lock to be atomic wrt __find_get_block(), which does not
10793c98a41cSMatthew Wilcox (Oracle) 	 * run under the folio lock.
10801da177e4SLinus Torvalds 	 */
108122f89a4fSAl Viro 	spin_lock(&mapping->i_private_lock);
108208d84addSMatthew Wilcox (Oracle) 	link_dev_buffers(folio, bh);
1083382497adSMatthew Wilcox (Oracle) 	end_block = folio_init_buffers(folio, bdev, size);
108422f89a4fSAl Viro 	spin_unlock(&mapping->i_private_lock);
10856d840a18SMatthew Wilcox (Oracle) unlock:
10863c98a41cSMatthew Wilcox (Oracle) 	folio_unlock(folio);
10873c98a41cSMatthew Wilcox (Oracle) 	folio_put(folio);
10886d840a18SMatthew Wilcox (Oracle) 	return block < end_block;
10891da177e4SLinus Torvalds }
10901da177e4SLinus Torvalds 
10911da177e4SLinus Torvalds /*
10926d840a18SMatthew Wilcox (Oracle)  * Create buffers for the specified block device block's folio.  If
10936d840a18SMatthew Wilcox (Oracle)  * that folio was dirty, the buffers are set dirty also.  Returns false
10946d840a18SMatthew Wilcox (Oracle)  * if we've hit a permanent error.
10951da177e4SLinus Torvalds  */
10966d840a18SMatthew Wilcox (Oracle) static bool grow_buffers(struct block_device *bdev, sector_t block,
10976d840a18SMatthew Wilcox (Oracle) 		unsigned size, gfp_t gfp)
10981da177e4SLinus Torvalds {
10995f3bd90dSMatthew Wilcox (Oracle) 	loff_t pos;
11001da177e4SLinus Torvalds 
1101e5657933SAndrew Morton 	/*
11025f3bd90dSMatthew Wilcox (Oracle) 	 * Check for a block which lies outside our maximum possible
11035f3bd90dSMatthew Wilcox (Oracle) 	 * pagecache index.
1104e5657933SAndrew Morton 	 */
11055f3bd90dSMatthew Wilcox (Oracle) 	if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
11065f3bd90dSMatthew Wilcox (Oracle) 		printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
11078e24eea7SHarvey Harrison 			__func__, (unsigned long long)block,
1108a1c6f057SDmitry Monakhov 			bdev);
11096d840a18SMatthew Wilcox (Oracle) 		return false;
1110e5657933SAndrew Morton 	}
1111676ce6d5SHugh Dickins 
11126d840a18SMatthew Wilcox (Oracle) 	/* Create a folio with the proper size buffers */
11135f3bd90dSMatthew Wilcox (Oracle) 	return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
11141da177e4SLinus Torvalds }
11151da177e4SLinus Torvalds 
11160026ba40SEric Biggers static struct buffer_head *
11173b5e6454SGioh Kim __getblk_slow(struct block_device *bdev, sector_t block,
11183b5e6454SGioh Kim 	     unsigned size, gfp_t gfp)
11191da177e4SLinus Torvalds {
11201da177e4SLinus Torvalds 	/* Size must be multiple of hard sectorsize */
1121e1defc4fSMartin K. Petersen 	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
11221da177e4SLinus Torvalds 			(size < 512 || size > PAGE_SIZE))) {
11231da177e4SLinus Torvalds 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
11241da177e4SLinus Torvalds 					size);
1125e1defc4fSMartin K. Petersen 		printk(KERN_ERR "logical block size: %d\n",
1126e1defc4fSMartin K. Petersen 					bdev_logical_block_size(bdev));
11271da177e4SLinus Torvalds 
11281da177e4SLinus Torvalds 		dump_stack();
11291da177e4SLinus Torvalds 		return NULL;
11301da177e4SLinus Torvalds 	}
11311da177e4SLinus Torvalds 
1132676ce6d5SHugh Dickins 	for (;;) {
1133676ce6d5SHugh Dickins 		struct buffer_head *bh;
1134676ce6d5SHugh Dickins 
11351da177e4SLinus Torvalds 		bh = __find_get_block(bdev, block, size);
11361da177e4SLinus Torvalds 		if (bh)
11371da177e4SLinus Torvalds 			return bh;
11381da177e4SLinus Torvalds 
11396d840a18SMatthew Wilcox (Oracle) 		if (!grow_buffers(bdev, block, size, gfp))
114091f68c89SJeff Moyer 			return NULL;
1141676ce6d5SHugh Dickins 	}
11421da177e4SLinus Torvalds }
11431da177e4SLinus Torvalds 
11441da177e4SLinus Torvalds /*
11451da177e4SLinus Torvalds  * The relationship between dirty buffers and dirty pages:
11461da177e4SLinus Torvalds  *
11471da177e4SLinus Torvalds  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1148ec82e1c1SMatthew Wilcox  * the page is tagged dirty in the page cache.
11491da177e4SLinus Torvalds  *
11501da177e4SLinus Torvalds  * At all times, the dirtiness of the buffers represents the dirtiness of
11511da177e4SLinus Torvalds  * subsections of the page.  If the page has buffers, the page dirty bit is
11521da177e4SLinus Torvalds  * merely a hint about the true dirty state.
11531da177e4SLinus Torvalds  *
11541da177e4SLinus Torvalds  * When a page is set dirty in its entirety, all its buffers are marked dirty
11551da177e4SLinus Torvalds  * (if the page has buffers).
11561da177e4SLinus Torvalds  *
11571da177e4SLinus Torvalds  * When a buffer is marked dirty, its page is dirtied, but the page's other
11581da177e4SLinus Torvalds  * buffers are not.
11591da177e4SLinus Torvalds  *
11601da177e4SLinus Torvalds  * Also.  When blockdev buffers are explicitly read with bread(), they
11611da177e4SLinus Torvalds  * individually become uptodate.  But their backing page remains not
11621da177e4SLinus Torvalds  * uptodate - even if all of its buffers are uptodate.  A subsequent
11632c69e205SMatthew Wilcox (Oracle)  * block_read_full_folio() against that folio will discover all the uptodate
11642c69e205SMatthew Wilcox (Oracle)  * buffers, will set the folio uptodate and will perform no I/O.
11651da177e4SLinus Torvalds  */
11661da177e4SLinus Torvalds 
11671da177e4SLinus Torvalds /**
11681da177e4SLinus Torvalds  * mark_buffer_dirty - mark a buffer_head as needing writeout
116967be2dd1SMartin Waitz  * @bh: the buffer_head to mark dirty
11701da177e4SLinus Torvalds  *
1171ec82e1c1SMatthew Wilcox  * mark_buffer_dirty() will set the dirty bit against the buffer, then set
1172ec82e1c1SMatthew Wilcox  * its backing page dirty, then tag the page as dirty in the page cache
1173ec82e1c1SMatthew Wilcox  * and then attach the address_space's inode to its superblock's dirty
11741da177e4SLinus Torvalds  * inode list.
11751da177e4SLinus Torvalds  *
1176600f111eSMatthew Wilcox (Oracle)  * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->i_private_lock,
1177b93b0163SMatthew Wilcox  * i_pages lock and mapping->host->i_lock.
11781da177e4SLinus Torvalds  */
1179fc9b52cdSHarvey Harrison void mark_buffer_dirty(struct buffer_head *bh)
11801da177e4SLinus Torvalds {
1181787d2214SNick Piggin 	WARN_ON_ONCE(!buffer_uptodate(bh));
11821be62dc1SLinus Torvalds 
11835305cb83STejun Heo 	trace_block_dirty_buffer(bh);
11845305cb83STejun Heo 
11851be62dc1SLinus Torvalds 	/*
11861be62dc1SLinus Torvalds 	 * Very *carefully* optimize the it-is-already-dirty case.
11871be62dc1SLinus Torvalds 	 *
11881be62dc1SLinus Torvalds 	 * Don't let the final "is it dirty" escape to before we
11891be62dc1SLinus Torvalds 	 * perhaps modified the buffer.
11901be62dc1SLinus Torvalds 	 */
11911be62dc1SLinus Torvalds 	if (buffer_dirty(bh)) {
11921be62dc1SLinus Torvalds 		smp_mb();
11931be62dc1SLinus Torvalds 		if (buffer_dirty(bh))
11941be62dc1SLinus Torvalds 			return;
11951be62dc1SLinus Torvalds 	}
11961be62dc1SLinus Torvalds 
1197a8e7d49aSLinus Torvalds 	if (!test_set_buffer_dirty(bh)) {
1198cf1d3417SMatthew Wilcox (Oracle) 		struct folio *folio = bh->b_folio;
1199c4843a75SGreg Thelen 		struct address_space *mapping = NULL;
1200c4843a75SGreg Thelen 
1201cf1d3417SMatthew Wilcox (Oracle) 		folio_memcg_lock(folio);
1202cf1d3417SMatthew Wilcox (Oracle) 		if (!folio_test_set_dirty(folio)) {
1203cf1d3417SMatthew Wilcox (Oracle) 			mapping = folio->mapping;
12048e9d78edSLinus Torvalds 			if (mapping)
1205cf1d3417SMatthew Wilcox (Oracle) 				__folio_mark_dirty(folio, mapping, 0);
12068e9d78edSLinus Torvalds 		}
1207cf1d3417SMatthew Wilcox (Oracle) 		folio_memcg_unlock(folio);
1208c4843a75SGreg Thelen 		if (mapping)
1209c4843a75SGreg Thelen 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1210a8e7d49aSLinus Torvalds 	}
12111da177e4SLinus Torvalds }
12121fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(mark_buffer_dirty);
12131da177e4SLinus Torvalds 
121487354e5dSJeff Layton void mark_buffer_write_io_error(struct buffer_head *bh)
121587354e5dSJeff Layton {
121687354e5dSJeff Layton 	set_buffer_write_io_error(bh);
121787354e5dSJeff Layton 	/* FIXME: do we need to set this in both places? */
1218abc8a8a2SMatthew Wilcox (Oracle) 	if (bh->b_folio && bh->b_folio->mapping)
1219abc8a8a2SMatthew Wilcox (Oracle) 		mapping_set_error(bh->b_folio->mapping, -EIO);
12204b2201daSChristoph Hellwig 	if (bh->b_assoc_map) {
122187354e5dSJeff Layton 		mapping_set_error(bh->b_assoc_map, -EIO);
12224b2201daSChristoph Hellwig 		errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
12234b2201daSChristoph Hellwig 	}
122487354e5dSJeff Layton }
122587354e5dSJeff Layton EXPORT_SYMBOL(mark_buffer_write_io_error);
122687354e5dSJeff Layton 
122766924fdaSMatthew Wilcox (Oracle) /**
122866924fdaSMatthew Wilcox (Oracle)  * __brelse - Release a buffer.
122966924fdaSMatthew Wilcox (Oracle)  * @bh: The buffer to release.
123066924fdaSMatthew Wilcox (Oracle)  *
123166924fdaSMatthew Wilcox (Oracle)  * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
12321da177e4SLinus Torvalds  */
123366924fdaSMatthew Wilcox (Oracle) void __brelse(struct buffer_head *bh)
12341da177e4SLinus Torvalds {
123566924fdaSMatthew Wilcox (Oracle) 	if (atomic_read(&bh->b_count)) {
123666924fdaSMatthew Wilcox (Oracle) 		put_bh(bh);
12371da177e4SLinus Torvalds 		return;
12381da177e4SLinus Torvalds 	}
12395c752ad9SArjan van de Ven 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
12401da177e4SLinus Torvalds }
12411fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__brelse);
12421da177e4SLinus Torvalds 
1243b73a936fSMatthew Wilcox (Oracle) /**
1244b73a936fSMatthew Wilcox (Oracle)  * __bforget - Discard any dirty data in a buffer.
1245b73a936fSMatthew Wilcox (Oracle)  * @bh: The buffer to forget.
1246b73a936fSMatthew Wilcox (Oracle)  *
1247b73a936fSMatthew Wilcox (Oracle)  * This variant of bforget() can be called if @bh is guaranteed to not
1248b73a936fSMatthew Wilcox (Oracle)  * be NULL.
12491da177e4SLinus Torvalds  */
12501da177e4SLinus Torvalds void __bforget(struct buffer_head *bh)
12511da177e4SLinus Torvalds {
12521da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
1253535ee2fbSJan Kara 	if (bh->b_assoc_map) {
1254abc8a8a2SMatthew Wilcox (Oracle) 		struct address_space *buffer_mapping = bh->b_folio->mapping;
12551da177e4SLinus Torvalds 
1256600f111eSMatthew Wilcox (Oracle) 		spin_lock(&buffer_mapping->i_private_lock);
12571da177e4SLinus Torvalds 		list_del_init(&bh->b_assoc_buffers);
125858ff407bSJan Kara 		bh->b_assoc_map = NULL;
1259600f111eSMatthew Wilcox (Oracle) 		spin_unlock(&buffer_mapping->i_private_lock);
12601da177e4SLinus Torvalds 	}
12611da177e4SLinus Torvalds 	__brelse(bh);
12621da177e4SLinus Torvalds }
12631fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__bforget);
12641da177e4SLinus Torvalds 
12651da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh)
12661da177e4SLinus Torvalds {
12671da177e4SLinus Torvalds 	lock_buffer(bh);
12681da177e4SLinus Torvalds 	if (buffer_uptodate(bh)) {
12691da177e4SLinus Torvalds 		unlock_buffer(bh);
12701da177e4SLinus Torvalds 		return bh;
12711da177e4SLinus Torvalds 	} else {
12721da177e4SLinus Torvalds 		get_bh(bh);
12731da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_read_sync;
12741420c4a5SBart Van Assche 		submit_bh(REQ_OP_READ, bh);
12751da177e4SLinus Torvalds 		wait_on_buffer(bh);
12761da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
12771da177e4SLinus Torvalds 			return bh;
12781da177e4SLinus Torvalds 	}
12791da177e4SLinus Torvalds 	brelse(bh);
12801da177e4SLinus Torvalds 	return NULL;
12811da177e4SLinus Torvalds }
12821da177e4SLinus Torvalds 
12831da177e4SLinus Torvalds /*
12841da177e4SLinus Torvalds  * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
12851da177e4SLinus Torvalds  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
12861da177e4SLinus Torvalds  * refcount elevated by one when they're in an LRU.  A buffer can only appear
12871da177e4SLinus Torvalds  * once in a particular CPU's LRU.  A single buffer can be present in multiple
12881da177e4SLinus Torvalds  * CPU's LRUs at the same time.
12891da177e4SLinus Torvalds  *
12901da177e4SLinus Torvalds  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
12911da177e4SLinus Torvalds  * sb_find_get_block().
12921da177e4SLinus Torvalds  *
12931da177e4SLinus Torvalds  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
12941da177e4SLinus Torvalds  * a local interrupt disable for that.
12951da177e4SLinus Torvalds  */
12961da177e4SLinus Torvalds 
129786cf78d7SSebastien Buisson #define BH_LRU_SIZE	16
12981da177e4SLinus Torvalds 
12991da177e4SLinus Torvalds struct bh_lru {
13001da177e4SLinus Torvalds 	struct buffer_head *bhs[BH_LRU_SIZE];
13011da177e4SLinus Torvalds };
13021da177e4SLinus Torvalds 
13031da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
13041da177e4SLinus Torvalds 
13051da177e4SLinus Torvalds #ifdef CONFIG_SMP
13061da177e4SLinus Torvalds #define bh_lru_lock()	local_irq_disable()
13071da177e4SLinus Torvalds #define bh_lru_unlock()	local_irq_enable()
13081da177e4SLinus Torvalds #else
13091da177e4SLinus Torvalds #define bh_lru_lock()	preempt_disable()
13101da177e4SLinus Torvalds #define bh_lru_unlock()	preempt_enable()
13111da177e4SLinus Torvalds #endif
13121da177e4SLinus Torvalds 
13131da177e4SLinus Torvalds static inline void check_irqs_on(void)
13141da177e4SLinus Torvalds {
13151da177e4SLinus Torvalds #ifdef irqs_disabled
13161da177e4SLinus Torvalds 	BUG_ON(irqs_disabled());
13171da177e4SLinus Torvalds #endif
13181da177e4SLinus Torvalds }
13191da177e4SLinus Torvalds 
13201da177e4SLinus Torvalds /*
1321241f01fbSEric Biggers  * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
1322241f01fbSEric Biggers  * inserted at the front, and the buffer_head at the back if any is evicted.
1323241f01fbSEric Biggers  * Or, if already in the LRU it is moved to the front.
13241da177e4SLinus Torvalds  */
13251da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh)
13261da177e4SLinus Torvalds {
1327241f01fbSEric Biggers 	struct buffer_head *evictee = bh;
1328241f01fbSEric Biggers 	struct bh_lru *b;
1329241f01fbSEric Biggers 	int i;
13301da177e4SLinus Torvalds 
13311da177e4SLinus Torvalds 	check_irqs_on();
1332c0226eb8SMinchan Kim 	bh_lru_lock();
1333c0226eb8SMinchan Kim 
13348cc621d2SMinchan Kim 	/*
13358cc621d2SMinchan Kim 	 * the refcount of buffer_head in bh_lru prevents dropping the
13368cc621d2SMinchan Kim 	 * attached page(i.e., try_to_free_buffers) so it could cause
13378cc621d2SMinchan Kim 	 * failing page migration.
13388cc621d2SMinchan Kim 	 * Skip putting upcoming bh into bh_lru until migration is done.
13398cc621d2SMinchan Kim 	 */
13408a237adfSMarcelo Tosatti 	if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
1341c0226eb8SMinchan Kim 		bh_lru_unlock();
13428cc621d2SMinchan Kim 		return;
1343c0226eb8SMinchan Kim 	}
1344241f01fbSEric Biggers 
1345241f01fbSEric Biggers 	b = this_cpu_ptr(&bh_lrus);
1346241f01fbSEric Biggers 	for (i = 0; i < BH_LRU_SIZE; i++) {
1347241f01fbSEric Biggers 		swap(evictee, b->bhs[i]);
1348241f01fbSEric Biggers 		if (evictee == bh) {
1349241f01fbSEric Biggers 			bh_lru_unlock();
1350241f01fbSEric Biggers 			return;
1351241f01fbSEric Biggers 		}
1352241f01fbSEric Biggers 	}
13531da177e4SLinus Torvalds 
13541da177e4SLinus Torvalds 	get_bh(bh);
13551da177e4SLinus Torvalds 	bh_lru_unlock();
1356241f01fbSEric Biggers 	brelse(evictee);
13571da177e4SLinus Torvalds }
13581da177e4SLinus Torvalds 
13591da177e4SLinus Torvalds /*
13601da177e4SLinus Torvalds  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
13611da177e4SLinus Torvalds  */
1362858119e1SArjan van de Ven static struct buffer_head *
13633991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
13641da177e4SLinus Torvalds {
13651da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
13663991d3bdSTomasz Kvarsin 	unsigned int i;
13671da177e4SLinus Torvalds 
13681da177e4SLinus Torvalds 	check_irqs_on();
13691da177e4SLinus Torvalds 	bh_lru_lock();
13708a237adfSMarcelo Tosatti 	if (cpu_is_isolated(smp_processor_id())) {
13718a237adfSMarcelo Tosatti 		bh_lru_unlock();
13728a237adfSMarcelo Tosatti 		return NULL;
13738a237adfSMarcelo Tosatti 	}
13741da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
1375c7b92516SChristoph Lameter 		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
13761da177e4SLinus Torvalds 
13779470dd5dSZach Brown 		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
13789470dd5dSZach Brown 		    bh->b_size == size) {
13791da177e4SLinus Torvalds 			if (i) {
13801da177e4SLinus Torvalds 				while (i) {
1381c7b92516SChristoph Lameter 					__this_cpu_write(bh_lrus.bhs[i],
1382c7b92516SChristoph Lameter 						__this_cpu_read(bh_lrus.bhs[i - 1]));
13831da177e4SLinus Torvalds 					i--;
13841da177e4SLinus Torvalds 				}
1385c7b92516SChristoph Lameter 				__this_cpu_write(bh_lrus.bhs[0], bh);
13861da177e4SLinus Torvalds 			}
13871da177e4SLinus Torvalds 			get_bh(bh);
13881da177e4SLinus Torvalds 			ret = bh;
13891da177e4SLinus Torvalds 			break;
13901da177e4SLinus Torvalds 		}
13911da177e4SLinus Torvalds 	}
13921da177e4SLinus Torvalds 	bh_lru_unlock();
13931da177e4SLinus Torvalds 	return ret;
13941da177e4SLinus Torvalds }
13951da177e4SLinus Torvalds 
13961da177e4SLinus Torvalds /*
13971da177e4SLinus Torvalds  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
13981da177e4SLinus Torvalds  * it in the LRU and mark it as accessed.  If it is not present then return
13991da177e4SLinus Torvalds  * NULL
14001da177e4SLinus Torvalds  */
14011da177e4SLinus Torvalds struct buffer_head *
14023991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
14031da177e4SLinus Torvalds {
14041da177e4SLinus Torvalds 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
14051da177e4SLinus Torvalds 
14061da177e4SLinus Torvalds 	if (bh == NULL) {
14072457aec6SMel Gorman 		/* __find_get_block_slow will mark the page accessed */
1408385fd4c5SCoywolf Qi Hunt 		bh = __find_get_block_slow(bdev, block);
14091da177e4SLinus Torvalds 		if (bh)
14101da177e4SLinus Torvalds 			bh_lru_install(bh);
14112457aec6SMel Gorman 	} else
14121da177e4SLinus Torvalds 		touch_buffer(bh);
14132457aec6SMel Gorman 
14141da177e4SLinus Torvalds 	return bh;
14151da177e4SLinus Torvalds }
14161da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block);
14171da177e4SLinus Torvalds 
14183ed65f04SMatthew Wilcox (Oracle) /**
14193ed65f04SMatthew Wilcox (Oracle)  * bdev_getblk - Get a buffer_head in a block device's buffer cache.
14203ed65f04SMatthew Wilcox (Oracle)  * @bdev: The block device.
14213ed65f04SMatthew Wilcox (Oracle)  * @block: The block number.
14223ed65f04SMatthew Wilcox (Oracle)  * @size: The size of buffer_heads for this @bdev.
14233ed65f04SMatthew Wilcox (Oracle)  * @gfp: The memory allocation flags to use.
14243ed65f04SMatthew Wilcox (Oracle)  *
14250b116ff4SMatthew Wilcox (Oracle)  * The returned buffer head has its reference count incremented, but is
14260b116ff4SMatthew Wilcox (Oracle)  * not locked.  The caller should call brelse() when it has finished
14270b116ff4SMatthew Wilcox (Oracle)  * with the buffer.  The buffer may not be uptodate.  If needed, the
14280b116ff4SMatthew Wilcox (Oracle)  * caller can bring it uptodate either by reading it or overwriting it.
14290b116ff4SMatthew Wilcox (Oracle)  *
14303ed65f04SMatthew Wilcox (Oracle)  * Return: The buffer head, or NULL if memory could not be allocated.
14313ed65f04SMatthew Wilcox (Oracle)  */
14323ed65f04SMatthew Wilcox (Oracle) struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
14333ed65f04SMatthew Wilcox (Oracle) 		unsigned size, gfp_t gfp)
14343ed65f04SMatthew Wilcox (Oracle) {
14353ed65f04SMatthew Wilcox (Oracle) 	struct buffer_head *bh = __find_get_block(bdev, block, size);
14363ed65f04SMatthew Wilcox (Oracle) 
14373ed65f04SMatthew Wilcox (Oracle) 	might_alloc(gfp);
14383ed65f04SMatthew Wilcox (Oracle) 	if (bh)
14393ed65f04SMatthew Wilcox (Oracle) 		return bh;
14403ed65f04SMatthew Wilcox (Oracle) 
14413ed65f04SMatthew Wilcox (Oracle) 	return __getblk_slow(bdev, block, size, gfp);
14423ed65f04SMatthew Wilcox (Oracle) }
14433ed65f04SMatthew Wilcox (Oracle) EXPORT_SYMBOL(bdev_getblk);
14443ed65f04SMatthew Wilcox (Oracle) 
14451da177e4SLinus Torvalds /*
14461da177e4SLinus Torvalds  * Do async read-ahead on a buffer..
14471da177e4SLinus Torvalds  */
14483991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
14491da177e4SLinus Torvalds {
1450775d9b10SMatthew Wilcox (Oracle) 	struct buffer_head *bh = bdev_getblk(bdev, block, size,
1451775d9b10SMatthew Wilcox (Oracle) 			GFP_NOWAIT | __GFP_MOVABLE);
1452775d9b10SMatthew Wilcox (Oracle) 
1453a3e713b5SAndrew Morton 	if (likely(bh)) {
1454e7ea1129SZhang Yi 		bh_readahead(bh, REQ_RAHEAD);
14551da177e4SLinus Torvalds 		brelse(bh);
14561da177e4SLinus Torvalds 	}
1457a3e713b5SAndrew Morton }
14581da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead);
14591da177e4SLinus Torvalds 
14601da177e4SLinus Torvalds /**
1461324ecaeeSMatthew Wilcox (Oracle)  * __bread_gfp() - Read a block.
1462324ecaeeSMatthew Wilcox (Oracle)  * @bdev: The block device to read from.
1463324ecaeeSMatthew Wilcox (Oracle)  * @block: Block number in units of block size.
1464324ecaeeSMatthew Wilcox (Oracle)  * @size: The block size of this device in bytes.
1465324ecaeeSMatthew Wilcox (Oracle)  * @gfp: Not page allocation flags; see below.
14661da177e4SLinus Torvalds  *
1467324ecaeeSMatthew Wilcox (Oracle)  * You are not expected to call this function.  You should use one of
1468324ecaeeSMatthew Wilcox (Oracle)  * sb_bread(), sb_bread_unmovable() or __bread().
1469324ecaeeSMatthew Wilcox (Oracle)  *
1470324ecaeeSMatthew Wilcox (Oracle)  * Read a specified block, and return the buffer head that refers to it.
1471324ecaeeSMatthew Wilcox (Oracle)  * If @gfp is 0, the memory will be allocated using the block device's
1472324ecaeeSMatthew Wilcox (Oracle)  * default GFP flags.  If @gfp is __GFP_MOVABLE, the memory may be
1473324ecaeeSMatthew Wilcox (Oracle)  * allocated from a movable area.  Do not pass in a complete set of
1474324ecaeeSMatthew Wilcox (Oracle)  * GFP flags.
1475324ecaeeSMatthew Wilcox (Oracle)  *
1476324ecaeeSMatthew Wilcox (Oracle)  * The returned buffer head has its refcount increased.  The caller should
1477324ecaeeSMatthew Wilcox (Oracle)  * call brelse() when it has finished with the buffer.
1478324ecaeeSMatthew Wilcox (Oracle)  *
1479324ecaeeSMatthew Wilcox (Oracle)  * Context: May sleep waiting for I/O.
1480324ecaeeSMatthew Wilcox (Oracle)  * Return: NULL if the block was unreadable.
14811da177e4SLinus Torvalds  */
1482324ecaeeSMatthew Wilcox (Oracle) struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
14833b5e6454SGioh Kim 		unsigned size, gfp_t gfp)
14841da177e4SLinus Torvalds {
148593b13ecaSMatthew Wilcox (Oracle) 	struct buffer_head *bh;
148693b13ecaSMatthew Wilcox (Oracle) 
1487224941e8SAl Viro 	gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
148893b13ecaSMatthew Wilcox (Oracle) 
148993b13ecaSMatthew Wilcox (Oracle) 	/*
149093b13ecaSMatthew Wilcox (Oracle) 	 * Prefer looping in the allocator rather than here, at least that
149193b13ecaSMatthew Wilcox (Oracle) 	 * code knows what it's doing.
149293b13ecaSMatthew Wilcox (Oracle) 	 */
149393b13ecaSMatthew Wilcox (Oracle) 	gfp |= __GFP_NOFAIL;
149493b13ecaSMatthew Wilcox (Oracle) 
149593b13ecaSMatthew Wilcox (Oracle) 	bh = bdev_getblk(bdev, block, size, gfp);
14961da177e4SLinus Torvalds 
1497a3e713b5SAndrew Morton 	if (likely(bh) && !buffer_uptodate(bh))
14981da177e4SLinus Torvalds 		bh = __bread_slow(bh);
14991da177e4SLinus Torvalds 	return bh;
15001da177e4SLinus Torvalds }
15013b5e6454SGioh Kim EXPORT_SYMBOL(__bread_gfp);
15021da177e4SLinus Torvalds 
15038cc621d2SMinchan Kim static void __invalidate_bh_lrus(struct bh_lru *b)
15048cc621d2SMinchan Kim {
15058cc621d2SMinchan Kim 	int i;
15068cc621d2SMinchan Kim 
15078cc621d2SMinchan Kim 	for (i = 0; i < BH_LRU_SIZE; i++) {
15088cc621d2SMinchan Kim 		brelse(b->bhs[i]);
15098cc621d2SMinchan Kim 		b->bhs[i] = NULL;
15108cc621d2SMinchan Kim 	}
15118cc621d2SMinchan Kim }
15121da177e4SLinus Torvalds /*
15131da177e4SLinus Torvalds  * invalidate_bh_lrus() is called rarely - but not only at unmount.
15141da177e4SLinus Torvalds  * This doesn't race because it runs in each cpu either in irq
15151da177e4SLinus Torvalds  * or with preempt disabled.
15161da177e4SLinus Torvalds  */
15171da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg)
15181da177e4SLinus Torvalds {
15191da177e4SLinus Torvalds 	struct bh_lru *b = &get_cpu_var(bh_lrus);
15201da177e4SLinus Torvalds 
15218cc621d2SMinchan Kim 	__invalidate_bh_lrus(b);
15221da177e4SLinus Torvalds 	put_cpu_var(bh_lrus);
15231da177e4SLinus Torvalds }
15241da177e4SLinus Torvalds 
15258cc621d2SMinchan Kim bool has_bh_in_lru(int cpu, void *dummy)
152642be35d0SGilad Ben-Yossef {
152742be35d0SGilad Ben-Yossef 	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
152842be35d0SGilad Ben-Yossef 	int i;
152942be35d0SGilad Ben-Yossef 
153042be35d0SGilad Ben-Yossef 	for (i = 0; i < BH_LRU_SIZE; i++) {
153142be35d0SGilad Ben-Yossef 		if (b->bhs[i])
15321d706679SSaurav Girepunje 			return true;
153342be35d0SGilad Ben-Yossef 	}
153442be35d0SGilad Ben-Yossef 
15351d706679SSaurav Girepunje 	return false;
153642be35d0SGilad Ben-Yossef }
153742be35d0SGilad Ben-Yossef 
1538f9a14399SPeter Zijlstra void invalidate_bh_lrus(void)
15391da177e4SLinus Torvalds {
1540cb923159SSebastian Andrzej Siewior 	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
15411da177e4SLinus Torvalds }
15429db5579bSNick Piggin EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
15431da177e4SLinus Torvalds 
1544243418e3SMinchan Kim /*
1545243418e3SMinchan Kim  * It's called from workqueue context so we need a bh_lru_lock to close
1546243418e3SMinchan Kim  * the race with preemption/irq.
1547243418e3SMinchan Kim  */
1548243418e3SMinchan Kim void invalidate_bh_lrus_cpu(void)
15498cc621d2SMinchan Kim {
15508cc621d2SMinchan Kim 	struct bh_lru *b;
15518cc621d2SMinchan Kim 
15528cc621d2SMinchan Kim 	bh_lru_lock();
1553243418e3SMinchan Kim 	b = this_cpu_ptr(&bh_lrus);
15548cc621d2SMinchan Kim 	__invalidate_bh_lrus(b);
15558cc621d2SMinchan Kim 	bh_lru_unlock();
15568cc621d2SMinchan Kim }
15578cc621d2SMinchan Kim 
1558465e5e6aSPankaj Raghav void folio_set_bh(struct buffer_head *bh, struct folio *folio,
1559465e5e6aSPankaj Raghav 		  unsigned long offset)
1560465e5e6aSPankaj Raghav {
1561465e5e6aSPankaj Raghav 	bh->b_folio = folio;
1562465e5e6aSPankaj Raghav 	BUG_ON(offset >= folio_size(folio));
1563465e5e6aSPankaj Raghav 	if (folio_test_highmem(folio))
1564465e5e6aSPankaj Raghav 		/*
1565465e5e6aSPankaj Raghav 		 * This catches illegal uses and preserves the offset:
1566465e5e6aSPankaj Raghav 		 */
1567465e5e6aSPankaj Raghav 		bh->b_data = (char *)(0 + offset);
1568465e5e6aSPankaj Raghav 	else
1569465e5e6aSPankaj Raghav 		bh->b_data = folio_address(folio) + offset;
1570465e5e6aSPankaj Raghav }
1571465e5e6aSPankaj Raghav EXPORT_SYMBOL(folio_set_bh);
1572465e5e6aSPankaj Raghav 
15731da177e4SLinus Torvalds /*
15741da177e4SLinus Torvalds  * Called when truncating a buffer on a page completely.
15751da177e4SLinus Torvalds  */
1576e7470ee8SMel Gorman 
1577e7470ee8SMel Gorman /* Bits that are cleared during an invalidate */
1578e7470ee8SMel Gorman #define BUFFER_FLAGS_DISCARD \
1579e7470ee8SMel Gorman 	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
1580e7470ee8SMel Gorman 	 1 << BH_Delay | 1 << BH_Unwritten)
1581e7470ee8SMel Gorman 
1582858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh)
15831da177e4SLinus Torvalds {
1584b0192296SUros Bizjak 	unsigned long b_state;
1585e7470ee8SMel Gorman 
15861da177e4SLinus Torvalds 	lock_buffer(bh);
15871da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
15881da177e4SLinus Torvalds 	bh->b_bdev = NULL;
1589b0192296SUros Bizjak 	b_state = READ_ONCE(bh->b_state);
1590b0192296SUros Bizjak 	do {
1591b0192296SUros Bizjak 	} while (!try_cmpxchg(&bh->b_state, &b_state,
1592b0192296SUros Bizjak 			      b_state & ~BUFFER_FLAGS_DISCARD));
15931da177e4SLinus Torvalds 	unlock_buffer(bh);
15941da177e4SLinus Torvalds }
15951da177e4SLinus Torvalds 
15961da177e4SLinus Torvalds /**
15977ba13abbSMatthew Wilcox (Oracle)  * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
15987ba13abbSMatthew Wilcox (Oracle)  * @folio: The folio which is affected.
1599d47992f8SLukas Czerner  * @offset: start of the range to invalidate
1600d47992f8SLukas Czerner  * @length: length of the range to invalidate
16011da177e4SLinus Torvalds  *
16027ba13abbSMatthew Wilcox (Oracle)  * block_invalidate_folio() is called when all or part of the folio has been
16031da177e4SLinus Torvalds  * invalidated by a truncate operation.
16041da177e4SLinus Torvalds  *
16057ba13abbSMatthew Wilcox (Oracle)  * block_invalidate_folio() does not have to release all buffers, but it must
16061da177e4SLinus Torvalds  * ensure that no dirty buffer is left outside @offset and that no I/O
16071da177e4SLinus Torvalds  * is underway against any of the blocks which are outside the truncation
16081da177e4SLinus Torvalds  * point.  Because the caller is about to free (and possibly reuse) those
16091da177e4SLinus Torvalds  * blocks on-disk.
16101da177e4SLinus Torvalds  */
16117ba13abbSMatthew Wilcox (Oracle) void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
16121da177e4SLinus Torvalds {
16131da177e4SLinus Torvalds 	struct buffer_head *head, *bh, *next;
16147ba13abbSMatthew Wilcox (Oracle) 	size_t curr_off = 0;
16157ba13abbSMatthew Wilcox (Oracle) 	size_t stop = length + offset;
16161da177e4SLinus Torvalds 
16177ba13abbSMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
16181da177e4SLinus Torvalds 
1619d47992f8SLukas Czerner 	/*
1620d47992f8SLukas Czerner 	 * Check for overflow
1621d47992f8SLukas Czerner 	 */
16227ba13abbSMatthew Wilcox (Oracle) 	BUG_ON(stop > folio_size(folio) || stop < length);
1623d47992f8SLukas Czerner 
16247ba13abbSMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
16257ba13abbSMatthew Wilcox (Oracle) 	if (!head)
16267ba13abbSMatthew Wilcox (Oracle) 		return;
16277ba13abbSMatthew Wilcox (Oracle) 
16281da177e4SLinus Torvalds 	bh = head;
16291da177e4SLinus Torvalds 	do {
16307ba13abbSMatthew Wilcox (Oracle) 		size_t next_off = curr_off + bh->b_size;
16311da177e4SLinus Torvalds 		next = bh->b_this_page;
16321da177e4SLinus Torvalds 
16331da177e4SLinus Torvalds 		/*
1634d47992f8SLukas Czerner 		 * Are we still fully in range ?
1635d47992f8SLukas Czerner 		 */
1636d47992f8SLukas Czerner 		if (next_off > stop)
1637d47992f8SLukas Czerner 			goto out;
1638d47992f8SLukas Czerner 
1639d47992f8SLukas Czerner 		/*
16401da177e4SLinus Torvalds 		 * is this block fully invalidated?
16411da177e4SLinus Torvalds 		 */
16421da177e4SLinus Torvalds 		if (offset <= curr_off)
16431da177e4SLinus Torvalds 			discard_buffer(bh);
16441da177e4SLinus Torvalds 		curr_off = next_off;
16451da177e4SLinus Torvalds 		bh = next;
16461da177e4SLinus Torvalds 	} while (bh != head);
16471da177e4SLinus Torvalds 
16481da177e4SLinus Torvalds 	/*
16497ba13abbSMatthew Wilcox (Oracle) 	 * We release buffers only if the entire folio is being invalidated.
16501da177e4SLinus Torvalds 	 * The get_block cached value has been unconditionally invalidated,
16511da177e4SLinus Torvalds 	 * so real IO is not possible anymore.
16521da177e4SLinus Torvalds 	 */
16537ba13abbSMatthew Wilcox (Oracle) 	if (length == folio_size(folio))
16547ba13abbSMatthew Wilcox (Oracle) 		filemap_release_folio(folio, 0);
16551da177e4SLinus Torvalds out:
16562ff28e22SNeilBrown 	return;
16571da177e4SLinus Torvalds }
16587ba13abbSMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_invalidate_folio);
16591da177e4SLinus Torvalds 
16601da177e4SLinus Torvalds /*
16611da177e4SLinus Torvalds  * We attach and possibly dirty the buffers atomically wrt
1662600f111eSMatthew Wilcox (Oracle)  * block_dirty_folio() via i_private_lock.  try_to_free_buffers
16638e2e1756SPankaj Raghav  * is already excluded via the folio lock.
16641da177e4SLinus Torvalds  */
16650a88810dSMatthew Wilcox (Oracle) struct buffer_head *create_empty_buffers(struct folio *folio,
16663decb856SMatthew Wilcox (Oracle) 		unsigned long blocksize, unsigned long b_state)
16671da177e4SLinus Torvalds {
16681da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *tail;
16692a418157SMatthew Wilcox (Oracle) 	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;
16701da177e4SLinus Torvalds 
16712a418157SMatthew Wilcox (Oracle) 	head = folio_alloc_buffers(folio, blocksize, gfp);
16721da177e4SLinus Torvalds 	bh = head;
16731da177e4SLinus Torvalds 	do {
16741da177e4SLinus Torvalds 		bh->b_state |= b_state;
16751da177e4SLinus Torvalds 		tail = bh;
16761da177e4SLinus Torvalds 		bh = bh->b_this_page;
16771da177e4SLinus Torvalds 	} while (bh);
16781da177e4SLinus Torvalds 	tail->b_this_page = head;
16791da177e4SLinus Torvalds 
1680600f111eSMatthew Wilcox (Oracle) 	spin_lock(&folio->mapping->i_private_lock);
16818e2e1756SPankaj Raghav 	if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
16821da177e4SLinus Torvalds 		bh = head;
16831da177e4SLinus Torvalds 		do {
16848e2e1756SPankaj Raghav 			if (folio_test_dirty(folio))
16851da177e4SLinus Torvalds 				set_buffer_dirty(bh);
16868e2e1756SPankaj Raghav 			if (folio_test_uptodate(folio))
16871da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
16881da177e4SLinus Torvalds 			bh = bh->b_this_page;
16891da177e4SLinus Torvalds 		} while (bh != head);
16901da177e4SLinus Torvalds 	}
16918e2e1756SPankaj Raghav 	folio_attach_private(folio, head);
1692600f111eSMatthew Wilcox (Oracle) 	spin_unlock(&folio->mapping->i_private_lock);
16933decb856SMatthew Wilcox (Oracle) 
16943decb856SMatthew Wilcox (Oracle) 	return head;
16958e2e1756SPankaj Raghav }
16961da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers);
16971da177e4SLinus Torvalds 
169829f3ad7dSJan Kara /**
169929f3ad7dSJan Kara  * clean_bdev_aliases: clean a range of buffers in block device
170029f3ad7dSJan Kara  * @bdev: Block device to clean buffers in
170129f3ad7dSJan Kara  * @block: Start of a range of blocks to clean
170229f3ad7dSJan Kara  * @len: Number of blocks to clean
17031da177e4SLinus Torvalds  *
170429f3ad7dSJan Kara  * We are taking a range of blocks for data and we don't want writeback of any
170529f3ad7dSJan Kara  * buffer-cache aliases starting from return from this function and until the
170629f3ad7dSJan Kara  * moment when something will explicitly mark the buffer dirty (hopefully that
170729f3ad7dSJan Kara  * will not happen until we will free that block ;-) We don't even need to mark
170829f3ad7dSJan Kara  * it not-uptodate - nobody can expect anything from a newly allocated buffer
170929f3ad7dSJan Kara  * anyway. We used to use unmap_buffer() for such invalidation, but that was
171029f3ad7dSJan Kara  * wrong. We definitely don't want to mark the alias unmapped, for example - it
171129f3ad7dSJan Kara  * would confuse anyone who might pick it with bread() afterwards...
171229f3ad7dSJan Kara  *
171329f3ad7dSJan Kara  * Also..  Note that bforget() doesn't lock the buffer.  So there can be
171429f3ad7dSJan Kara  * writeout I/O going on against recently-freed buffers.  We don't wait on that
171529f3ad7dSJan Kara  * I/O in bforget() - it's more efficient to wait on the I/O only if we really
171629f3ad7dSJan Kara  * need to.  That happens here.
17171da177e4SLinus Torvalds  */
171829f3ad7dSJan Kara void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
17191da177e4SLinus Torvalds {
172053cd4cd3SAl Viro 	struct address_space *bd_mapping = bdev->bd_mapping;
172153cd4cd3SAl Viro 	const int blkbits = bd_mapping->host->i_blkbits;
17229e0b6f31SMatthew Wilcox (Oracle) 	struct folio_batch fbatch;
172353cd4cd3SAl Viro 	pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE;
172429f3ad7dSJan Kara 	pgoff_t end;
1725c10f778dSJan Kara 	int i, count;
172629f3ad7dSJan Kara 	struct buffer_head *bh;
172729f3ad7dSJan Kara 	struct buffer_head *head;
17281da177e4SLinus Torvalds 
172953cd4cd3SAl Viro 	end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE;
17309e0b6f31SMatthew Wilcox (Oracle) 	folio_batch_init(&fbatch);
17319e0b6f31SMatthew Wilcox (Oracle) 	while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
17329e0b6f31SMatthew Wilcox (Oracle) 		count = folio_batch_count(&fbatch);
1733c10f778dSJan Kara 		for (i = 0; i < count; i++) {
17349e0b6f31SMatthew Wilcox (Oracle) 			struct folio *folio = fbatch.folios[i];
17351da177e4SLinus Torvalds 
17369e0b6f31SMatthew Wilcox (Oracle) 			if (!folio_buffers(folio))
173729f3ad7dSJan Kara 				continue;
173829f3ad7dSJan Kara 			/*
1739600f111eSMatthew Wilcox (Oracle) 			 * We use folio lock instead of bd_mapping->i_private_lock
174029f3ad7dSJan Kara 			 * to pin buffers here since we can afford to sleep and
174129f3ad7dSJan Kara 			 * it scales better than a global spinlock lock.
174229f3ad7dSJan Kara 			 */
17439e0b6f31SMatthew Wilcox (Oracle) 			folio_lock(folio);
17449e0b6f31SMatthew Wilcox (Oracle) 			/* Recheck when the folio is locked which pins bhs */
17459e0b6f31SMatthew Wilcox (Oracle) 			head = folio_buffers(folio);
17469e0b6f31SMatthew Wilcox (Oracle) 			if (!head)
174729f3ad7dSJan Kara 				goto unlock_page;
174829f3ad7dSJan Kara 			bh = head;
174929f3ad7dSJan Kara 			do {
17506c006a9dSChandan Rajendra 				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
175129f3ad7dSJan Kara 					goto next;
175229f3ad7dSJan Kara 				if (bh->b_blocknr >= block + len)
175329f3ad7dSJan Kara 					break;
175429f3ad7dSJan Kara 				clear_buffer_dirty(bh);
175529f3ad7dSJan Kara 				wait_on_buffer(bh);
175629f3ad7dSJan Kara 				clear_buffer_req(bh);
175729f3ad7dSJan Kara next:
175829f3ad7dSJan Kara 				bh = bh->b_this_page;
175929f3ad7dSJan Kara 			} while (bh != head);
176029f3ad7dSJan Kara unlock_page:
17619e0b6f31SMatthew Wilcox (Oracle) 			folio_unlock(folio);
176229f3ad7dSJan Kara 		}
17639e0b6f31SMatthew Wilcox (Oracle) 		folio_batch_release(&fbatch);
176429f3ad7dSJan Kara 		cond_resched();
1765c10f778dSJan Kara 		/* End of range already reached? */
1766c10f778dSJan Kara 		if (index > end || !index)
1767c10f778dSJan Kara 			break;
17681da177e4SLinus Torvalds 	}
17691da177e4SLinus Torvalds }
177029f3ad7dSJan Kara EXPORT_SYMBOL(clean_bdev_aliases);
17711da177e4SLinus Torvalds 
1772c6c8c3e7SPankaj Raghav static struct buffer_head *folio_create_buffers(struct folio *folio,
1773c6c8c3e7SPankaj Raghav 						struct inode *inode,
1774c6c8c3e7SPankaj Raghav 						unsigned int b_state)
177545bce8f3SLinus Torvalds {
17763decb856SMatthew Wilcox (Oracle) 	struct buffer_head *bh;
17773decb856SMatthew Wilcox (Oracle) 
1778c6c8c3e7SPankaj Raghav 	BUG_ON(!folio_test_locked(folio));
177945bce8f3SLinus Torvalds 
17803decb856SMatthew Wilcox (Oracle) 	bh = folio_buffers(folio);
17813decb856SMatthew Wilcox (Oracle) 	if (!bh)
17820a88810dSMatthew Wilcox (Oracle) 		bh = create_empty_buffers(folio,
17833decb856SMatthew Wilcox (Oracle) 				1 << READ_ONCE(inode->i_blkbits), b_state);
17843decb856SMatthew Wilcox (Oracle) 	return bh;
178545bce8f3SLinus Torvalds }
178645bce8f3SLinus Torvalds 
178745bce8f3SLinus Torvalds /*
17881da177e4SLinus Torvalds  * NOTE! All mapped/uptodate combinations are valid:
17891da177e4SLinus Torvalds  *
17901da177e4SLinus Torvalds  *	Mapped	Uptodate	Meaning
17911da177e4SLinus Torvalds  *
17921da177e4SLinus Torvalds  *	No	No		"unknown" - must do get_block()
17931da177e4SLinus Torvalds  *	No	Yes		"hole" - zero-filled
17941da177e4SLinus Torvalds  *	Yes	No		"allocated" - allocated on disk, not read in
17951da177e4SLinus Torvalds  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
17961da177e4SLinus Torvalds  *
17971da177e4SLinus Torvalds  * "Dirty" is valid only with the last case (mapped+uptodate).
17981da177e4SLinus Torvalds  */
17991da177e4SLinus Torvalds 
18001da177e4SLinus Torvalds /*
180117bf23a9SMatthew Wilcox (Oracle)  * While block_write_full_folio is writing back the dirty buffers under
18021da177e4SLinus Torvalds  * the page lock, whoever dirtied the buffers may decide to clean them
18031da177e4SLinus Torvalds  * again at any time.  We handle that by only looking at the buffer
18041da177e4SLinus Torvalds  * state inside lock_buffer().
18051da177e4SLinus Torvalds  *
180617bf23a9SMatthew Wilcox (Oracle)  * If block_write_full_folio() is called for regular writeback
18071da177e4SLinus Torvalds  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
18081da177e4SLinus Torvalds  * locked buffer.   This only can happen if someone has written the buffer
18091da177e4SLinus Torvalds  * directly, with submit_bh().  At the address_space level PageWriteback
18101da177e4SLinus Torvalds  * prevents this contention from occurring.
18116e34eeddSTheodore Ts'o  *
181217bf23a9SMatthew Wilcox (Oracle)  * If block_write_full_folio() is called with wbc->sync_mode ==
181370fd7614SChristoph Hellwig  * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1814721a9602SJens Axboe  * causes the writes to be flagged as synchronous writes.
18151da177e4SLinus Torvalds  */
181653418a18SMatthew Wilcox (Oracle) int __block_write_full_folio(struct inode *inode, struct folio *folio,
181714059f66SMatthew Wilcox (Oracle) 			get_block_t *get_block, struct writeback_control *wbc)
18181da177e4SLinus Torvalds {
18191da177e4SLinus Torvalds 	int err;
18201da177e4SLinus Torvalds 	sector_t block;
18211da177e4SLinus Torvalds 	sector_t last_block;
1822f0fbd5fcSAndrew Morton 	struct buffer_head *bh, *head;
1823fa399c31SMatthew Wilcox (Oracle) 	size_t blocksize;
18241da177e4SLinus Torvalds 	int nr_underway = 0;
18253ae72869SBart Van Assche 	blk_opf_t write_flags = wbc_to_write_flags(wbc);
18261da177e4SLinus Torvalds 
182753418a18SMatthew Wilcox (Oracle) 	head = folio_create_buffers(folio, inode,
18281da177e4SLinus Torvalds 				    (1 << BH_Dirty) | (1 << BH_Uptodate));
18291da177e4SLinus Torvalds 
18301da177e4SLinus Torvalds 	/*
1831e621900aSMatthew Wilcox (Oracle) 	 * Be very careful.  We have no exclusion from block_dirty_folio
18321da177e4SLinus Torvalds 	 * here, and the (potentially unmapped) buffers may become dirty at
18331da177e4SLinus Torvalds 	 * any time.  If a buffer becomes dirty here after we've inspected it
183453418a18SMatthew Wilcox (Oracle) 	 * then we just miss that fact, and the folio stays dirty.
18351da177e4SLinus Torvalds 	 *
1836e621900aSMatthew Wilcox (Oracle) 	 * Buffers outside i_size may be dirtied by block_dirty_folio;
18371da177e4SLinus Torvalds 	 * handle that here by just cleaning them.
18381da177e4SLinus Torvalds 	 */
18391da177e4SLinus Torvalds 
18401da177e4SLinus Torvalds 	bh = head;
184145bce8f3SLinus Torvalds 	blocksize = bh->b_size;
184245bce8f3SLinus Torvalds 
1843fa399c31SMatthew Wilcox (Oracle) 	block = div_u64(folio_pos(folio), blocksize);
1844fa399c31SMatthew Wilcox (Oracle) 	last_block = div_u64(i_size_read(inode) - 1, blocksize);
18451da177e4SLinus Torvalds 
18461da177e4SLinus Torvalds 	/*
18471da177e4SLinus Torvalds 	 * Get all the dirty buffers mapped to disk addresses and
18481da177e4SLinus Torvalds 	 * handle any aliases from the underlying blockdev's mapping.
18491da177e4SLinus Torvalds 	 */
18501da177e4SLinus Torvalds 	do {
18511da177e4SLinus Torvalds 		if (block > last_block) {
18521da177e4SLinus Torvalds 			/*
18531da177e4SLinus Torvalds 			 * mapped buffers outside i_size will occur, because
185453418a18SMatthew Wilcox (Oracle) 			 * this folio can be outside i_size when there is a
18551da177e4SLinus Torvalds 			 * truncate in progress.
18561da177e4SLinus Torvalds 			 */
18571da177e4SLinus Torvalds 			/*
185817bf23a9SMatthew Wilcox (Oracle) 			 * The buffer was zeroed by block_write_full_folio()
18591da177e4SLinus Torvalds 			 */
18601da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
18611da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
186229a814d2SAlex Tomas 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
186329a814d2SAlex Tomas 			   buffer_dirty(bh)) {
1864b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
18651da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
18661da177e4SLinus Torvalds 			if (err)
18671da177e4SLinus Torvalds 				goto recover;
186829a814d2SAlex Tomas 			clear_buffer_delay(bh);
18691da177e4SLinus Torvalds 			if (buffer_new(bh)) {
18701da177e4SLinus Torvalds 				/* blockdev mappings never come here */
18711da177e4SLinus Torvalds 				clear_buffer_new(bh);
1872e64855c6SJan Kara 				clean_bdev_bh_alias(bh);
18731da177e4SLinus Torvalds 			}
18741da177e4SLinus Torvalds 		}
18751da177e4SLinus Torvalds 		bh = bh->b_this_page;
18761da177e4SLinus Torvalds 		block++;
18771da177e4SLinus Torvalds 	} while (bh != head);
18781da177e4SLinus Torvalds 
18791da177e4SLinus Torvalds 	do {
18801da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
18811da177e4SLinus Torvalds 			continue;
18821da177e4SLinus Torvalds 		/*
18831da177e4SLinus Torvalds 		 * If it's a fully non-blocking write attempt and we cannot
188453418a18SMatthew Wilcox (Oracle) 		 * lock the buffer then redirty the folio.  Note that this can
18855b0830cbSJens Axboe 		 * potentially cause a busy-wait loop from writeback threads
18865b0830cbSJens Axboe 		 * and kswapd activity, but those code paths have their own
18875b0830cbSJens Axboe 		 * higher-level throttling.
18881da177e4SLinus Torvalds 		 */
18891b430beeSWu Fengguang 		if (wbc->sync_mode != WB_SYNC_NONE) {
18901da177e4SLinus Torvalds 			lock_buffer(bh);
1891ca5de404SNick Piggin 		} else if (!trylock_buffer(bh)) {
189253418a18SMatthew Wilcox (Oracle) 			folio_redirty_for_writepage(wbc, folio);
18931da177e4SLinus Torvalds 			continue;
18941da177e4SLinus Torvalds 		}
18951da177e4SLinus Torvalds 		if (test_clear_buffer_dirty(bh)) {
189614059f66SMatthew Wilcox (Oracle) 			mark_buffer_async_write_endio(bh,
189714059f66SMatthew Wilcox (Oracle) 				end_buffer_async_write);
18981da177e4SLinus Torvalds 		} else {
18991da177e4SLinus Torvalds 			unlock_buffer(bh);
19001da177e4SLinus Torvalds 		}
19011da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
19021da177e4SLinus Torvalds 
19031da177e4SLinus Torvalds 	/*
190453418a18SMatthew Wilcox (Oracle) 	 * The folio and its buffers are protected by the writeback flag,
190553418a18SMatthew Wilcox (Oracle) 	 * so we can drop the bh refcounts early.
19061da177e4SLinus Torvalds 	 */
190753418a18SMatthew Wilcox (Oracle) 	BUG_ON(folio_test_writeback(folio));
190853418a18SMatthew Wilcox (Oracle) 	folio_start_writeback(folio);
19091da177e4SLinus Torvalds 
19101da177e4SLinus Torvalds 	do {
19111da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
19121da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
191344981351SBart Van Assche 			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
191444981351SBart Van Assche 				      inode->i_write_hint, wbc);
19151da177e4SLinus Torvalds 			nr_underway++;
1916ad576e63SNick Piggin 		}
19171da177e4SLinus Torvalds 		bh = next;
19181da177e4SLinus Torvalds 	} while (bh != head);
191953418a18SMatthew Wilcox (Oracle) 	folio_unlock(folio);
19201da177e4SLinus Torvalds 
19211da177e4SLinus Torvalds 	err = 0;
19221da177e4SLinus Torvalds done:
19231da177e4SLinus Torvalds 	if (nr_underway == 0) {
19241da177e4SLinus Torvalds 		/*
192553418a18SMatthew Wilcox (Oracle) 		 * The folio was marked dirty, but the buffers were
19261da177e4SLinus Torvalds 		 * clean.  Someone wrote them back by hand with
192779f59784SZhang Yi 		 * write_dirty_buffer/submit_bh.  A rare case.
19281da177e4SLinus Torvalds 		 */
192953418a18SMatthew Wilcox (Oracle) 		folio_end_writeback(folio);
19303d67f2d7SNick Piggin 
19311da177e4SLinus Torvalds 		/*
193253418a18SMatthew Wilcox (Oracle) 		 * The folio and buffer_heads can be released at any time from
19331da177e4SLinus Torvalds 		 * here on.
19341da177e4SLinus Torvalds 		 */
19351da177e4SLinus Torvalds 	}
19361da177e4SLinus Torvalds 	return err;
19371da177e4SLinus Torvalds 
19381da177e4SLinus Torvalds recover:
19391da177e4SLinus Torvalds 	/*
19401da177e4SLinus Torvalds 	 * ENOSPC, or some other error.  We may already have added some
19411da177e4SLinus Torvalds 	 * blocks to the file, so we need to write these out to avoid
19421da177e4SLinus Torvalds 	 * exposing stale data.
194353418a18SMatthew Wilcox (Oracle) 	 * The folio is currently locked and not marked for writeback
19441da177e4SLinus Torvalds 	 */
19451da177e4SLinus Torvalds 	bh = head;
19461da177e4SLinus Torvalds 	/* Recovery: lock and submit the mapped buffers */
19471da177e4SLinus Torvalds 	do {
194829a814d2SAlex Tomas 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
194929a814d2SAlex Tomas 		    !buffer_delay(bh)) {
19501da177e4SLinus Torvalds 			lock_buffer(bh);
195114059f66SMatthew Wilcox (Oracle) 			mark_buffer_async_write_endio(bh,
195214059f66SMatthew Wilcox (Oracle) 				end_buffer_async_write);
19531da177e4SLinus Torvalds 		} else {
19541da177e4SLinus Torvalds 			/*
19551da177e4SLinus Torvalds 			 * The buffer may have been set dirty during
195653418a18SMatthew Wilcox (Oracle) 			 * attachment to a dirty folio.
19571da177e4SLinus Torvalds 			 */
19581da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
19591da177e4SLinus Torvalds 		}
19601da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
196153418a18SMatthew Wilcox (Oracle) 	BUG_ON(folio_test_writeback(folio));
196253418a18SMatthew Wilcox (Oracle) 	mapping_set_error(folio->mapping, err);
196353418a18SMatthew Wilcox (Oracle) 	folio_start_writeback(folio);
19641da177e4SLinus Torvalds 	do {
19651da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
19661da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
19671da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
196844981351SBart Van Assche 			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
196944981351SBart Van Assche 				      inode->i_write_hint, wbc);
19701da177e4SLinus Torvalds 			nr_underway++;
1971ad576e63SNick Piggin 		}
19721da177e4SLinus Torvalds 		bh = next;
19731da177e4SLinus Torvalds 	} while (bh != head);
197453418a18SMatthew Wilcox (Oracle) 	folio_unlock(folio);
19751da177e4SLinus Torvalds 	goto done;
19761da177e4SLinus Torvalds }
197753418a18SMatthew Wilcox (Oracle) EXPORT_SYMBOL(__block_write_full_folio);
19781da177e4SLinus Torvalds 
1979afddba49SNick Piggin /*
19804a9622f2SMatthew Wilcox (Oracle)  * If a folio has any new buffers, zero them out here, and mark them uptodate
1981afddba49SNick Piggin  * and dirty so they'll be written out (in order to prevent uninitialised
1982afddba49SNick Piggin  * block data from leaking). And clear the new bit.
1983afddba49SNick Piggin  */
19844a9622f2SMatthew Wilcox (Oracle) void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
1985afddba49SNick Piggin {
19864a9622f2SMatthew Wilcox (Oracle) 	size_t block_start, block_end;
1987afddba49SNick Piggin 	struct buffer_head *head, *bh;
1988afddba49SNick Piggin 
19894a9622f2SMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
19904a9622f2SMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
19914a9622f2SMatthew Wilcox (Oracle) 	if (!head)
1992afddba49SNick Piggin 		return;
1993afddba49SNick Piggin 
19944a9622f2SMatthew Wilcox (Oracle) 	bh = head;
1995afddba49SNick Piggin 	block_start = 0;
1996afddba49SNick Piggin 	do {
1997afddba49SNick Piggin 		block_end = block_start + bh->b_size;
1998afddba49SNick Piggin 
1999afddba49SNick Piggin 		if (buffer_new(bh)) {
2000afddba49SNick Piggin 			if (block_end > from && block_start < to) {
20014a9622f2SMatthew Wilcox (Oracle) 				if (!folio_test_uptodate(folio)) {
20024a9622f2SMatthew Wilcox (Oracle) 					size_t start, xend;
2003afddba49SNick Piggin 
2004afddba49SNick Piggin 					start = max(from, block_start);
20054a9622f2SMatthew Wilcox (Oracle) 					xend = min(to, block_end);
2006afddba49SNick Piggin 
20074a9622f2SMatthew Wilcox (Oracle) 					folio_zero_segment(folio, start, xend);
2008afddba49SNick Piggin 					set_buffer_uptodate(bh);
2009afddba49SNick Piggin 				}
2010afddba49SNick Piggin 
2011afddba49SNick Piggin 				clear_buffer_new(bh);
2012afddba49SNick Piggin 				mark_buffer_dirty(bh);
2013afddba49SNick Piggin 			}
2014afddba49SNick Piggin 		}
2015afddba49SNick Piggin 
2016afddba49SNick Piggin 		block_start = block_end;
2017afddba49SNick Piggin 		bh = bh->b_this_page;
2018afddba49SNick Piggin 	} while (bh != head);
2019afddba49SNick Piggin }
20204a9622f2SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_zero_new_buffers);
2021afddba49SNick Piggin 
20224aa8cdd5SChristoph Hellwig static int
2023ae259a9cSChristoph Hellwig iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
20246d49cc85SChristoph Hellwig 		const struct iomap *iomap)
2025ae259a9cSChristoph Hellwig {
202680844194SMatthew Wilcox (Oracle) 	loff_t offset = (loff_t)block << inode->i_blkbits;
2027ae259a9cSChristoph Hellwig 
2028ae259a9cSChristoph Hellwig 	bh->b_bdev = iomap->bdev;
2029ae259a9cSChristoph Hellwig 
2030ae259a9cSChristoph Hellwig 	/*
2031ae259a9cSChristoph Hellwig 	 * Block points to offset in file we need to map, iomap contains
2032ae259a9cSChristoph Hellwig 	 * the offset at which the map starts. If the map ends before the
2033ae259a9cSChristoph Hellwig 	 * current block, then do not map the buffer and let the caller
2034ae259a9cSChristoph Hellwig 	 * handle it.
2035ae259a9cSChristoph Hellwig 	 */
20364aa8cdd5SChristoph Hellwig 	if (offset >= iomap->offset + iomap->length)
20374aa8cdd5SChristoph Hellwig 		return -EIO;
2038ae259a9cSChristoph Hellwig 
2039ae259a9cSChristoph Hellwig 	switch (iomap->type) {
2040ae259a9cSChristoph Hellwig 	case IOMAP_HOLE:
2041ae259a9cSChristoph Hellwig 		/*
2042ae259a9cSChristoph Hellwig 		 * If the buffer is not up to date or beyond the current EOF,
2043ae259a9cSChristoph Hellwig 		 * we need to mark it as new to ensure sub-block zeroing is
2044ae259a9cSChristoph Hellwig 		 * executed if necessary.
2045ae259a9cSChristoph Hellwig 		 */
2046ae259a9cSChristoph Hellwig 		if (!buffer_uptodate(bh) ||
2047ae259a9cSChristoph Hellwig 		    (offset >= i_size_read(inode)))
2048ae259a9cSChristoph Hellwig 			set_buffer_new(bh);
20494aa8cdd5SChristoph Hellwig 		return 0;
2050ae259a9cSChristoph Hellwig 	case IOMAP_DELALLOC:
2051ae259a9cSChristoph Hellwig 		if (!buffer_uptodate(bh) ||
2052ae259a9cSChristoph Hellwig 		    (offset >= i_size_read(inode)))
2053ae259a9cSChristoph Hellwig 			set_buffer_new(bh);
2054ae259a9cSChristoph Hellwig 		set_buffer_uptodate(bh);
2055ae259a9cSChristoph Hellwig 		set_buffer_mapped(bh);
2056ae259a9cSChristoph Hellwig 		set_buffer_delay(bh);
20574aa8cdd5SChristoph Hellwig 		return 0;
2058ae259a9cSChristoph Hellwig 	case IOMAP_UNWRITTEN:
2059ae259a9cSChristoph Hellwig 		/*
20603d7b6b21SAndreas Gruenbacher 		 * For unwritten regions, we always need to ensure that regions
20613d7b6b21SAndreas Gruenbacher 		 * in the block we are not writing to are zeroed. Mark the
20623d7b6b21SAndreas Gruenbacher 		 * buffer as new to ensure this.
2063ae259a9cSChristoph Hellwig 		 */
2064ae259a9cSChristoph Hellwig 		set_buffer_new(bh);
2065ae259a9cSChristoph Hellwig 		set_buffer_unwritten(bh);
2066df561f66SGustavo A. R. Silva 		fallthrough;
2067ae259a9cSChristoph Hellwig 	case IOMAP_MAPPED:
20683d7b6b21SAndreas Gruenbacher 		if ((iomap->flags & IOMAP_F_NEW) ||
2069381c0432SChristoph Hellwig 		    offset >= i_size_read(inode)) {
2070381c0432SChristoph Hellwig 			/*
2071381c0432SChristoph Hellwig 			 * This can happen if truncating the block device races
2072381c0432SChristoph Hellwig 			 * with the check in the caller as i_size updates on
2073381c0432SChristoph Hellwig 			 * block devices aren't synchronized by i_rwsem for
2074381c0432SChristoph Hellwig 			 * block devices.
2075381c0432SChristoph Hellwig 			 */
2076381c0432SChristoph Hellwig 			if (S_ISBLK(inode->i_mode))
2077381c0432SChristoph Hellwig 				return -EIO;
2078ae259a9cSChristoph Hellwig 			set_buffer_new(bh);
2079381c0432SChristoph Hellwig 		}
208019fe5f64SAndreas Gruenbacher 		bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
208119fe5f64SAndreas Gruenbacher 				inode->i_blkbits;
2082ae259a9cSChristoph Hellwig 		set_buffer_mapped(bh);
20834aa8cdd5SChristoph Hellwig 		return 0;
20844aa8cdd5SChristoph Hellwig 	default:
20854aa8cdd5SChristoph Hellwig 		WARN_ON_ONCE(1);
20864aa8cdd5SChristoph Hellwig 		return -EIO;
2087ae259a9cSChristoph Hellwig 	}
2088ae259a9cSChristoph Hellwig }
2089ae259a9cSChristoph Hellwig 
2090d1bd0b4eSMatthew Wilcox (Oracle) int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
20916d49cc85SChristoph Hellwig 		get_block_t *get_block, const struct iomap *iomap)
20921da177e4SLinus Torvalds {
2093b0619401SMatthew Wilcox (Oracle) 	size_t from = offset_in_folio(folio, pos);
2094b0619401SMatthew Wilcox (Oracle) 	size_t to = from + len;
2095d1bd0b4eSMatthew Wilcox (Oracle) 	struct inode *inode = folio->mapping->host;
2096b0619401SMatthew Wilcox (Oracle) 	size_t block_start, block_end;
20971da177e4SLinus Torvalds 	sector_t block;
20981da177e4SLinus Torvalds 	int err = 0;
2099b0619401SMatthew Wilcox (Oracle) 	size_t blocksize;
21001da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
21011da177e4SLinus Torvalds 
2102d1bd0b4eSMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
2103b0619401SMatthew Wilcox (Oracle) 	BUG_ON(to > folio_size(folio));
21041da177e4SLinus Torvalds 	BUG_ON(from > to);
21051da177e4SLinus Torvalds 
2106c6c8c3e7SPankaj Raghav 	head = folio_create_buffers(folio, inode, 0);
210745bce8f3SLinus Torvalds 	blocksize = head->b_size;
2108b0619401SMatthew Wilcox (Oracle) 	block = div_u64(folio_pos(folio), blocksize);
21091da177e4SLinus Torvalds 
21101da177e4SLinus Torvalds 	for (bh = head, block_start = 0; bh != head || !block_start;
21111da177e4SLinus Torvalds 	    block++, block_start=block_end, bh = bh->b_this_page) {
21121da177e4SLinus Torvalds 		block_end = block_start + blocksize;
21131da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
2114d1bd0b4eSMatthew Wilcox (Oracle) 			if (folio_test_uptodate(folio)) {
21151da177e4SLinus Torvalds 				if (!buffer_uptodate(bh))
21161da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
21171da177e4SLinus Torvalds 			}
21181da177e4SLinus Torvalds 			continue;
21191da177e4SLinus Torvalds 		}
21201da177e4SLinus Torvalds 		if (buffer_new(bh))
21211da177e4SLinus Torvalds 			clear_buffer_new(bh);
21221da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
2123b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
21244aa8cdd5SChristoph Hellwig 			if (get_block)
21251da177e4SLinus Torvalds 				err = get_block(inode, block, bh, 1);
21264aa8cdd5SChristoph Hellwig 			else
21274aa8cdd5SChristoph Hellwig 				err = iomap_to_bh(inode, block, bh, iomap);
21281da177e4SLinus Torvalds 			if (err)
2129f3ddbdc6SNick Piggin 				break;
2130ae259a9cSChristoph Hellwig 
21311da177e4SLinus Torvalds 			if (buffer_new(bh)) {
2132e64855c6SJan Kara 				clean_bdev_bh_alias(bh);
2133d1bd0b4eSMatthew Wilcox (Oracle) 				if (folio_test_uptodate(folio)) {
2134637aff46SNick Piggin 					clear_buffer_new(bh);
21351da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
2136637aff46SNick Piggin 					mark_buffer_dirty(bh);
21371da177e4SLinus Torvalds 					continue;
21381da177e4SLinus Torvalds 				}
2139eebd2aa3SChristoph Lameter 				if (block_end > to || block_start < from)
2140d1bd0b4eSMatthew Wilcox (Oracle) 					folio_zero_segments(folio,
2141eebd2aa3SChristoph Lameter 						to, block_end,
2142eebd2aa3SChristoph Lameter 						block_start, from);
21431da177e4SLinus Torvalds 				continue;
21441da177e4SLinus Torvalds 			}
21451da177e4SLinus Torvalds 		}
2146d1bd0b4eSMatthew Wilcox (Oracle) 		if (folio_test_uptodate(folio)) {
21471da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
21481da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
21491da177e4SLinus Torvalds 			continue;
21501da177e4SLinus Torvalds 		}
21511da177e4SLinus Torvalds 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
215233a266ddSDavid Chinner 		    !buffer_unwritten(bh) &&
21531da177e4SLinus Torvalds 		     (block_start < from || block_end > to)) {
2154e7ea1129SZhang Yi 			bh_read_nowait(bh, 0);
21551da177e4SLinus Torvalds 			*wait_bh++=bh;
21561da177e4SLinus Torvalds 		}
21571da177e4SLinus Torvalds 	}
21581da177e4SLinus Torvalds 	/*
21591da177e4SLinus Torvalds 	 * If we issued read requests - let them complete.
21601da177e4SLinus Torvalds 	 */
21611da177e4SLinus Torvalds 	while(wait_bh > wait) {
21621da177e4SLinus Torvalds 		wait_on_buffer(*--wait_bh);
21631da177e4SLinus Torvalds 		if (!buffer_uptodate(*wait_bh))
2164f3ddbdc6SNick Piggin 			err = -EIO;
21651da177e4SLinus Torvalds 	}
2166f9f07b6cSJan Kara 	if (unlikely(err))
21674a9622f2SMatthew Wilcox (Oracle) 		folio_zero_new_buffers(folio, from, to);
21681da177e4SLinus Torvalds 	return err;
21691da177e4SLinus Torvalds }
2170ae259a9cSChristoph Hellwig 
2171ae259a9cSChristoph Hellwig int __block_write_begin(struct page *page, loff_t pos, unsigned len,
2172ae259a9cSChristoph Hellwig 		get_block_t *get_block)
2173ae259a9cSChristoph Hellwig {
2174d1bd0b4eSMatthew Wilcox (Oracle) 	return __block_write_begin_int(page_folio(page), pos, len, get_block,
2175d1bd0b4eSMatthew Wilcox (Oracle) 				       NULL);
2176ae259a9cSChristoph Hellwig }
2177ebdec241SChristoph Hellwig EXPORT_SYMBOL(__block_write_begin);
21781da177e4SLinus Torvalds 
2179a524fcfeSBean Huo static void __block_commit_write(struct folio *folio, size_t from, size_t to)
21801da177e4SLinus Torvalds {
21818c6cb3e3SMatthew Wilcox (Oracle) 	size_t block_start, block_end;
21828c6cb3e3SMatthew Wilcox (Oracle) 	bool partial = false;
21831da177e4SLinus Torvalds 	unsigned blocksize;
21841da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
21851da177e4SLinus Torvalds 
21868c6cb3e3SMatthew Wilcox (Oracle) 	bh = head = folio_buffers(folio);
218783f4414bSWojciech Gładysz 	if (!bh)
218883f4414bSWojciech Gładysz 		return;
218945bce8f3SLinus Torvalds 	blocksize = bh->b_size;
21901da177e4SLinus Torvalds 
219145bce8f3SLinus Torvalds 	block_start = 0;
219245bce8f3SLinus Torvalds 	do {
21931da177e4SLinus Torvalds 		block_end = block_start + blocksize;
21941da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
21951da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
21968c6cb3e3SMatthew Wilcox (Oracle) 				partial = true;
21971da177e4SLinus Torvalds 		} else {
21981da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
21991da177e4SLinus Torvalds 			mark_buffer_dirty(bh);
22001da177e4SLinus Torvalds 		}
22014ebd3aecSYang Guo 		if (buffer_new(bh))
2202afddba49SNick Piggin 			clear_buffer_new(bh);
220345bce8f3SLinus Torvalds 
220445bce8f3SLinus Torvalds 		block_start = block_end;
220545bce8f3SLinus Torvalds 		bh = bh->b_this_page;
220645bce8f3SLinus Torvalds 	} while (bh != head);
22071da177e4SLinus Torvalds 
22081da177e4SLinus Torvalds 	/*
22091da177e4SLinus Torvalds 	 * If this is a partial write which happened to make all buffers
22102c69e205SMatthew Wilcox (Oracle) 	 * uptodate then we can optimize away a bogus read_folio() for
22118c6cb3e3SMatthew Wilcox (Oracle) 	 * the next read(). Here we 'discover' whether the folio went
22121da177e4SLinus Torvalds 	 * uptodate as a result of this (potentially partial) write.
22131da177e4SLinus Torvalds 	 */
22141da177e4SLinus Torvalds 	if (!partial)
22158c6cb3e3SMatthew Wilcox (Oracle) 		folio_mark_uptodate(folio);
22161da177e4SLinus Torvalds }
22171da177e4SLinus Torvalds 
22181da177e4SLinus Torvalds /*
2219155130a4SChristoph Hellwig  * block_write_begin takes care of the basic task of block allocation and
2220155130a4SChristoph Hellwig  * bringing partial write blocks uptodate first.
2221155130a4SChristoph Hellwig  *
22227bb46a67Snpiggin@suse.de  * The filesystem needs to handle block truncation upon failure.
2223afddba49SNick Piggin  */
2224155130a4SChristoph Hellwig int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2225*1da86618SMatthew Wilcox (Oracle) 		struct folio **foliop, get_block_t *get_block)
2226afddba49SNick Piggin {
222709cbfeafSKirill A. Shutemov 	pgoff_t index = pos >> PAGE_SHIFT;
22288eb835a1SMatthew Wilcox (Oracle) 	struct folio *folio;
22296e1db88dSChristoph Hellwig 	int status;
2230afddba49SNick Piggin 
22318eb835a1SMatthew Wilcox (Oracle) 	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
22328eb835a1SMatthew Wilcox (Oracle) 			mapping_gfp_mask(mapping));
22338eb835a1SMatthew Wilcox (Oracle) 	if (IS_ERR(folio))
22348eb835a1SMatthew Wilcox (Oracle) 		return PTR_ERR(folio);
2235afddba49SNick Piggin 
22368eb835a1SMatthew Wilcox (Oracle) 	status = __block_write_begin_int(folio, pos, len, get_block, NULL);
2237afddba49SNick Piggin 	if (unlikely(status)) {
22388eb835a1SMatthew Wilcox (Oracle) 		folio_unlock(folio);
22398eb835a1SMatthew Wilcox (Oracle) 		folio_put(folio);
22408eb835a1SMatthew Wilcox (Oracle) 		folio = NULL;
2241afddba49SNick Piggin 	}
2242afddba49SNick Piggin 
2243*1da86618SMatthew Wilcox (Oracle) 	*foliop = folio;
2244afddba49SNick Piggin 	return status;
2245afddba49SNick Piggin }
2246afddba49SNick Piggin EXPORT_SYMBOL(block_write_begin);
2247afddba49SNick Piggin 
2248afddba49SNick Piggin int block_write_end(struct file *file, struct address_space *mapping,
2249afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
225097edbc02SMatthew Wilcox (Oracle) 			struct folio *folio, void *fsdata)
2251afddba49SNick Piggin {
22528c6cb3e3SMatthew Wilcox (Oracle) 	size_t start = pos - folio_pos(folio);
2253afddba49SNick Piggin 
2254afddba49SNick Piggin 	if (unlikely(copied < len)) {
2255afddba49SNick Piggin 		/*
22562c69e205SMatthew Wilcox (Oracle) 		 * The buffers that were written will now be uptodate, so
22572c69e205SMatthew Wilcox (Oracle) 		 * we don't have to worry about a read_folio reading them
22582c69e205SMatthew Wilcox (Oracle) 		 * and overwriting a partial write. However if we have
22592c69e205SMatthew Wilcox (Oracle) 		 * encountered a short write and only partially written
22602c69e205SMatthew Wilcox (Oracle) 		 * into a buffer, it will not be marked uptodate, so a
22612c69e205SMatthew Wilcox (Oracle) 		 * read_folio might come in and destroy our partial write.
2262afddba49SNick Piggin 		 *
2263afddba49SNick Piggin 		 * Do the simplest thing, and just treat any short write to a
22648c6cb3e3SMatthew Wilcox (Oracle) 		 * non uptodate folio as a zero-length write, and force the
2265afddba49SNick Piggin 		 * caller to redo the whole thing.
2266afddba49SNick Piggin 		 */
22678c6cb3e3SMatthew Wilcox (Oracle) 		if (!folio_test_uptodate(folio))
2268afddba49SNick Piggin 			copied = 0;
2269afddba49SNick Piggin 
22704a9622f2SMatthew Wilcox (Oracle) 		folio_zero_new_buffers(folio, start+copied, start+len);
2271afddba49SNick Piggin 	}
22728c6cb3e3SMatthew Wilcox (Oracle) 	flush_dcache_folio(folio);
2273afddba49SNick Piggin 
2274afddba49SNick Piggin 	/* This could be a short (even 0-length) commit */
2275489b7e72SBean Huo 	__block_commit_write(folio, start, start + copied);
2276afddba49SNick Piggin 
2277afddba49SNick Piggin 	return copied;
2278afddba49SNick Piggin }
2279afddba49SNick Piggin EXPORT_SYMBOL(block_write_end);
2280afddba49SNick Piggin 
2281afddba49SNick Piggin int generic_write_end(struct file *file, struct address_space *mapping,
2282afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2283a225800fSMatthew Wilcox (Oracle) 			struct folio *folio, void *fsdata)
2284afddba49SNick Piggin {
22858af54f29SChristoph Hellwig 	struct inode *inode = mapping->host;
22868af54f29SChristoph Hellwig 	loff_t old_size = inode->i_size;
22878af54f29SChristoph Hellwig 	bool i_size_changed = false;
22888af54f29SChristoph Hellwig 
228997edbc02SMatthew Wilcox (Oracle) 	copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
22908af54f29SChristoph Hellwig 
22918af54f29SChristoph Hellwig 	/*
22928af54f29SChristoph Hellwig 	 * No need to use i_size_read() here, the i_size cannot change under us
22938af54f29SChristoph Hellwig 	 * because we hold i_rwsem.
22948af54f29SChristoph Hellwig 	 *
2295696876d0SMatthew Wilcox (Oracle) 	 * But it's important to update i_size while still holding folio lock:
22968af54f29SChristoph Hellwig 	 * page writeout could otherwise come in and zero beyond i_size.
22978af54f29SChristoph Hellwig 	 */
22988af54f29SChristoph Hellwig 	if (pos + copied > inode->i_size) {
22998af54f29SChristoph Hellwig 		i_size_write(inode, pos + copied);
23008af54f29SChristoph Hellwig 		i_size_changed = true;
23018af54f29SChristoph Hellwig 	}
23028af54f29SChristoph Hellwig 
2303696876d0SMatthew Wilcox (Oracle) 	folio_unlock(folio);
2304696876d0SMatthew Wilcox (Oracle) 	folio_put(folio);
23058af54f29SChristoph Hellwig 
23068af54f29SChristoph Hellwig 	if (old_size < pos)
23078af54f29SChristoph Hellwig 		pagecache_isize_extended(inode, old_size, pos);
23088af54f29SChristoph Hellwig 	/*
23098af54f29SChristoph Hellwig 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
23108af54f29SChristoph Hellwig 	 * makes the holding time of page lock longer. Second, it forces lock
23118af54f29SChristoph Hellwig 	 * ordering of page lock and transaction start for journaling
23128af54f29SChristoph Hellwig 	 * filesystems.
23138af54f29SChristoph Hellwig 	 */
23148af54f29SChristoph Hellwig 	if (i_size_changed)
23158af54f29SChristoph Hellwig 		mark_inode_dirty(inode);
231626ddb1f4SAndreas Gruenbacher 	return copied;
2317afddba49SNick Piggin }
2318afddba49SNick Piggin EXPORT_SYMBOL(generic_write_end);
2319afddba49SNick Piggin 
2320afddba49SNick Piggin /*
23212e7e80f7SMatthew Wilcox (Oracle)  * block_is_partially_uptodate checks whether buffers within a folio are
23228ab22b9aSHisashi Hifumi  * uptodate or not.
23238ab22b9aSHisashi Hifumi  *
23242e7e80f7SMatthew Wilcox (Oracle)  * Returns true if all buffers which correspond to the specified part
23252e7e80f7SMatthew Wilcox (Oracle)  * of the folio are uptodate.
23268ab22b9aSHisashi Hifumi  */
23272e7e80f7SMatthew Wilcox (Oracle) bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
23288ab22b9aSHisashi Hifumi {
23298ab22b9aSHisashi Hifumi 	unsigned block_start, block_end, blocksize;
23308ab22b9aSHisashi Hifumi 	unsigned to;
23318ab22b9aSHisashi Hifumi 	struct buffer_head *bh, *head;
23322e7e80f7SMatthew Wilcox (Oracle) 	bool ret = true;
23338ab22b9aSHisashi Hifumi 
23342e7e80f7SMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
23352e7e80f7SMatthew Wilcox (Oracle) 	if (!head)
23362e7e80f7SMatthew Wilcox (Oracle) 		return false;
233745bce8f3SLinus Torvalds 	blocksize = head->b_size;
23382e7e80f7SMatthew Wilcox (Oracle) 	to = min_t(unsigned, folio_size(folio) - from, count);
23398ab22b9aSHisashi Hifumi 	to = from + to;
23402e7e80f7SMatthew Wilcox (Oracle) 	if (from < blocksize && to > folio_size(folio) - blocksize)
23412e7e80f7SMatthew Wilcox (Oracle) 		return false;
23428ab22b9aSHisashi Hifumi 
23438ab22b9aSHisashi Hifumi 	bh = head;
23448ab22b9aSHisashi Hifumi 	block_start = 0;
23458ab22b9aSHisashi Hifumi 	do {
23468ab22b9aSHisashi Hifumi 		block_end = block_start + blocksize;
23478ab22b9aSHisashi Hifumi 		if (block_end > from && block_start < to) {
23488ab22b9aSHisashi Hifumi 			if (!buffer_uptodate(bh)) {
23492e7e80f7SMatthew Wilcox (Oracle) 				ret = false;
23508ab22b9aSHisashi Hifumi 				break;
23518ab22b9aSHisashi Hifumi 			}
23528ab22b9aSHisashi Hifumi 			if (block_end >= to)
23538ab22b9aSHisashi Hifumi 				break;
23548ab22b9aSHisashi Hifumi 		}
23558ab22b9aSHisashi Hifumi 		block_start = block_end;
23568ab22b9aSHisashi Hifumi 		bh = bh->b_this_page;
23578ab22b9aSHisashi Hifumi 	} while (bh != head);
23588ab22b9aSHisashi Hifumi 
23598ab22b9aSHisashi Hifumi 	return ret;
23608ab22b9aSHisashi Hifumi }
23618ab22b9aSHisashi Hifumi EXPORT_SYMBOL(block_is_partially_uptodate);
23628ab22b9aSHisashi Hifumi 
23638ab22b9aSHisashi Hifumi /*
23642c69e205SMatthew Wilcox (Oracle)  * Generic "read_folio" function for block devices that have the normal
23651da177e4SLinus Torvalds  * get_block functionality. This is most of the block device filesystems.
23662c69e205SMatthew Wilcox (Oracle)  * Reads the folio asynchronously --- the unlock_buffer() and
23671da177e4SLinus Torvalds  * set/clear_buffer_uptodate() functions propagate buffer state into the
23682c69e205SMatthew Wilcox (Oracle)  * folio once IO has completed.
23691da177e4SLinus Torvalds  */
23702c69e205SMatthew Wilcox (Oracle) int block_read_full_folio(struct folio *folio, get_block_t *get_block)
23711da177e4SLinus Torvalds {
23722c69e205SMatthew Wilcox (Oracle) 	struct inode *inode = folio->mapping->host;
23731da177e4SLinus Torvalds 	sector_t iblock, lblock;
23741da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2375fa399c31SMatthew Wilcox (Oracle) 	size_t blocksize;
23761da177e4SLinus Torvalds 	int nr, i;
23771da177e4SLinus Torvalds 	int fully_mapped = 1;
2378b7a6eb22SMatthew Wilcox (Oracle) 	bool page_error = false;
23794fa512ceSEric Biggers 	loff_t limit = i_size_read(inode);
23804fa512ceSEric Biggers 
23814fa512ceSEric Biggers 	/* This is needed for ext4. */
23824fa512ceSEric Biggers 	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
23834fa512ceSEric Biggers 		limit = inode->i_sb->s_maxbytes;
23841da177e4SLinus Torvalds 
23852c69e205SMatthew Wilcox (Oracle) 	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
23862c69e205SMatthew Wilcox (Oracle) 
2387c6c8c3e7SPankaj Raghav 	head = folio_create_buffers(folio, inode, 0);
238845bce8f3SLinus Torvalds 	blocksize = head->b_size;
23891da177e4SLinus Torvalds 
2390fa399c31SMatthew Wilcox (Oracle) 	iblock = div_u64(folio_pos(folio), blocksize);
2391fa399c31SMatthew Wilcox (Oracle) 	lblock = div_u64(limit + blocksize - 1, blocksize);
23921da177e4SLinus Torvalds 	bh = head;
23931da177e4SLinus Torvalds 	nr = 0;
23941da177e4SLinus Torvalds 	i = 0;
23951da177e4SLinus Torvalds 
23961da177e4SLinus Torvalds 	do {
23971da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
23981da177e4SLinus Torvalds 			continue;
23991da177e4SLinus Torvalds 
24001da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
2401c64610baSAndrew Morton 			int err = 0;
2402c64610baSAndrew Morton 
24031da177e4SLinus Torvalds 			fully_mapped = 0;
24041da177e4SLinus Torvalds 			if (iblock < lblock) {
2405b0cf2321SBadari Pulavarty 				WARN_ON(bh->b_size != blocksize);
2406c64610baSAndrew Morton 				err = get_block(inode, iblock, bh, 0);
24077ad635eaSMatthew Wilcox (Oracle) 				if (err)
2408b7a6eb22SMatthew Wilcox (Oracle) 					page_error = true;
2409b7a6eb22SMatthew Wilcox (Oracle) 			}
24101da177e4SLinus Torvalds 			if (!buffer_mapped(bh)) {
24112c69e205SMatthew Wilcox (Oracle) 				folio_zero_range(folio, i * blocksize,
24122c69e205SMatthew Wilcox (Oracle) 						blocksize);
2413c64610baSAndrew Morton 				if (!err)
24141da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
24151da177e4SLinus Torvalds 				continue;
24161da177e4SLinus Torvalds 			}
24171da177e4SLinus Torvalds 			/*
24181da177e4SLinus Torvalds 			 * get_block() might have updated the buffer
24191da177e4SLinus Torvalds 			 * synchronously
24201da177e4SLinus Torvalds 			 */
24211da177e4SLinus Torvalds 			if (buffer_uptodate(bh))
24221da177e4SLinus Torvalds 				continue;
24231da177e4SLinus Torvalds 		}
24241da177e4SLinus Torvalds 		arr[nr++] = bh;
24251da177e4SLinus Torvalds 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
24261da177e4SLinus Torvalds 
24271da177e4SLinus Torvalds 	if (fully_mapped)
24282c69e205SMatthew Wilcox (Oracle) 		folio_set_mappedtodisk(folio);
24291da177e4SLinus Torvalds 
24301da177e4SLinus Torvalds 	if (!nr) {
24311da177e4SLinus Torvalds 		/*
24326ba924d3SMatthew Wilcox (Oracle) 		 * All buffers are uptodate or get_block() returned an
24336ba924d3SMatthew Wilcox (Oracle) 		 * error when trying to map them - we can finish the read.
24341da177e4SLinus Torvalds 		 */
24356ba924d3SMatthew Wilcox (Oracle) 		folio_end_read(folio, !page_error);
24361da177e4SLinus Torvalds 		return 0;
24371da177e4SLinus Torvalds 	}
24381da177e4SLinus Torvalds 
24391da177e4SLinus Torvalds 	/* Stage two: lock the buffers */
24401da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
24411da177e4SLinus Torvalds 		bh = arr[i];
24421da177e4SLinus Torvalds 		lock_buffer(bh);
24431da177e4SLinus Torvalds 		mark_buffer_async_read(bh);
24441da177e4SLinus Torvalds 	}
24451da177e4SLinus Torvalds 
24461da177e4SLinus Torvalds 	/*
24471da177e4SLinus Torvalds 	 * Stage 3: start the IO.  Check for uptodateness
24481da177e4SLinus Torvalds 	 * inside the buffer lock in case another process reading
24491da177e4SLinus Torvalds 	 * the underlying blockdev brought it uptodate (the sct fix).
24501da177e4SLinus Torvalds 	 */
24511da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
24521da177e4SLinus Torvalds 		bh = arr[i];
24531da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
24541da177e4SLinus Torvalds 			end_buffer_async_read(bh, 1);
24551da177e4SLinus Torvalds 		else
24561420c4a5SBart Van Assche 			submit_bh(REQ_OP_READ, bh);
24571da177e4SLinus Torvalds 	}
24581da177e4SLinus Torvalds 	return 0;
24591da177e4SLinus Torvalds }
24602c69e205SMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_read_full_folio);
24611da177e4SLinus Torvalds 
24621da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding
246389e10787SNick Piggin  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
24641da177e4SLinus Torvalds  * deal with the hole.
24651da177e4SLinus Torvalds  */
246689e10787SNick Piggin int generic_cont_expand_simple(struct inode *inode, loff_t size)
24671da177e4SLinus Torvalds {
24681da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
246953b524b8SMatthew Wilcox (Oracle) 	const struct address_space_operations *aops = mapping->a_ops;
2470*1da86618SMatthew Wilcox (Oracle) 	struct folio *folio;
24711468c6f4SAlexander Potapenko 	void *fsdata = NULL;
24721da177e4SLinus Torvalds 	int err;
24731da177e4SLinus Torvalds 
2474c08d3b0eSnpiggin@suse.de 	err = inode_newsize_ok(inode, size);
2475c08d3b0eSnpiggin@suse.de 	if (err)
24761da177e4SLinus Torvalds 		goto out;
24771da177e4SLinus Torvalds 
2478*1da86618SMatthew Wilcox (Oracle) 	err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata);
247989e10787SNick Piggin 	if (err)
248005eb0b51SOGAWA Hirofumi 		goto out;
248105eb0b51SOGAWA Hirofumi 
2482*1da86618SMatthew Wilcox (Oracle) 	err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata);
248389e10787SNick Piggin 	BUG_ON(err > 0);
248405eb0b51SOGAWA Hirofumi 
248505eb0b51SOGAWA Hirofumi out:
248605eb0b51SOGAWA Hirofumi 	return err;
248705eb0b51SOGAWA Hirofumi }
24881fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_cont_expand_simple);
248905eb0b51SOGAWA Hirofumi 
2490f1e3af72SAdrian Bunk static int cont_expand_zero(struct file *file, struct address_space *mapping,
249189e10787SNick Piggin 			    loff_t pos, loff_t *bytes)
249205eb0b51SOGAWA Hirofumi {
249389e10787SNick Piggin 	struct inode *inode = mapping->host;
249453b524b8SMatthew Wilcox (Oracle) 	const struct address_space_operations *aops = mapping->a_ops;
249593407472SFabian Frederick 	unsigned int blocksize = i_blocksize(inode);
2496*1da86618SMatthew Wilcox (Oracle) 	struct folio *folio;
24971468c6f4SAlexander Potapenko 	void *fsdata = NULL;
249889e10787SNick Piggin 	pgoff_t index, curidx;
249989e10787SNick Piggin 	loff_t curpos;
250089e10787SNick Piggin 	unsigned zerofrom, offset, len;
250189e10787SNick Piggin 	int err = 0;
250205eb0b51SOGAWA Hirofumi 
250309cbfeafSKirill A. Shutemov 	index = pos >> PAGE_SHIFT;
250409cbfeafSKirill A. Shutemov 	offset = pos & ~PAGE_MASK;
250589e10787SNick Piggin 
250609cbfeafSKirill A. Shutemov 	while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
250709cbfeafSKirill A. Shutemov 		zerofrom = curpos & ~PAGE_MASK;
250889e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
250989e10787SNick Piggin 			*bytes |= (blocksize-1);
251089e10787SNick Piggin 			(*bytes)++;
251189e10787SNick Piggin 		}
251209cbfeafSKirill A. Shutemov 		len = PAGE_SIZE - zerofrom;
251389e10787SNick Piggin 
251453b524b8SMatthew Wilcox (Oracle) 		err = aops->write_begin(file, mapping, curpos, len,
2515*1da86618SMatthew Wilcox (Oracle) 					    &folio, &fsdata);
251689e10787SNick Piggin 		if (err)
251789e10787SNick Piggin 			goto out;
2518*1da86618SMatthew Wilcox (Oracle) 		folio_zero_range(folio, offset_in_folio(folio, curpos), len);
251953b524b8SMatthew Wilcox (Oracle) 		err = aops->write_end(file, mapping, curpos, len, len,
2520*1da86618SMatthew Wilcox (Oracle) 						folio, fsdata);
252189e10787SNick Piggin 		if (err < 0)
252289e10787SNick Piggin 			goto out;
252389e10787SNick Piggin 		BUG_ON(err != len);
252489e10787SNick Piggin 		err = 0;
2525061e9746SOGAWA Hirofumi 
2526061e9746SOGAWA Hirofumi 		balance_dirty_pages_ratelimited(mapping);
2527c2ca0fcdSMikulas Patocka 
252808d405c8SDavidlohr Bueso 		if (fatal_signal_pending(current)) {
2529c2ca0fcdSMikulas Patocka 			err = -EINTR;
2530c2ca0fcdSMikulas Patocka 			goto out;
2531c2ca0fcdSMikulas Patocka 		}
253289e10787SNick Piggin 	}
253389e10787SNick Piggin 
253489e10787SNick Piggin 	/* page covers the boundary, find the boundary offset */
253589e10787SNick Piggin 	if (index == curidx) {
253609cbfeafSKirill A. Shutemov 		zerofrom = curpos & ~PAGE_MASK;
253789e10787SNick Piggin 		/* if we will expand the thing last block will be filled */
253889e10787SNick Piggin 		if (offset <= zerofrom) {
253989e10787SNick Piggin 			goto out;
254089e10787SNick Piggin 		}
254189e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
254289e10787SNick Piggin 			*bytes |= (blocksize-1);
254389e10787SNick Piggin 			(*bytes)++;
254489e10787SNick Piggin 		}
254589e10787SNick Piggin 		len = offset - zerofrom;
254689e10787SNick Piggin 
254753b524b8SMatthew Wilcox (Oracle) 		err = aops->write_begin(file, mapping, curpos, len,
2548*1da86618SMatthew Wilcox (Oracle) 					    &folio, &fsdata);
254989e10787SNick Piggin 		if (err)
255089e10787SNick Piggin 			goto out;
2551*1da86618SMatthew Wilcox (Oracle) 		folio_zero_range(folio, offset_in_folio(folio, curpos), len);
255253b524b8SMatthew Wilcox (Oracle) 		err = aops->write_end(file, mapping, curpos, len, len,
2553*1da86618SMatthew Wilcox (Oracle) 						folio, fsdata);
255489e10787SNick Piggin 		if (err < 0)
255589e10787SNick Piggin 			goto out;
255689e10787SNick Piggin 		BUG_ON(err != len);
255789e10787SNick Piggin 		err = 0;
255889e10787SNick Piggin 	}
255989e10787SNick Piggin out:
256089e10787SNick Piggin 	return err;
25611da177e4SLinus Torvalds }
25621da177e4SLinus Torvalds 
25631da177e4SLinus Torvalds /*
25641da177e4SLinus Torvalds  * For moronic filesystems that do not allow holes in file.
25651da177e4SLinus Torvalds  * We may have to extend the file.
25661da177e4SLinus Torvalds  */
2567282dc178SChristoph Hellwig int cont_write_begin(struct file *file, struct address_space *mapping,
2568be3bbbc5SMatthew Wilcox (Oracle) 			loff_t pos, unsigned len,
2569*1da86618SMatthew Wilcox (Oracle) 			struct folio **foliop, void **fsdata,
257089e10787SNick Piggin 			get_block_t *get_block, loff_t *bytes)
25711da177e4SLinus Torvalds {
25721da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
257393407472SFabian Frederick 	unsigned int blocksize = i_blocksize(inode);
257493407472SFabian Frederick 	unsigned int zerofrom;
257589e10787SNick Piggin 	int err;
25761da177e4SLinus Torvalds 
257789e10787SNick Piggin 	err = cont_expand_zero(file, mapping, pos, bytes);
257889e10787SNick Piggin 	if (err)
2579155130a4SChristoph Hellwig 		return err;
25801da177e4SLinus Torvalds 
258109cbfeafSKirill A. Shutemov 	zerofrom = *bytes & ~PAGE_MASK;
258289e10787SNick Piggin 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
25831da177e4SLinus Torvalds 		*bytes |= (blocksize-1);
25841da177e4SLinus Torvalds 		(*bytes)++;
25851da177e4SLinus Torvalds 	}
25861da177e4SLinus Torvalds 
2587*1da86618SMatthew Wilcox (Oracle) 	return block_write_begin(mapping, pos, len, foliop, get_block);
25881da177e4SLinus Torvalds }
25891fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(cont_write_begin);
25901da177e4SLinus Torvalds 
2591a524fcfeSBean Huo void block_commit_write(struct page *page, unsigned from, unsigned to)
25921da177e4SLinus Torvalds {
25938c6cb3e3SMatthew Wilcox (Oracle) 	struct folio *folio = page_folio(page);
2594489b7e72SBean Huo 	__block_commit_write(folio, from, to);
25951da177e4SLinus Torvalds }
25961fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_commit_write);
25971da177e4SLinus Torvalds 
259854171690SDavid Chinner /*
259954171690SDavid Chinner  * block_page_mkwrite() is not allowed to change the file size as it gets
260054171690SDavid Chinner  * called from a page fault handler when a page is first dirtied. Hence we must
260154171690SDavid Chinner  * be careful to check for EOF conditions here. We set the page up correctly
260254171690SDavid Chinner  * for a written page which means we get ENOSPC checking when writing into
260354171690SDavid Chinner  * holes and correct delalloc and unwritten extent mapping on filesystems that
260454171690SDavid Chinner  * support these features.
260554171690SDavid Chinner  *
260654171690SDavid Chinner  * We are not allowed to take the i_mutex here so we have to play games to
260754171690SDavid Chinner  * protect against truncate races as the page could now be beyond EOF.  Because
26087bb46a67Snpiggin@suse.de  * truncate writes the inode size before removing pages, once we have the
260954171690SDavid Chinner  * page lock we can determine safely if the page is beyond EOF. If it is not
261054171690SDavid Chinner  * beyond EOF, then the page is guaranteed safe against truncation until we
261154171690SDavid Chinner  * unlock the page.
2612ea13a864SJan Kara  *
261314da9200SJan Kara  * Direct callers of this function should protect against filesystem freezing
26145c500029SRoss Zwisler  * using sb_start_pagefault() - sb_end_pagefault() functions.
261554171690SDavid Chinner  */
26165c500029SRoss Zwisler int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
261754171690SDavid Chinner 			 get_block_t get_block)
261854171690SDavid Chinner {
2619fe181377SMatthew Wilcox (Oracle) 	struct folio *folio = page_folio(vmf->page);
2620496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
262154171690SDavid Chinner 	unsigned long end;
262254171690SDavid Chinner 	loff_t size;
262324da4fabSJan Kara 	int ret;
262454171690SDavid Chinner 
2625fe181377SMatthew Wilcox (Oracle) 	folio_lock(folio);
262654171690SDavid Chinner 	size = i_size_read(inode);
2627fe181377SMatthew Wilcox (Oracle) 	if ((folio->mapping != inode->i_mapping) ||
2628fe181377SMatthew Wilcox (Oracle) 	    (folio_pos(folio) >= size)) {
262924da4fabSJan Kara 		/* We overload EFAULT to mean page got truncated */
263024da4fabSJan Kara 		ret = -EFAULT;
263124da4fabSJan Kara 		goto out_unlock;
263254171690SDavid Chinner 	}
263354171690SDavid Chinner 
2634fe181377SMatthew Wilcox (Oracle) 	end = folio_size(folio);
2635fe181377SMatthew Wilcox (Oracle) 	/* folio is wholly or partially inside EOF */
2636fe181377SMatthew Wilcox (Oracle) 	if (folio_pos(folio) + end > size)
2637fe181377SMatthew Wilcox (Oracle) 		end = size - folio_pos(folio);
263854171690SDavid Chinner 
2639fe181377SMatthew Wilcox (Oracle) 	ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2640a524fcfeSBean Huo 	if (unlikely(ret))
264124da4fabSJan Kara 		goto out_unlock;
2642a524fcfeSBean Huo 
2643a524fcfeSBean Huo 	__block_commit_write(folio, 0, end);
2644a524fcfeSBean Huo 
2645fe181377SMatthew Wilcox (Oracle) 	folio_mark_dirty(folio);
2646fe181377SMatthew Wilcox (Oracle) 	folio_wait_stable(folio);
264724da4fabSJan Kara 	return 0;
264824da4fabSJan Kara out_unlock:
2649fe181377SMatthew Wilcox (Oracle) 	folio_unlock(folio);
265054171690SDavid Chinner 	return ret;
265154171690SDavid Chinner }
26521fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_page_mkwrite);
26531da177e4SLinus Torvalds 
26541da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping,
26551da177e4SLinus Torvalds 			loff_t from, get_block_t *get_block)
26561da177e4SLinus Torvalds {
265709cbfeafSKirill A. Shutemov 	pgoff_t index = from >> PAGE_SHIFT;
26581da177e4SLinus Torvalds 	unsigned blocksize;
265954b21a79SAndrew Morton 	sector_t iblock;
26606d68f644SMatthew Wilcox (Oracle) 	size_t offset, length, pos;
26611da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
26626d68f644SMatthew Wilcox (Oracle) 	struct folio *folio;
26631da177e4SLinus Torvalds 	struct buffer_head *bh;
2664dc7cb2d2SJiapeng Chong 	int err = 0;
26651da177e4SLinus Torvalds 
266693407472SFabian Frederick 	blocksize = i_blocksize(inode);
26676d68f644SMatthew Wilcox (Oracle) 	length = from & (blocksize - 1);
26681da177e4SLinus Torvalds 
26691da177e4SLinus Torvalds 	/* Block boundary? Nothing to do */
26701da177e4SLinus Torvalds 	if (!length)
26711da177e4SLinus Torvalds 		return 0;
26721da177e4SLinus Torvalds 
26731da177e4SLinus Torvalds 	length = blocksize - length;
26744b04646cSMatthew Wilcox (Oracle) 	iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits;
26751da177e4SLinus Torvalds 
26766d68f644SMatthew Wilcox (Oracle) 	folio = filemap_grab_folio(mapping, index);
26776d68f644SMatthew Wilcox (Oracle) 	if (IS_ERR(folio))
26786d68f644SMatthew Wilcox (Oracle) 		return PTR_ERR(folio);
26791da177e4SLinus Torvalds 
26806d68f644SMatthew Wilcox (Oracle) 	bh = folio_buffers(folio);
26813decb856SMatthew Wilcox (Oracle) 	if (!bh)
26820a88810dSMatthew Wilcox (Oracle) 		bh = create_empty_buffers(folio, blocksize, 0);
26831da177e4SLinus Torvalds 
26841da177e4SLinus Torvalds 	/* Find the buffer that contains "offset" */
26856d68f644SMatthew Wilcox (Oracle) 	offset = offset_in_folio(folio, from);
26861da177e4SLinus Torvalds 	pos = blocksize;
26871da177e4SLinus Torvalds 	while (offset >= pos) {
26881da177e4SLinus Torvalds 		bh = bh->b_this_page;
26891da177e4SLinus Torvalds 		iblock++;
26901da177e4SLinus Torvalds 		pos += blocksize;
26911da177e4SLinus Torvalds 	}
26921da177e4SLinus Torvalds 
26931da177e4SLinus Torvalds 	if (!buffer_mapped(bh)) {
2694b0cf2321SBadari Pulavarty 		WARN_ON(bh->b_size != blocksize);
26951da177e4SLinus Torvalds 		err = get_block(inode, iblock, bh, 0);
26961da177e4SLinus Torvalds 		if (err)
26971da177e4SLinus Torvalds 			goto unlock;
26981da177e4SLinus Torvalds 		/* unmapped? It's a hole - nothing to do */
26991da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
27001da177e4SLinus Torvalds 			goto unlock;
27011da177e4SLinus Torvalds 	}
27021da177e4SLinus Torvalds 
27031da177e4SLinus Torvalds 	/* Ok, it's mapped. Make sure it's up-to-date */
27046d68f644SMatthew Wilcox (Oracle) 	if (folio_test_uptodate(folio))
27051da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
27061da177e4SLinus Torvalds 
270733a266ddSDavid Chinner 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2708e7ea1129SZhang Yi 		err = bh_read(bh, 0);
27091da177e4SLinus Torvalds 		/* Uhhuh. Read error. Complain and punt. */
2710e7ea1129SZhang Yi 		if (err < 0)
27111da177e4SLinus Torvalds 			goto unlock;
27121da177e4SLinus Torvalds 	}
27131da177e4SLinus Torvalds 
27146d68f644SMatthew Wilcox (Oracle) 	folio_zero_range(folio, offset, length);
27151da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
27161da177e4SLinus Torvalds 
27171da177e4SLinus Torvalds unlock:
27186d68f644SMatthew Wilcox (Oracle) 	folio_unlock(folio);
27196d68f644SMatthew Wilcox (Oracle) 	folio_put(folio);
2720dc7cb2d2SJiapeng Chong 
27211da177e4SLinus Torvalds 	return err;
27221da177e4SLinus Torvalds }
27231fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_truncate_page);
27241da177e4SLinus Torvalds 
27251da177e4SLinus Torvalds /*
27261da177e4SLinus Torvalds  * The generic ->writepage function for buffer-backed address_spaces
27271da177e4SLinus Torvalds  */
272817bf23a9SMatthew Wilcox (Oracle) int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
272917bf23a9SMatthew Wilcox (Oracle) 		void *get_block)
27301da177e4SLinus Torvalds {
2731bb0ea598SMatthew Wilcox (Oracle) 	struct inode * const inode = folio->mapping->host;
27321da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
27331da177e4SLinus Torvalds 
2734bb0ea598SMatthew Wilcox (Oracle) 	/* Is the folio fully inside i_size? */
2735bb0ea598SMatthew Wilcox (Oracle) 	if (folio_pos(folio) + folio_size(folio) <= i_size)
273614059f66SMatthew Wilcox (Oracle) 		return __block_write_full_folio(inode, folio, get_block, wbc);
27371da177e4SLinus Torvalds 
2738bb0ea598SMatthew Wilcox (Oracle) 	/* Is the folio fully outside i_size? (truncate in progress) */
2739bb0ea598SMatthew Wilcox (Oracle) 	if (folio_pos(folio) >= i_size) {
274053418a18SMatthew Wilcox (Oracle) 		folio_unlock(folio);
27411da177e4SLinus Torvalds 		return 0; /* don't care */
27421da177e4SLinus Torvalds 	}
27431da177e4SLinus Torvalds 
27441da177e4SLinus Torvalds 	/*
2745bb0ea598SMatthew Wilcox (Oracle) 	 * The folio straddles i_size.  It must be zeroed out on each and every
27462a61aa40SAdam Buchbinder 	 * writepage invocation because it may be mmapped.  "A file is mapped
27471da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
27481da177e4SLinus Torvalds 	 * the page size, the remaining memory is zeroed when mapped, and
27491da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
27501da177e4SLinus Torvalds 	 */
2751bb0ea598SMatthew Wilcox (Oracle) 	folio_zero_segment(folio, offset_in_folio(folio, i_size),
2752bb0ea598SMatthew Wilcox (Oracle) 			folio_size(folio));
275314059f66SMatthew Wilcox (Oracle) 	return __block_write_full_folio(inode, folio, get_block, wbc);
275435c80d5fSChris Mason }
275535c80d5fSChris Mason 
27561da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
27571da177e4SLinus Torvalds 			    get_block_t *get_block)
27581da177e4SLinus Torvalds {
27591da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
27602a527d68SAlexander Potapenko 	struct buffer_head tmp = {
27612a527d68SAlexander Potapenko 		.b_size = i_blocksize(inode),
27622a527d68SAlexander Potapenko 	};
27632a527d68SAlexander Potapenko 
27641da177e4SLinus Torvalds 	get_block(inode, block, &tmp, 0);
27651da177e4SLinus Torvalds 	return tmp.b_blocknr;
27661da177e4SLinus Torvalds }
27671fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_block_bmap);
27681da177e4SLinus Torvalds 
27694246a0b6SChristoph Hellwig static void end_bio_bh_io_sync(struct bio *bio)
27701da177e4SLinus Torvalds {
27711da177e4SLinus Torvalds 	struct buffer_head *bh = bio->bi_private;
27721da177e4SLinus Torvalds 
2773b7c44ed9SJens Axboe 	if (unlikely(bio_flagged(bio, BIO_QUIET)))
277408bafc03SKeith Mannthey 		set_bit(BH_Quiet, &bh->b_state);
277508bafc03SKeith Mannthey 
27764e4cbee9SChristoph Hellwig 	bh->b_end_io(bh, !bio->bi_status);
27771da177e4SLinus Torvalds 	bio_put(bio);
27781da177e4SLinus Torvalds }
27791da177e4SLinus Torvalds 
27805bdf402aSRitesh Harjani (IBM) static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
278144981351SBart Van Assche 			  enum rw_hint write_hint,
27821420c4a5SBart Van Assche 			  struct writeback_control *wbc)
27831da177e4SLinus Torvalds {
27841420c4a5SBart Van Assche 	const enum req_op op = opf & REQ_OP_MASK;
27851da177e4SLinus Torvalds 	struct bio *bio;
27861da177e4SLinus Torvalds 
27871da177e4SLinus Torvalds 	BUG_ON(!buffer_locked(bh));
27881da177e4SLinus Torvalds 	BUG_ON(!buffer_mapped(bh));
27891da177e4SLinus Torvalds 	BUG_ON(!bh->b_end_io);
27908fb0e342SAneesh Kumar K.V 	BUG_ON(buffer_delay(bh));
27918fb0e342SAneesh Kumar K.V 	BUG_ON(buffer_unwritten(bh));
27921da177e4SLinus Torvalds 
279348fd4f93SJens Axboe 	/*
279448fd4f93SJens Axboe 	 * Only clear out a write error when rewriting
27951da177e4SLinus Torvalds 	 */
27962a222ca9SMike Christie 	if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
27971da177e4SLinus Torvalds 		clear_buffer_write_io_error(bh);
27981da177e4SLinus Torvalds 
279907888c66SChristoph Hellwig 	if (buffer_meta(bh))
28001420c4a5SBart Van Assche 		opf |= REQ_META;
280107888c66SChristoph Hellwig 	if (buffer_prio(bh))
28021420c4a5SBart Van Assche 		opf |= REQ_PRIO;
280307888c66SChristoph Hellwig 
28041420c4a5SBart Van Assche 	bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
28051da177e4SLinus Torvalds 
28064f74d15fSEric Biggers 	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
28074f74d15fSEric Biggers 
28084f024f37SKent Overstreet 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
280944981351SBart Van Assche 	bio->bi_write_hint = write_hint;
28101da177e4SLinus Torvalds 
2811741af75dSJohannes Thumshirn 	__bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
28121da177e4SLinus Torvalds 
28131da177e4SLinus Torvalds 	bio->bi_end_io = end_bio_bh_io_sync;
28141da177e4SLinus Torvalds 	bio->bi_private = bh;
28151da177e4SLinus Torvalds 
281683c9c547SMing Lei 	/* Take care of bh's that straddle the end of the device */
281783c9c547SMing Lei 	guard_bio_eod(bio);
281883c9c547SMing Lei 
2819fd42df30SDennis Zhou 	if (wbc) {
2820fd42df30SDennis Zhou 		wbc_init_bio(wbc, bio);
282134e51a5eSTejun Heo 		wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
2822fd42df30SDennis Zhou 	}
2823fd42df30SDennis Zhou 
28244e49ea4aSMike Christie 	submit_bio(bio);
28251da177e4SLinus Torvalds }
2826bafc0dbaSTejun Heo 
28275bdf402aSRitesh Harjani (IBM) void submit_bh(blk_opf_t opf, struct buffer_head *bh)
282871368511SDarrick J. Wong {
282944981351SBart Van Assche 	submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL);
283071368511SDarrick J. Wong }
28311fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(submit_bh);
28321da177e4SLinus Torvalds 
28333ae72869SBart Van Assche void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
28349cb569d6SChristoph Hellwig {
28359cb569d6SChristoph Hellwig 	lock_buffer(bh);
28369cb569d6SChristoph Hellwig 	if (!test_clear_buffer_dirty(bh)) {
28379cb569d6SChristoph Hellwig 		unlock_buffer(bh);
28389cb569d6SChristoph Hellwig 		return;
28399cb569d6SChristoph Hellwig 	}
28409cb569d6SChristoph Hellwig 	bh->b_end_io = end_buffer_write_sync;
28419cb569d6SChristoph Hellwig 	get_bh(bh);
28421420c4a5SBart Van Assche 	submit_bh(REQ_OP_WRITE | op_flags, bh);
28439cb569d6SChristoph Hellwig }
28449cb569d6SChristoph Hellwig EXPORT_SYMBOL(write_dirty_buffer);
28459cb569d6SChristoph Hellwig 
28461da177e4SLinus Torvalds /*
28471da177e4SLinus Torvalds  * For a data-integrity writeout, we need to wait upon any in-progress I/O
28481da177e4SLinus Torvalds  * and then start new I/O and then wait upon it.  The caller must have a ref on
28491da177e4SLinus Torvalds  * the buffer_head.
28501da177e4SLinus Torvalds  */
28513ae72869SBart Van Assche int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
28521da177e4SLinus Torvalds {
28531da177e4SLinus Torvalds 	WARN_ON(atomic_read(&bh->b_count) < 1);
28541da177e4SLinus Torvalds 	lock_buffer(bh);
28551da177e4SLinus Torvalds 	if (test_clear_buffer_dirty(bh)) {
2856377254b2SXianting Tian 		/*
2857377254b2SXianting Tian 		 * The bh should be mapped, but it might not be if the
2858377254b2SXianting Tian 		 * device was hot-removed. Not much we can do but fail the I/O.
2859377254b2SXianting Tian 		 */
2860377254b2SXianting Tian 		if (!buffer_mapped(bh)) {
2861377254b2SXianting Tian 			unlock_buffer(bh);
2862377254b2SXianting Tian 			return -EIO;
2863377254b2SXianting Tian 		}
2864377254b2SXianting Tian 
28651da177e4SLinus Torvalds 		get_bh(bh);
28661da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_write_sync;
2867ab620620SRitesh Harjani (IBM) 		submit_bh(REQ_OP_WRITE | op_flags, bh);
28681da177e4SLinus Torvalds 		wait_on_buffer(bh);
2869ab620620SRitesh Harjani (IBM) 		if (!buffer_uptodate(bh))
2870ab620620SRitesh Harjani (IBM) 			return -EIO;
28711da177e4SLinus Torvalds 	} else {
28721da177e4SLinus Torvalds 		unlock_buffer(bh);
28731da177e4SLinus Torvalds 	}
2874ab620620SRitesh Harjani (IBM) 	return 0;
28751da177e4SLinus Torvalds }
287687e99511SChristoph Hellwig EXPORT_SYMBOL(__sync_dirty_buffer);
287787e99511SChristoph Hellwig 
287887e99511SChristoph Hellwig int sync_dirty_buffer(struct buffer_head *bh)
287987e99511SChristoph Hellwig {
288070fd7614SChristoph Hellwig 	return __sync_dirty_buffer(bh, REQ_SYNC);
288187e99511SChristoph Hellwig }
28821fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(sync_dirty_buffer);
28831da177e4SLinus Torvalds 
28841da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh)
28851da177e4SLinus Torvalds {
28861da177e4SLinus Torvalds 	return atomic_read(&bh->b_count) |
28871da177e4SLinus Torvalds 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
28881da177e4SLinus Torvalds }
28891da177e4SLinus Torvalds 
289064394763SMatthew Wilcox (Oracle) static bool
289164394763SMatthew Wilcox (Oracle) drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
28921da177e4SLinus Torvalds {
289364394763SMatthew Wilcox (Oracle) 	struct buffer_head *head = folio_buffers(folio);
28941da177e4SLinus Torvalds 	struct buffer_head *bh;
28951da177e4SLinus Torvalds 
28961da177e4SLinus Torvalds 	bh = head;
28971da177e4SLinus Torvalds 	do {
28981da177e4SLinus Torvalds 		if (buffer_busy(bh))
28991da177e4SLinus Torvalds 			goto failed;
29001da177e4SLinus Torvalds 		bh = bh->b_this_page;
29011da177e4SLinus Torvalds 	} while (bh != head);
29021da177e4SLinus Torvalds 
29031da177e4SLinus Torvalds 	do {
29041da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
29051da177e4SLinus Torvalds 
2906535ee2fbSJan Kara 		if (bh->b_assoc_map)
29071da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
29081da177e4SLinus Torvalds 		bh = next;
29091da177e4SLinus Torvalds 	} while (bh != head);
29101da177e4SLinus Torvalds 	*buffers_to_free = head;
291164394763SMatthew Wilcox (Oracle) 	folio_detach_private(folio);
291264394763SMatthew Wilcox (Oracle) 	return true;
29131da177e4SLinus Torvalds failed:
291464394763SMatthew Wilcox (Oracle) 	return false;
29151da177e4SLinus Torvalds }
29161da177e4SLinus Torvalds 
2917b1888d14SMatthew Wilcox (Oracle) /**
2918b1888d14SMatthew Wilcox (Oracle)  * try_to_free_buffers - Release buffers attached to this folio.
2919b1888d14SMatthew Wilcox (Oracle)  * @folio: The folio.
2920b1888d14SMatthew Wilcox (Oracle)  *
2921b1888d14SMatthew Wilcox (Oracle)  * If any buffers are in use (dirty, under writeback, elevated refcount),
2922b1888d14SMatthew Wilcox (Oracle)  * no buffers will be freed.
2923b1888d14SMatthew Wilcox (Oracle)  *
2924b1888d14SMatthew Wilcox (Oracle)  * If the folio is dirty but all the buffers are clean then we need to
2925b1888d14SMatthew Wilcox (Oracle)  * be sure to mark the folio clean as well.  This is because the folio
2926b1888d14SMatthew Wilcox (Oracle)  * may be against a block device, and a later reattachment of buffers
2927b1888d14SMatthew Wilcox (Oracle)  * to a dirty folio will set *all* buffers dirty.  Which would corrupt
2928b1888d14SMatthew Wilcox (Oracle)  * filesystem data on the same device.
2929b1888d14SMatthew Wilcox (Oracle)  *
2930b1888d14SMatthew Wilcox (Oracle)  * The same applies to regular filesystem folios: if all the buffers are
2931b1888d14SMatthew Wilcox (Oracle)  * clean then we set the folio clean and proceed.  To do that, we require
2932b1888d14SMatthew Wilcox (Oracle)  * total exclusion from block_dirty_folio().  That is obtained with
2933b1888d14SMatthew Wilcox (Oracle)  * i_private_lock.
2934b1888d14SMatthew Wilcox (Oracle)  *
2935b1888d14SMatthew Wilcox (Oracle)  * Exclusion against try_to_free_buffers may be obtained by either
2936b1888d14SMatthew Wilcox (Oracle)  * locking the folio or by holding its mapping's i_private_lock.
2937b1888d14SMatthew Wilcox (Oracle)  *
2938b1888d14SMatthew Wilcox (Oracle)  * Context: Process context.  @folio must be locked.  Will not sleep.
2939b1888d14SMatthew Wilcox (Oracle)  * Return: true if all buffers attached to this folio were freed.
2940b1888d14SMatthew Wilcox (Oracle)  */
294168189fefSMatthew Wilcox (Oracle) bool try_to_free_buffers(struct folio *folio)
29421da177e4SLinus Torvalds {
294368189fefSMatthew Wilcox (Oracle) 	struct address_space * const mapping = folio->mapping;
29441da177e4SLinus Torvalds 	struct buffer_head *buffers_to_free = NULL;
294568189fefSMatthew Wilcox (Oracle) 	bool ret = 0;
29461da177e4SLinus Torvalds 
294768189fefSMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
294868189fefSMatthew Wilcox (Oracle) 	if (folio_test_writeback(folio))
294968189fefSMatthew Wilcox (Oracle) 		return false;
29501da177e4SLinus Torvalds 
29511da177e4SLinus Torvalds 	if (mapping == NULL) {		/* can this still happen? */
295264394763SMatthew Wilcox (Oracle) 		ret = drop_buffers(folio, &buffers_to_free);
29531da177e4SLinus Torvalds 		goto out;
29541da177e4SLinus Torvalds 	}
29551da177e4SLinus Torvalds 
2956600f111eSMatthew Wilcox (Oracle) 	spin_lock(&mapping->i_private_lock);
295764394763SMatthew Wilcox (Oracle) 	ret = drop_buffers(folio, &buffers_to_free);
2958ecdfc978SLinus Torvalds 
2959ecdfc978SLinus Torvalds 	/*
2960ecdfc978SLinus Torvalds 	 * If the filesystem writes its buffers by hand (eg ext3)
296168189fefSMatthew Wilcox (Oracle) 	 * then we can have clean buffers against a dirty folio.  We
296268189fefSMatthew Wilcox (Oracle) 	 * clean the folio here; otherwise the VM will never notice
2963ecdfc978SLinus Torvalds 	 * that the filesystem did any IO at all.
2964ecdfc978SLinus Torvalds 	 *
2965ecdfc978SLinus Torvalds 	 * Also, during truncate, discard_buffer will have marked all
296668189fefSMatthew Wilcox (Oracle) 	 * the folio's buffers clean.  We discover that here and clean
296768189fefSMatthew Wilcox (Oracle) 	 * the folio also.
296887df7241SNick Piggin 	 *
2969600f111eSMatthew Wilcox (Oracle) 	 * i_private_lock must be held over this entire operation in order
2970e621900aSMatthew Wilcox (Oracle) 	 * to synchronise against block_dirty_folio and prevent the
297187df7241SNick Piggin 	 * dirty bit from being lost.
2972ecdfc978SLinus Torvalds 	 */
297311f81becSTejun Heo 	if (ret)
297468189fefSMatthew Wilcox (Oracle) 		folio_cancel_dirty(folio);
2975600f111eSMatthew Wilcox (Oracle) 	spin_unlock(&mapping->i_private_lock);
29761da177e4SLinus Torvalds out:
29771da177e4SLinus Torvalds 	if (buffers_to_free) {
29781da177e4SLinus Torvalds 		struct buffer_head *bh = buffers_to_free;
29791da177e4SLinus Torvalds 
29801da177e4SLinus Torvalds 		do {
29811da177e4SLinus Torvalds 			struct buffer_head *next = bh->b_this_page;
29821da177e4SLinus Torvalds 			free_buffer_head(bh);
29831da177e4SLinus Torvalds 			bh = next;
29841da177e4SLinus Torvalds 		} while (bh != buffers_to_free);
29851da177e4SLinus Torvalds 	}
29861da177e4SLinus Torvalds 	return ret;
29871da177e4SLinus Torvalds }
29881da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers);
29891da177e4SLinus Torvalds 
29901da177e4SLinus Torvalds /*
29911da177e4SLinus Torvalds  * Buffer-head allocation
29921da177e4SLinus Torvalds  */
299368279f9cSAlexey Dobriyan static struct kmem_cache *bh_cachep __ro_after_init;
29941da177e4SLinus Torvalds 
29951da177e4SLinus Torvalds /*
29961da177e4SLinus Torvalds  * Once the number of bh's in the machine exceeds this level, we start
29971da177e4SLinus Torvalds  * stripping them in writeback.
29981da177e4SLinus Torvalds  */
299968279f9cSAlexey Dobriyan static unsigned long max_buffer_heads __ro_after_init;
30001da177e4SLinus Torvalds 
30011da177e4SLinus Torvalds int buffer_heads_over_limit;
30021da177e4SLinus Torvalds 
30031da177e4SLinus Torvalds struct bh_accounting {
30041da177e4SLinus Torvalds 	int nr;			/* Number of live bh's */
30051da177e4SLinus Torvalds 	int ratelimit;		/* Limit cacheline bouncing */
30061da177e4SLinus Torvalds };
30071da177e4SLinus Torvalds 
30081da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
30091da177e4SLinus Torvalds 
30101da177e4SLinus Torvalds static void recalc_bh_state(void)
30111da177e4SLinus Torvalds {
30121da177e4SLinus Torvalds 	int i;
30131da177e4SLinus Torvalds 	int tot = 0;
30141da177e4SLinus Torvalds 
3015ee1be862SChristoph Lameter 	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
30161da177e4SLinus Torvalds 		return;
3017c7b92516SChristoph Lameter 	__this_cpu_write(bh_accounting.ratelimit, 0);
30188a143426SEric Dumazet 	for_each_online_cpu(i)
30191da177e4SLinus Torvalds 		tot += per_cpu(bh_accounting, i).nr;
30201da177e4SLinus Torvalds 	buffer_heads_over_limit = (tot > max_buffer_heads);
30211da177e4SLinus Torvalds }
30221da177e4SLinus Torvalds 
3023dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
30241da177e4SLinus Torvalds {
3025019b4d12SRichard Kennedy 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
30261da177e4SLinus Torvalds 	if (ret) {
3027a35afb83SChristoph Lameter 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3028f1e67e35SThomas Gleixner 		spin_lock_init(&ret->b_uptodate_lock);
3029c7b92516SChristoph Lameter 		preempt_disable();
3030c7b92516SChristoph Lameter 		__this_cpu_inc(bh_accounting.nr);
30311da177e4SLinus Torvalds 		recalc_bh_state();
3032c7b92516SChristoph Lameter 		preempt_enable();
30331da177e4SLinus Torvalds 	}
30341da177e4SLinus Torvalds 	return ret;
30351da177e4SLinus Torvalds }
30361da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head);
30371da177e4SLinus Torvalds 
30381da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh)
30391da177e4SLinus Torvalds {
30401da177e4SLinus Torvalds 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
30411da177e4SLinus Torvalds 	kmem_cache_free(bh_cachep, bh);
3042c7b92516SChristoph Lameter 	preempt_disable();
3043c7b92516SChristoph Lameter 	__this_cpu_dec(bh_accounting.nr);
30441da177e4SLinus Torvalds 	recalc_bh_state();
3045c7b92516SChristoph Lameter 	preempt_enable();
30461da177e4SLinus Torvalds }
30471da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head);
30481da177e4SLinus Torvalds 
3049fc4d24c9SSebastian Andrzej Siewior static int buffer_exit_cpu_dead(unsigned int cpu)
30501da177e4SLinus Torvalds {
30511da177e4SLinus Torvalds 	int i;
30521da177e4SLinus Torvalds 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
30531da177e4SLinus Torvalds 
30541da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
30551da177e4SLinus Torvalds 		brelse(b->bhs[i]);
30561da177e4SLinus Torvalds 		b->bhs[i] = NULL;
30571da177e4SLinus Torvalds 	}
3058c7b92516SChristoph Lameter 	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
30598a143426SEric Dumazet 	per_cpu(bh_accounting, cpu).nr = 0;
3060fc4d24c9SSebastian Andrzej Siewior 	return 0;
30611da177e4SLinus Torvalds }
30621da177e4SLinus Torvalds 
3063389d1b08SAneesh Kumar K.V /**
3064a6b91919SRandy Dunlap  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3065389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head
3066389d1b08SAneesh Kumar K.V  *
3067389d1b08SAneesh Kumar K.V  * Return true if the buffer is up-to-date and false,
3068389d1b08SAneesh Kumar K.V  * with the buffer locked, if not.
3069389d1b08SAneesh Kumar K.V  */
3070389d1b08SAneesh Kumar K.V int bh_uptodate_or_lock(struct buffer_head *bh)
3071389d1b08SAneesh Kumar K.V {
3072389d1b08SAneesh Kumar K.V 	if (!buffer_uptodate(bh)) {
3073389d1b08SAneesh Kumar K.V 		lock_buffer(bh);
3074389d1b08SAneesh Kumar K.V 		if (!buffer_uptodate(bh))
3075389d1b08SAneesh Kumar K.V 			return 0;
3076389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3077389d1b08SAneesh Kumar K.V 	}
3078389d1b08SAneesh Kumar K.V 	return 1;
3079389d1b08SAneesh Kumar K.V }
3080389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_uptodate_or_lock);
3081389d1b08SAneesh Kumar K.V 
3082389d1b08SAneesh Kumar K.V /**
3083fdee117eSZhang Yi  * __bh_read - Submit read for a locked buffer
3084389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head
3085fdee117eSZhang Yi  * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
3086fdee117eSZhang Yi  * @wait: wait until reading finish
3087389d1b08SAneesh Kumar K.V  *
3088fdee117eSZhang Yi  * Returns zero on success or don't wait, and -EIO on error.
3089389d1b08SAneesh Kumar K.V  */
3090fdee117eSZhang Yi int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3091389d1b08SAneesh Kumar K.V {
3092fdee117eSZhang Yi 	int ret = 0;
3093389d1b08SAneesh Kumar K.V 
3094fdee117eSZhang Yi 	BUG_ON(!buffer_locked(bh));
3095389d1b08SAneesh Kumar K.V 
3096389d1b08SAneesh Kumar K.V 	get_bh(bh);
3097389d1b08SAneesh Kumar K.V 	bh->b_end_io = end_buffer_read_sync;
3098fdee117eSZhang Yi 	submit_bh(REQ_OP_READ | op_flags, bh);
3099fdee117eSZhang Yi 	if (wait) {
3100389d1b08SAneesh Kumar K.V 		wait_on_buffer(bh);
3101fdee117eSZhang Yi 		if (!buffer_uptodate(bh))
3102fdee117eSZhang Yi 			ret = -EIO;
3103389d1b08SAneesh Kumar K.V 	}
3104fdee117eSZhang Yi 	return ret;
3105fdee117eSZhang Yi }
3106fdee117eSZhang Yi EXPORT_SYMBOL(__bh_read);
3107fdee117eSZhang Yi 
3108fdee117eSZhang Yi /**
3109fdee117eSZhang Yi  * __bh_read_batch - Submit read for a batch of unlocked buffers
3110fdee117eSZhang Yi  * @nr: entry number of the buffer batch
3111fdee117eSZhang Yi  * @bhs: a batch of struct buffer_head
3112fdee117eSZhang Yi  * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
3113fdee117eSZhang Yi  * @force_lock: force to get a lock on the buffer if set, otherwise drops any
3114fdee117eSZhang Yi  *              buffer that cannot lock.
3115fdee117eSZhang Yi  *
3116fdee117eSZhang Yi  * Returns zero on success or don't wait, and -EIO on error.
3117fdee117eSZhang Yi  */
3118fdee117eSZhang Yi void __bh_read_batch(int nr, struct buffer_head *bhs[],
3119fdee117eSZhang Yi 		     blk_opf_t op_flags, bool force_lock)
3120fdee117eSZhang Yi {
3121fdee117eSZhang Yi 	int i;
3122fdee117eSZhang Yi 
3123fdee117eSZhang Yi 	for (i = 0; i < nr; i++) {
3124fdee117eSZhang Yi 		struct buffer_head *bh = bhs[i];
3125fdee117eSZhang Yi 
3126fdee117eSZhang Yi 		if (buffer_uptodate(bh))
3127fdee117eSZhang Yi 			continue;
3128fdee117eSZhang Yi 
3129fdee117eSZhang Yi 		if (force_lock)
3130fdee117eSZhang Yi 			lock_buffer(bh);
3131fdee117eSZhang Yi 		else
3132fdee117eSZhang Yi 			if (!trylock_buffer(bh))
3133fdee117eSZhang Yi 				continue;
3134fdee117eSZhang Yi 
3135fdee117eSZhang Yi 		if (buffer_uptodate(bh)) {
3136fdee117eSZhang Yi 			unlock_buffer(bh);
3137fdee117eSZhang Yi 			continue;
3138fdee117eSZhang Yi 		}
3139fdee117eSZhang Yi 
3140fdee117eSZhang Yi 		bh->b_end_io = end_buffer_read_sync;
3141fdee117eSZhang Yi 		get_bh(bh);
3142fdee117eSZhang Yi 		submit_bh(REQ_OP_READ | op_flags, bh);
3143fdee117eSZhang Yi 	}
3144fdee117eSZhang Yi }
3145fdee117eSZhang Yi EXPORT_SYMBOL(__bh_read_batch);
3146389d1b08SAneesh Kumar K.V 
31471da177e4SLinus Torvalds void __init buffer_init(void)
31481da177e4SLinus Torvalds {
314943be594aSZhang Yanfei 	unsigned long nrpages;
3150fc4d24c9SSebastian Andrzej Siewior 	int ret;
31511da177e4SLinus Torvalds 
3152de8a3207SKunwu Chan 	bh_cachep = KMEM_CACHE(buffer_head,
3153c997d683SChengming Zhou 				SLAB_RECLAIM_ACCOUNT|SLAB_PANIC);
31541da177e4SLinus Torvalds 	/*
31551da177e4SLinus Torvalds 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
31561da177e4SLinus Torvalds 	 */
31571da177e4SLinus Torvalds 	nrpages = (nr_free_buffer_pages() * 10) / 100;
31581da177e4SLinus Torvalds 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3159fc4d24c9SSebastian Andrzej Siewior 	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3160fc4d24c9SSebastian Andrzej Siewior 					NULL, buffer_exit_cpu_dead);
3161fc4d24c9SSebastian Andrzej Siewior 	WARN_ON(ret < 0);
31621da177e4SLinus Torvalds }
3163