xref: /linux/fs/buffer.c (revision 3822a7c40997dc86b1458766a3f146d62393f084)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  *  linux/fs/buffer.c
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
61da177e4SLinus Torvalds  */
71da177e4SLinus Torvalds 
81da177e4SLinus Torvalds /*
91da177e4SLinus Torvalds  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Removed a lot of unnecessary code and simplified things now that
121da177e4SLinus Torvalds  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
131da177e4SLinus Torvalds  *
141da177e4SLinus Torvalds  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
151da177e4SLinus Torvalds  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
161da177e4SLinus Torvalds  *
171da177e4SLinus Torvalds  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
181da177e4SLinus Torvalds  *
191da177e4SLinus Torvalds  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
201da177e4SLinus Torvalds  */
211da177e4SLinus Torvalds 
221da177e4SLinus Torvalds #include <linux/kernel.h>
23f361bf4aSIngo Molnar #include <linux/sched/signal.h>
241da177e4SLinus Torvalds #include <linux/syscalls.h>
251da177e4SLinus Torvalds #include <linux/fs.h>
26ae259a9cSChristoph Hellwig #include <linux/iomap.h>
271da177e4SLinus Torvalds #include <linux/mm.h>
281da177e4SLinus Torvalds #include <linux/percpu.h>
291da177e4SLinus Torvalds #include <linux/slab.h>
3016f7e0feSRandy Dunlap #include <linux/capability.h>
311da177e4SLinus Torvalds #include <linux/blkdev.h>
321da177e4SLinus Torvalds #include <linux/file.h>
331da177e4SLinus Torvalds #include <linux/quotaops.h>
341da177e4SLinus Torvalds #include <linux/highmem.h>
35630d9c47SPaul Gortmaker #include <linux/export.h>
36bafc0dbaSTejun Heo #include <linux/backing-dev.h>
371da177e4SLinus Torvalds #include <linux/writeback.h>
381da177e4SLinus Torvalds #include <linux/hash.h>
391da177e4SLinus Torvalds #include <linux/suspend.h>
401da177e4SLinus Torvalds #include <linux/buffer_head.h>
4155e829afSAndrew Morton #include <linux/task_io_accounting_ops.h>
421da177e4SLinus Torvalds #include <linux/bio.h>
431da177e4SLinus Torvalds #include <linux/cpu.h>
441da177e4SLinus Torvalds #include <linux/bitops.h>
451da177e4SLinus Torvalds #include <linux/mpage.h>
46fb1c8f93SIngo Molnar #include <linux/bit_spinlock.h>
4729f3ad7dSJan Kara #include <linux/pagevec.h>
48f745c6f5SShakeel Butt #include <linux/sched/mm.h>
495305cb83STejun Heo #include <trace/events/block.h>
5031fb992cSEric Biggers #include <linux/fscrypt.h>
514fa512ceSEric Biggers #include <linux/fsverity.h>
521da177e4SLinus Torvalds 
532b211dc0SBen Dooks #include "internal.h"
542b211dc0SBen Dooks 
551da177e4SLinus Torvalds static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
565bdf402aSRitesh Harjani (IBM) static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
571420c4a5SBart Van Assche 			  struct writeback_control *wbc);
581da177e4SLinus Torvalds 
591da177e4SLinus Torvalds #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
601da177e4SLinus Torvalds 
61f0059afdSTejun Heo inline void touch_buffer(struct buffer_head *bh)
62f0059afdSTejun Heo {
635305cb83STejun Heo 	trace_block_touch_buffer(bh);
6403c5f331SMatthew Wilcox (Oracle) 	folio_mark_accessed(bh->b_folio);
65f0059afdSTejun Heo }
66f0059afdSTejun Heo EXPORT_SYMBOL(touch_buffer);
67f0059afdSTejun Heo 
68fc9b52cdSHarvey Harrison void __lock_buffer(struct buffer_head *bh)
691da177e4SLinus Torvalds {
7074316201SNeilBrown 	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
711da177e4SLinus Torvalds }
721da177e4SLinus Torvalds EXPORT_SYMBOL(__lock_buffer);
731da177e4SLinus Torvalds 
74fc9b52cdSHarvey Harrison void unlock_buffer(struct buffer_head *bh)
751da177e4SLinus Torvalds {
7651b07fc3SNick Piggin 	clear_bit_unlock(BH_Lock, &bh->b_state);
774e857c58SPeter Zijlstra 	smp_mb__after_atomic();
781da177e4SLinus Torvalds 	wake_up_bit(&bh->b_state, BH_Lock);
791da177e4SLinus Torvalds }
801fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(unlock_buffer);
811da177e4SLinus Torvalds 
821da177e4SLinus Torvalds /*
83520f301cSMatthew Wilcox (Oracle)  * Returns whether the folio has dirty or writeback buffers. If all the buffers
84520f301cSMatthew Wilcox (Oracle)  * are unlocked and clean then the folio_test_dirty information is stale. If
85520f301cSMatthew Wilcox (Oracle)  * any of the buffers are locked, it is assumed they are locked for IO.
86b4597226SMel Gorman  */
87520f301cSMatthew Wilcox (Oracle) void buffer_check_dirty_writeback(struct folio *folio,
88b4597226SMel Gorman 				     bool *dirty, bool *writeback)
89b4597226SMel Gorman {
90b4597226SMel Gorman 	struct buffer_head *head, *bh;
91b4597226SMel Gorman 	*dirty = false;
92b4597226SMel Gorman 	*writeback = false;
93b4597226SMel Gorman 
94520f301cSMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
95b4597226SMel Gorman 
96520f301cSMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
97520f301cSMatthew Wilcox (Oracle) 	if (!head)
98b4597226SMel Gorman 		return;
99b4597226SMel Gorman 
100520f301cSMatthew Wilcox (Oracle) 	if (folio_test_writeback(folio))
101b4597226SMel Gorman 		*writeback = true;
102b4597226SMel Gorman 
103b4597226SMel Gorman 	bh = head;
104b4597226SMel Gorman 	do {
105b4597226SMel Gorman 		if (buffer_locked(bh))
106b4597226SMel Gorman 			*writeback = true;
107b4597226SMel Gorman 
108b4597226SMel Gorman 		if (buffer_dirty(bh))
109b4597226SMel Gorman 			*dirty = true;
110b4597226SMel Gorman 
111b4597226SMel Gorman 		bh = bh->b_this_page;
112b4597226SMel Gorman 	} while (bh != head);
113b4597226SMel Gorman }
114b4597226SMel Gorman EXPORT_SYMBOL(buffer_check_dirty_writeback);
115b4597226SMel Gorman 
116b4597226SMel Gorman /*
1171da177e4SLinus Torvalds  * Block until a buffer comes unlocked.  This doesn't stop it
1181da177e4SLinus Torvalds  * from becoming locked again - you have to lock it yourself
1191da177e4SLinus Torvalds  * if you want to preserve its state.
1201da177e4SLinus Torvalds  */
1211da177e4SLinus Torvalds void __wait_on_buffer(struct buffer_head * bh)
1221da177e4SLinus Torvalds {
12374316201SNeilBrown 	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
1241da177e4SLinus Torvalds }
1251fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__wait_on_buffer);
1261da177e4SLinus Torvalds 
127b744c2acSRobert Elliott static void buffer_io_error(struct buffer_head *bh, char *msg)
1281da177e4SLinus Torvalds {
129432f16e6SRobert Elliott 	if (!test_bit(BH_Quiet, &bh->b_state))
130432f16e6SRobert Elliott 		printk_ratelimited(KERN_ERR
131a1c6f057SDmitry Monakhov 			"Buffer I/O error on dev %pg, logical block %llu%s\n",
132a1c6f057SDmitry Monakhov 			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
1331da177e4SLinus Torvalds }
1341da177e4SLinus Torvalds 
1351da177e4SLinus Torvalds /*
13668671f35SDmitry Monakhov  * End-of-IO handler helper function which does not touch the bh after
13768671f35SDmitry Monakhov  * unlocking it.
13868671f35SDmitry Monakhov  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
13968671f35SDmitry Monakhov  * a race there is benign: unlock_buffer() only uses the bh's address for
14068671f35SDmitry Monakhov  * hashing after unlocking the buffer, so it doesn't actually touch the bh
14168671f35SDmitry Monakhov  * itself.
1421da177e4SLinus Torvalds  */
14368671f35SDmitry Monakhov static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
1441da177e4SLinus Torvalds {
1451da177e4SLinus Torvalds 	if (uptodate) {
1461da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
1471da177e4SLinus Torvalds 	} else {
14870246286SChristoph Hellwig 		/* This happens due to failed read-ahead attempts. */
1491da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
1501da177e4SLinus Torvalds 	}
1511da177e4SLinus Torvalds 	unlock_buffer(bh);
15268671f35SDmitry Monakhov }
15368671f35SDmitry Monakhov 
15468671f35SDmitry Monakhov /*
15568671f35SDmitry Monakhov  * Default synchronous end-of-IO handler.  Just mark the buffer up-to-date and
15679f59784SZhang Yi  * unlock the buffer.
15768671f35SDmitry Monakhov  */
15868671f35SDmitry Monakhov void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
15968671f35SDmitry Monakhov {
16068671f35SDmitry Monakhov 	__end_buffer_read_notouch(bh, uptodate);
1611da177e4SLinus Torvalds 	put_bh(bh);
1621da177e4SLinus Torvalds }
1631fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(end_buffer_read_sync);
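
/*
 * Editor's sketch (not part of buffer.c): the classic synchronous-read
 * pattern that end_buffer_read_sync() supports.  This mirrors what
 * bh_read()/__bh_read() do; error handling is simplified.
 */
static int example_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);			/* dropped by end_buffer_read_sync() */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(REQ_OP_READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}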
1641da177e4SLinus Torvalds 
1651da177e4SLinus Torvalds void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
1661da177e4SLinus Torvalds {
1671da177e4SLinus Torvalds 	if (uptodate) {
1681da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
1691da177e4SLinus Torvalds 	} else {
170b744c2acSRobert Elliott 		buffer_io_error(bh, ", lost sync page write");
17187354e5dSJeff Layton 		mark_buffer_write_io_error(bh);
1721da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
1731da177e4SLinus Torvalds 	}
1741da177e4SLinus Torvalds 	unlock_buffer(bh);
1751da177e4SLinus Torvalds 	put_bh(bh);
1761da177e4SLinus Torvalds }
1771fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(end_buffer_write_sync);
1781da177e4SLinus Torvalds 
1791da177e4SLinus Torvalds /*
1801da177e4SLinus Torvalds  * Various filesystems appear to want __find_get_block to be non-blocking.
1811da177e4SLinus Torvalds  * But it's the page lock which protects the buffers.  To get around this,
1821da177e4SLinus Torvalds  * we get exclusion from try_to_free_buffers with the blockdev mapping's
1831da177e4SLinus Torvalds  * private_lock.
1841da177e4SLinus Torvalds  *
185b93b0163SMatthew Wilcox  * Hack idea: for the blockdev mapping, private_lock contention
1861da177e4SLinus Torvalds  * may be quite high.  This code could TryLock the page, and if that
187b93b0163SMatthew Wilcox  * succeeds, there is no need to take private_lock.
1881da177e4SLinus Torvalds  */
1891da177e4SLinus Torvalds static struct buffer_head *
190385fd4c5SCoywolf Qi Hunt __find_get_block_slow(struct block_device *bdev, sector_t block)
1911da177e4SLinus Torvalds {
1921da177e4SLinus Torvalds 	struct inode *bd_inode = bdev->bd_inode;
1931da177e4SLinus Torvalds 	struct address_space *bd_mapping = bd_inode->i_mapping;
1941da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
1951da177e4SLinus Torvalds 	pgoff_t index;
1961da177e4SLinus Torvalds 	struct buffer_head *bh;
1971da177e4SLinus Torvalds 	struct buffer_head *head;
1981da177e4SLinus Torvalds 	struct page *page;
1991da177e4SLinus Torvalds 	int all_mapped = 1;
20043636c80STetsuo Handa 	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
2011da177e4SLinus Torvalds 
20209cbfeafSKirill A. Shutemov 	index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
2032457aec6SMel Gorman 	page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
2041da177e4SLinus Torvalds 	if (!page)
2051da177e4SLinus Torvalds 		goto out;
2061da177e4SLinus Torvalds 
2071da177e4SLinus Torvalds 	spin_lock(&bd_mapping->private_lock);
2081da177e4SLinus Torvalds 	if (!page_has_buffers(page))
2091da177e4SLinus Torvalds 		goto out_unlock;
2101da177e4SLinus Torvalds 	head = page_buffers(page);
2111da177e4SLinus Torvalds 	bh = head;
2121da177e4SLinus Torvalds 	do {
21397f76d3dSNikanth Karthikesan 		if (!buffer_mapped(bh))
21497f76d3dSNikanth Karthikesan 			all_mapped = 0;
21597f76d3dSNikanth Karthikesan 		else if (bh->b_blocknr == block) {
2161da177e4SLinus Torvalds 			ret = bh;
2171da177e4SLinus Torvalds 			get_bh(bh);
2181da177e4SLinus Torvalds 			goto out_unlock;
2191da177e4SLinus Torvalds 		}
2201da177e4SLinus Torvalds 		bh = bh->b_this_page;
2211da177e4SLinus Torvalds 	} while (bh != head);
2221da177e4SLinus Torvalds 
2231da177e4SLinus Torvalds 	/* we might be here because some of the buffers on this page are
2241da177e4SLinus Torvalds 	 * not mapped.  This is due to various races between
2251da177e4SLinus Torvalds 	 * file I/O on the block device and getblk.  It gets dealt with
2261da177e4SLinus Torvalds 	 * elsewhere; don't report an error if we had some unmapped buffers.
2271da177e4SLinus Torvalds 	 */
22843636c80STetsuo Handa 	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
22943636c80STetsuo Handa 	if (all_mapped && __ratelimit(&last_warned)) {
23043636c80STetsuo Handa 		printk("__find_get_block_slow() failed. block=%llu, "
23143636c80STetsuo Handa 		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
23243636c80STetsuo Handa 		       "device %pg blocksize: %d\n",
233205f87f6SBadari Pulavarty 		       (unsigned long long)block,
23443636c80STetsuo Handa 		       (unsigned long long)bh->b_blocknr,
23543636c80STetsuo Handa 		       bh->b_state, bh->b_size, bdev,
23672a2ebd8STao Ma 		       1 << bd_inode->i_blkbits);
2371da177e4SLinus Torvalds 	}
2381da177e4SLinus Torvalds out_unlock:
2391da177e4SLinus Torvalds 	spin_unlock(&bd_mapping->private_lock);
24009cbfeafSKirill A. Shutemov 	put_page(page);
2411da177e4SLinus Torvalds out:
2421da177e4SLinus Torvalds 	return ret;
2431da177e4SLinus Torvalds }
2441da177e4SLinus Torvalds 
2451da177e4SLinus Torvalds static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
2461da177e4SLinus Torvalds {
2471da177e4SLinus Torvalds 	unsigned long flags;
248a3972203SNick Piggin 	struct buffer_head *first;
2491da177e4SLinus Torvalds 	struct buffer_head *tmp;
2502e2dba15SMatthew Wilcox (Oracle) 	struct folio *folio;
2512e2dba15SMatthew Wilcox (Oracle) 	int folio_uptodate = 1;
2521da177e4SLinus Torvalds 
2531da177e4SLinus Torvalds 	BUG_ON(!buffer_async_read(bh));
2541da177e4SLinus Torvalds 
2552e2dba15SMatthew Wilcox (Oracle) 	folio = bh->b_folio;
2561da177e4SLinus Torvalds 	if (uptodate) {
2571da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
2581da177e4SLinus Torvalds 	} else {
2591da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
260b744c2acSRobert Elliott 		buffer_io_error(bh, ", async page read");
2612e2dba15SMatthew Wilcox (Oracle) 		folio_set_error(folio);
2621da177e4SLinus Torvalds 	}
2631da177e4SLinus Torvalds 
2641da177e4SLinus Torvalds 	/*
2651da177e4SLinus Torvalds 	 * Be _very_ careful from here on. Bad things can happen if
2661da177e4SLinus Torvalds 	 * two buffer heads end IO at almost the same time and both
2671da177e4SLinus Torvalds 	 * decide that the page is now completely done.
2681da177e4SLinus Torvalds 	 */
2692e2dba15SMatthew Wilcox (Oracle) 	first = folio_buffers(folio);
270f1e67e35SThomas Gleixner 	spin_lock_irqsave(&first->b_uptodate_lock, flags);
2711da177e4SLinus Torvalds 	clear_buffer_async_read(bh);
2721da177e4SLinus Torvalds 	unlock_buffer(bh);
2731da177e4SLinus Torvalds 	tmp = bh;
2741da177e4SLinus Torvalds 	do {
2751da177e4SLinus Torvalds 		if (!buffer_uptodate(tmp))
2762e2dba15SMatthew Wilcox (Oracle) 			folio_uptodate = 0;
2771da177e4SLinus Torvalds 		if (buffer_async_read(tmp)) {
2781da177e4SLinus Torvalds 			BUG_ON(!buffer_locked(tmp));
2791da177e4SLinus Torvalds 			goto still_busy;
2801da177e4SLinus Torvalds 		}
2811da177e4SLinus Torvalds 		tmp = tmp->b_this_page;
2821da177e4SLinus Torvalds 	} while (tmp != bh);
283f1e67e35SThomas Gleixner 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
2841da177e4SLinus Torvalds 
2851da177e4SLinus Torvalds 	/*
2866e8e79fcSMatthew Wilcox (Oracle) 	 * If all of the buffers are uptodate then we can mark the
2876e8e79fcSMatthew Wilcox (Oracle) 	 * folio uptodate.
2881da177e4SLinus Torvalds 	 */
2892e2dba15SMatthew Wilcox (Oracle) 	if (folio_uptodate)
2902e2dba15SMatthew Wilcox (Oracle) 		folio_mark_uptodate(folio);
2912e2dba15SMatthew Wilcox (Oracle) 	folio_unlock(folio);
2921da177e4SLinus Torvalds 	return;
2931da177e4SLinus Torvalds 
2941da177e4SLinus Torvalds still_busy:
295f1e67e35SThomas Gleixner 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
2961da177e4SLinus Torvalds 	return;
2971da177e4SLinus Torvalds }
2981da177e4SLinus Torvalds 
2994fa512ceSEric Biggers struct postprocess_bh_ctx {
30031fb992cSEric Biggers 	struct work_struct work;
30131fb992cSEric Biggers 	struct buffer_head *bh;
30231fb992cSEric Biggers };
30331fb992cSEric Biggers 
3044fa512ceSEric Biggers static void verify_bh(struct work_struct *work)
3054fa512ceSEric Biggers {
3064fa512ceSEric Biggers 	struct postprocess_bh_ctx *ctx =
3074fa512ceSEric Biggers 		container_of(work, struct postprocess_bh_ctx, work);
3084fa512ceSEric Biggers 	struct buffer_head *bh = ctx->bh;
3094fa512ceSEric Biggers 	bool valid;
3104fa512ceSEric Biggers 
3115d0f0e57SEric Biggers 	valid = fsverity_verify_blocks(page_folio(bh->b_page), bh->b_size,
3125d0f0e57SEric Biggers 				       bh_offset(bh));
3134fa512ceSEric Biggers 	end_buffer_async_read(bh, valid);
3144fa512ceSEric Biggers 	kfree(ctx);
3154fa512ceSEric Biggers }
3164fa512ceSEric Biggers 
3174fa512ceSEric Biggers static bool need_fsverity(struct buffer_head *bh)
3184fa512ceSEric Biggers {
3194fa512ceSEric Biggers 	struct page *page = bh->b_page;
3204fa512ceSEric Biggers 	struct inode *inode = page->mapping->host;
3214fa512ceSEric Biggers 
3224fa512ceSEric Biggers 	return fsverity_active(inode) &&
3234fa512ceSEric Biggers 		/* needed by ext4 */
3244fa512ceSEric Biggers 		page->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
3254fa512ceSEric Biggers }
3264fa512ceSEric Biggers 
32731fb992cSEric Biggers static void decrypt_bh(struct work_struct *work)
32831fb992cSEric Biggers {
3294fa512ceSEric Biggers 	struct postprocess_bh_ctx *ctx =
3304fa512ceSEric Biggers 		container_of(work, struct postprocess_bh_ctx, work);
33131fb992cSEric Biggers 	struct buffer_head *bh = ctx->bh;
33231fb992cSEric Biggers 	int err;
33331fb992cSEric Biggers 
33451e4e315SEric Biggers 	err = fscrypt_decrypt_pagecache_blocks(page_folio(bh->b_page),
33551e4e315SEric Biggers 					       bh->b_size, bh_offset(bh));
3364fa512ceSEric Biggers 	if (err == 0 && need_fsverity(bh)) {
3374fa512ceSEric Biggers 		/*
3384fa512ceSEric Biggers 		 * We use different work queues for decryption and for verity
3394fa512ceSEric Biggers 		 * because verity may require reading metadata pages that need
3404fa512ceSEric Biggers 		 * decryption, and we shouldn't recurse to the same workqueue.
3414fa512ceSEric Biggers 		 */
3424fa512ceSEric Biggers 		INIT_WORK(&ctx->work, verify_bh);
3434fa512ceSEric Biggers 		fsverity_enqueue_verify_work(&ctx->work);
3444fa512ceSEric Biggers 		return;
3454fa512ceSEric Biggers 	}
34631fb992cSEric Biggers 	end_buffer_async_read(bh, err == 0);
34731fb992cSEric Biggers 	kfree(ctx);
34831fb992cSEric Biggers }
34931fb992cSEric Biggers 
35031fb992cSEric Biggers /*
3512c69e205SMatthew Wilcox (Oracle)  * I/O completion handler for block_read_full_folio() - pages
35231fb992cSEric Biggers  * which come unlocked at the end of I/O.
35331fb992cSEric Biggers  */
35431fb992cSEric Biggers static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
35531fb992cSEric Biggers {
356*3822a7c4SLinus Torvalds 	struct inode *inode = bh->b_folio->mapping->host;
3574fa512ceSEric Biggers 	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
3584fa512ceSEric Biggers 	bool verify = need_fsverity(bh);
3594fa512ceSEric Biggers 
3604fa512ceSEric Biggers 	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
3614fa512ceSEric Biggers 	if (uptodate && (decrypt || verify)) {
3624fa512ceSEric Biggers 		struct postprocess_bh_ctx *ctx =
3634fa512ceSEric Biggers 			kmalloc(sizeof(*ctx), GFP_ATOMIC);
36431fb992cSEric Biggers 
36531fb992cSEric Biggers 		if (ctx) {
36631fb992cSEric Biggers 			ctx->bh = bh;
3674fa512ceSEric Biggers 			if (decrypt) {
3684fa512ceSEric Biggers 				INIT_WORK(&ctx->work, decrypt_bh);
36931fb992cSEric Biggers 				fscrypt_enqueue_decrypt_work(&ctx->work);
3704fa512ceSEric Biggers 			} else {
3714fa512ceSEric Biggers 				INIT_WORK(&ctx->work, verify_bh);
3724fa512ceSEric Biggers 				fsverity_enqueue_verify_work(&ctx->work);
3734fa512ceSEric Biggers 			}
37431fb992cSEric Biggers 			return;
37531fb992cSEric Biggers 		}
37631fb992cSEric Biggers 		uptodate = 0;
37731fb992cSEric Biggers 	}
37831fb992cSEric Biggers 	end_buffer_async_read(bh, uptodate);
37931fb992cSEric Biggers }
38031fb992cSEric Biggers 
3811da177e4SLinus Torvalds /*
3821da177e4SLinus Torvalds  * Completion handler for block_write_full_page() - pages which are unlocked
3831da177e4SLinus Torvalds  * during I/O, and which have PageWriteback cleared upon I/O completion.
3841da177e4SLinus Torvalds  */
38535c80d5fSChris Mason void end_buffer_async_write(struct buffer_head *bh, int uptodate)
3861da177e4SLinus Torvalds {
3871da177e4SLinus Torvalds 	unsigned long flags;
388a3972203SNick Piggin 	struct buffer_head *first;
3891da177e4SLinus Torvalds 	struct buffer_head *tmp;
390743ed81eSMatthew Wilcox (Oracle) 	struct folio *folio;
3911da177e4SLinus Torvalds 
3921da177e4SLinus Torvalds 	BUG_ON(!buffer_async_write(bh));
3931da177e4SLinus Torvalds 
394743ed81eSMatthew Wilcox (Oracle) 	folio = bh->b_folio;
3951da177e4SLinus Torvalds 	if (uptodate) {
3961da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
3971da177e4SLinus Torvalds 	} else {
398b744c2acSRobert Elliott 		buffer_io_error(bh, ", lost async page write");
39987354e5dSJeff Layton 		mark_buffer_write_io_error(bh);
4001da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
401743ed81eSMatthew Wilcox (Oracle) 		folio_set_error(folio);
4021da177e4SLinus Torvalds 	}
4031da177e4SLinus Torvalds 
404743ed81eSMatthew Wilcox (Oracle) 	first = folio_buffers(folio);
405f1e67e35SThomas Gleixner 	spin_lock_irqsave(&first->b_uptodate_lock, flags);
406a3972203SNick Piggin 
4071da177e4SLinus Torvalds 	clear_buffer_async_write(bh);
4081da177e4SLinus Torvalds 	unlock_buffer(bh);
4091da177e4SLinus Torvalds 	tmp = bh->b_this_page;
4101da177e4SLinus Torvalds 	while (tmp != bh) {
4111da177e4SLinus Torvalds 		if (buffer_async_write(tmp)) {
4121da177e4SLinus Torvalds 			BUG_ON(!buffer_locked(tmp));
4131da177e4SLinus Torvalds 			goto still_busy;
4141da177e4SLinus Torvalds 		}
4151da177e4SLinus Torvalds 		tmp = tmp->b_this_page;
4161da177e4SLinus Torvalds 	}
417f1e67e35SThomas Gleixner 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
418743ed81eSMatthew Wilcox (Oracle) 	folio_end_writeback(folio);
4191da177e4SLinus Torvalds 	return;
4201da177e4SLinus Torvalds 
4211da177e4SLinus Torvalds still_busy:
422f1e67e35SThomas Gleixner 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
4231da177e4SLinus Torvalds 	return;
4241da177e4SLinus Torvalds }
4251fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(end_buffer_async_write);
4261da177e4SLinus Torvalds 
4271da177e4SLinus Torvalds /*
4281da177e4SLinus Torvalds  * If a page's buffers are under async read-in (end_buffer_async_read
4291da177e4SLinus Torvalds  * completion) then there is a possibility that another thread of
4301da177e4SLinus Torvalds  * control could lock one of the buffers after it has completed
4311da177e4SLinus Torvalds  * but while some of the other buffers have not completed.  This
4321da177e4SLinus Torvalds  * locked buffer would confuse end_buffer_async_read() into not unlocking
4331da177e4SLinus Torvalds  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
4341da177e4SLinus Torvalds  * that this buffer is not under async I/O.
4351da177e4SLinus Torvalds  *
4361da177e4SLinus Torvalds  * The page comes unlocked when it has no locked buffer_async buffers
4371da177e4SLinus Torvalds  * left.
4381da177e4SLinus Torvalds  *
4391da177e4SLinus Torvalds  * PageLocked prevents anyone starting new async I/O reads any of
4401da177e4SLinus Torvalds  * the buffers.
4411da177e4SLinus Torvalds  *
4421da177e4SLinus Torvalds  * PageWriteback is used to prevent simultaneous writeout of the same
4431da177e4SLinus Torvalds  * page.
4441da177e4SLinus Torvalds  *
4451da177e4SLinus Torvalds  * PageLocked prevents anyone from starting writeback of a page which is
4461da177e4SLinus Torvalds  * under read I/O (PageWriteback is only ever set against a locked page).
4471da177e4SLinus Torvalds  */
4481da177e4SLinus Torvalds static void mark_buffer_async_read(struct buffer_head *bh)
4491da177e4SLinus Torvalds {
45031fb992cSEric Biggers 	bh->b_end_io = end_buffer_async_read_io;
4511da177e4SLinus Torvalds 	set_buffer_async_read(bh);
4521da177e4SLinus Torvalds }
4531da177e4SLinus Torvalds 
4541fe72eaaSH Hartley Sweeten static void mark_buffer_async_write_endio(struct buffer_head *bh,
45535c80d5fSChris Mason 					  bh_end_io_t *handler)
45635c80d5fSChris Mason {
45735c80d5fSChris Mason 	bh->b_end_io = handler;
45835c80d5fSChris Mason 	set_buffer_async_write(bh);
45935c80d5fSChris Mason }
46035c80d5fSChris Mason 
4611da177e4SLinus Torvalds void mark_buffer_async_write(struct buffer_head *bh)
4621da177e4SLinus Torvalds {
46335c80d5fSChris Mason 	mark_buffer_async_write_endio(bh, end_buffer_async_write);
4641da177e4SLinus Torvalds }
4651da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_async_write);
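
/*
 * Editor's sketch (illustrative only): how a writepage-style path uses
 * mark_buffer_async_write() before submitting each buffer, roughly as
 * __block_write_full_page() does.  Locking and wbc details are omitted.
 */
static void example_write_page_buffers(struct buffer_head *head)
{
	struct buffer_head *bh = head;

	/* First pass: claim every dirty buffer for async writeout. */
	do {
		lock_buffer(bh);
		if (test_clear_buffer_dirty(bh))
			mark_buffer_async_write(bh); /* completes in end_buffer_async_write() */
		else
			unlock_buffer(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	/* Second pass: submit everything marked async_write. */
	do {
		if (buffer_async_write(bh))
			submit_bh(REQ_OP_WRITE, bh);
		bh = bh->b_this_page;
	} while (bh != head);
}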
4661da177e4SLinus Torvalds 
4671da177e4SLinus Torvalds 
4681da177e4SLinus Torvalds /*
4691da177e4SLinus Torvalds  * fs/buffer.c contains helper functions for buffer-backed address_spaces'
4701da177e4SLinus Torvalds  * fsync functions.  A common requirement for buffer-based filesystems is
4711da177e4SLinus Torvalds  * that certain data from the backing blockdev needs to be written out for
4721da177e4SLinus Torvalds  * a successful fsync().  For example, ext2 indirect blocks need to be
4731da177e4SLinus Torvalds  * written back and waited upon before fsync() returns.
4741da177e4SLinus Torvalds  *
4751da177e4SLinus Torvalds  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
4761da177e4SLinus Torvalds  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
4771da177e4SLinus Torvalds  * management of a list of dependent buffers at ->i_mapping->private_list.
4781da177e4SLinus Torvalds  *
4791da177e4SLinus Torvalds  * Locking is a little subtle: try_to_free_buffers() will remove buffers
4801da177e4SLinus Torvalds  * from their controlling inode's queue when they are being freed.  But
4811da177e4SLinus Torvalds  * try_to_free_buffers() will be operating against the *blockdev* mapping
4821da177e4SLinus Torvalds  * at the time, not against the S_ISREG file which depends on those buffers.
4831da177e4SLinus Torvalds  * So the locking for private_list is via the private_lock in the address_space
4841da177e4SLinus Torvalds  * which backs the buffers.  Which is different from the address_space
4851da177e4SLinus Torvalds  * against which the buffers are listed.  So for a particular address_space,
4861da177e4SLinus Torvalds  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
4871da177e4SLinus Torvalds  * mapping->private_list will always be protected by the backing blockdev's
4881da177e4SLinus Torvalds  * ->private_lock.
4891da177e4SLinus Torvalds  *
4901da177e4SLinus Torvalds  * Which introduces a requirement: all buffers on an address_space's
4911da177e4SLinus Torvalds  * ->private_list must be from the same address_space: the blockdev's.
4921da177e4SLinus Torvalds  *
4931da177e4SLinus Torvalds  * address_spaces which do not place buffers at ->private_list via these
4941da177e4SLinus Torvalds  * utility functions are free to use private_lock and private_list for
4951da177e4SLinus Torvalds  * whatever they want.  The only requirement is that list_empty(private_list)
4961da177e4SLinus Torvalds  * be true at clear_inode() time.
4971da177e4SLinus Torvalds  *
4981da177e4SLinus Torvalds  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
4991da177e4SLinus Torvalds  * filesystems should do that.  invalidate_inode_buffers() should just go
5001da177e4SLinus Torvalds  * BUG_ON(!list_empty).
5011da177e4SLinus Torvalds  *
5021da177e4SLinus Torvalds  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
5031da177e4SLinus Torvalds  * take an address_space, not an inode.  And it should be called
5041da177e4SLinus Torvalds  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
5051da177e4SLinus Torvalds  * queued up.
5061da177e4SLinus Torvalds  *
5071da177e4SLinus Torvalds  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
5081da177e4SLinus Torvalds  * list if it is already on a list.  Because if the buffer is on a list,
5091da177e4SLinus Torvalds  * it *must* already be on the right one.  If not, the filesystem is being
5101da177e4SLinus Torvalds  * silly.  This will save a ton of locking.  But first we have to ensure
5111da177e4SLinus Torvalds  * that buffers are taken *off* the old inode's list when they are freed
5121da177e4SLinus Torvalds  * (presumably in truncate).  That requires careful auditing of all
5131da177e4SLinus Torvalds  * filesystems (do it inside bforget()).  It could also be done by bringing
5141da177e4SLinus Torvalds  * b_inode back.
5151da177e4SLinus Torvalds  */
5161da177e4SLinus Torvalds 
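/*
 * Editor's sketch (hypothetical filesystem code, not part of buffer.c):
 * the intended use of the private_list machinery described above.  A
 * filesystem dirties a dependent metadata buffer against the regular
 * file's mapping, then writes out and waits on it at fsync time.
 */
static int example_fs_fsync(struct inode *inode, struct buffer_head *meta_bh)
{
	/* Queue the blockdev buffer on inode->i_mapping->private_list. */
	mark_buffer_dirty_inode(meta_bh, inode);

	/* Later, at fsync(): write out and wait on all queued buffers. */
	return sync_mapping_buffers(inode->i_mapping);
}
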
5171da177e4SLinus Torvalds /*
5181da177e4SLinus Torvalds  * The buffer's backing address_space's private_lock must be held
5191da177e4SLinus Torvalds  */
520dbacefc9SThomas Petazzoni static void __remove_assoc_queue(struct buffer_head *bh)
5211da177e4SLinus Torvalds {
5221da177e4SLinus Torvalds 	list_del_init(&bh->b_assoc_buffers);
52358ff407bSJan Kara 	WARN_ON(!bh->b_assoc_map);
52458ff407bSJan Kara 	bh->b_assoc_map = NULL;
5251da177e4SLinus Torvalds }
5261da177e4SLinus Torvalds 
5271da177e4SLinus Torvalds int inode_has_buffers(struct inode *inode)
5281da177e4SLinus Torvalds {
5291da177e4SLinus Torvalds 	return !list_empty(&inode->i_data.private_list);
5301da177e4SLinus Torvalds }
5311da177e4SLinus Torvalds 
5321da177e4SLinus Torvalds /*
5331da177e4SLinus Torvalds  * osync is designed to support O_SYNC I/O.  It waits synchronously for
5341da177e4SLinus Torvalds  * all already-submitted IO to complete, but does not queue any new
5351da177e4SLinus Torvalds  * writes to the disk.
5361da177e4SLinus Torvalds  *
53779f59784SZhang Yi  * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
53879f59784SZhang Yi  * as you dirty the buffers, and then use osync_inode_buffers to wait for
5391da177e4SLinus Torvalds  * completion.  Any other dirty buffers which are not yet queued for
5401da177e4SLinus Torvalds  * write will not be flushed to disk by the osync.
5411da177e4SLinus Torvalds  */
5421da177e4SLinus Torvalds static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
5431da177e4SLinus Torvalds {
5441da177e4SLinus Torvalds 	struct buffer_head *bh;
5451da177e4SLinus Torvalds 	struct list_head *p;
5461da177e4SLinus Torvalds 	int err = 0;
5471da177e4SLinus Torvalds 
5481da177e4SLinus Torvalds 	spin_lock(lock);
5491da177e4SLinus Torvalds repeat:
5501da177e4SLinus Torvalds 	list_for_each_prev(p, list) {
5511da177e4SLinus Torvalds 		bh = BH_ENTRY(p);
5521da177e4SLinus Torvalds 		if (buffer_locked(bh)) {
5531da177e4SLinus Torvalds 			get_bh(bh);
5541da177e4SLinus Torvalds 			spin_unlock(lock);
5551da177e4SLinus Torvalds 			wait_on_buffer(bh);
5561da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
5571da177e4SLinus Torvalds 				err = -EIO;
5581da177e4SLinus Torvalds 			brelse(bh);
5591da177e4SLinus Torvalds 			spin_lock(lock);
5601da177e4SLinus Torvalds 			goto repeat;
5611da177e4SLinus Torvalds 		}
5621da177e4SLinus Torvalds 	}
5631da177e4SLinus Torvalds 	spin_unlock(lock);
5641da177e4SLinus Torvalds 	return err;
5651da177e4SLinus Torvalds }
5661da177e4SLinus Torvalds 
56708fdc8a0SMateusz Guzik void emergency_thaw_bdev(struct super_block *sb)
568c2d75438SEric Sandeen {
569040f04bdSChristoph Hellwig 	while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
570a1c6f057SDmitry Monakhov 		printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
571c2d75438SEric Sandeen }
57201a05b33SAl Viro 
5731da177e4SLinus Torvalds /**
57478a4a50aSRandy Dunlap  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
57567be2dd1SMartin Waitz  * @mapping: the mapping which wants those buffers written
5761da177e4SLinus Torvalds  *
5771da177e4SLinus Torvalds  * Starts I/O against the buffers at mapping->private_list, and waits upon
5781da177e4SLinus Torvalds  * that I/O.
5791da177e4SLinus Torvalds  *
58067be2dd1SMartin Waitz  * Basically, this is a convenience function for fsync().
58167be2dd1SMartin Waitz  * @mapping is a file or directory which needs those buffers to be written for
58267be2dd1SMartin Waitz  * a successful fsync().
5831da177e4SLinus Torvalds  */
5841da177e4SLinus Torvalds int sync_mapping_buffers(struct address_space *mapping)
5851da177e4SLinus Torvalds {
586252aa6f5SRafael Aquini 	struct address_space *buffer_mapping = mapping->private_data;
5871da177e4SLinus Torvalds 
5881da177e4SLinus Torvalds 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
5891da177e4SLinus Torvalds 		return 0;
5901da177e4SLinus Torvalds 
5911da177e4SLinus Torvalds 	return fsync_buffers_list(&buffer_mapping->private_lock,
5921da177e4SLinus Torvalds 					&mapping->private_list);
5931da177e4SLinus Torvalds }
5941da177e4SLinus Torvalds EXPORT_SYMBOL(sync_mapping_buffers);
5951da177e4SLinus Torvalds 
5961da177e4SLinus Torvalds /*
5971da177e4SLinus Torvalds  * Called when we've recently written block `bblock', and it is known that
5981da177e4SLinus Torvalds  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
5991da177e4SLinus Torvalds  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
6001da177e4SLinus Torvalds  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
6011da177e4SLinus Torvalds  */
6021da177e4SLinus Torvalds void write_boundary_block(struct block_device *bdev,
6031da177e4SLinus Torvalds 			sector_t bblock, unsigned blocksize)
6041da177e4SLinus Torvalds {
6051da177e4SLinus Torvalds 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
6061da177e4SLinus Torvalds 	if (bh) {
6071da177e4SLinus Torvalds 		if (buffer_dirty(bh))
608e7ea1129SZhang Yi 			write_dirty_buffer(bh, 0);
6091da177e4SLinus Torvalds 		put_bh(bh);
6101da177e4SLinus Torvalds 	}
6111da177e4SLinus Torvalds }
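
/*
 * Editor's note (illustrative): a caller that has just written a buffer
 * which had buffer_boundary() set might nudge the neighbouring indirect
 * block out as well, e.g.:
 *
 *	write_boundary_block(bh->b_bdev, bh->b_blocknr, bh->b_size);
 *
 * fs/mpage.c uses this pattern after submitting a boundary block.
 */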
6121da177e4SLinus Torvalds 
6131da177e4SLinus Torvalds void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
6141da177e4SLinus Torvalds {
6151da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
616abc8a8a2SMatthew Wilcox (Oracle) 	struct address_space *buffer_mapping = bh->b_folio->mapping;
6171da177e4SLinus Torvalds 
6181da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
619252aa6f5SRafael Aquini 	if (!mapping->private_data) {
620252aa6f5SRafael Aquini 		mapping->private_data = buffer_mapping;
6211da177e4SLinus Torvalds 	} else {
622252aa6f5SRafael Aquini 		BUG_ON(mapping->private_data != buffer_mapping);
6231da177e4SLinus Torvalds 	}
624535ee2fbSJan Kara 	if (!bh->b_assoc_map) {
6251da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
6261da177e4SLinus Torvalds 		list_move_tail(&bh->b_assoc_buffers,
6271da177e4SLinus Torvalds 				&mapping->private_list);
62858ff407bSJan Kara 		bh->b_assoc_map = mapping;
6291da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
6301da177e4SLinus Torvalds 	}
6311da177e4SLinus Torvalds }
6321da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_dirty_inode);
6331da177e4SLinus Torvalds 
6341da177e4SLinus Torvalds /*
6351da177e4SLinus Torvalds  * Add a page to the dirty page list.
6361da177e4SLinus Torvalds  *
6371da177e4SLinus Torvalds  * It is a sad fact of life that this function is called from several places
6381da177e4SLinus Torvalds  * deeply under spinlocking.  It may not sleep.
6391da177e4SLinus Torvalds  *
6401da177e4SLinus Torvalds  * If the page has buffers, the uptodate buffers are set dirty, to preserve
6411da177e4SLinus Torvalds  * dirty-state coherency between the page and the buffers.  If the page does
6421da177e4SLinus Torvalds  * not have buffers then when they are later attached they will all be set
6431da177e4SLinus Torvalds  * dirty.
6441da177e4SLinus Torvalds  *
6451da177e4SLinus Torvalds  * The buffers are dirtied before the page is dirtied.  There's a small race
6461da177e4SLinus Torvalds  * window in which a writepage caller may see the page cleanness but not the
6471da177e4SLinus Torvalds  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
6481da177e4SLinus Torvalds  * before the buffers, a concurrent writepage caller could clear the page dirty
6491da177e4SLinus Torvalds  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
6501da177e4SLinus Torvalds  * page on the dirty page list.
6511da177e4SLinus Torvalds  *
6521da177e4SLinus Torvalds  * We use private_lock to lock against try_to_free_buffers while using the
6531da177e4SLinus Torvalds  * page's buffer list.  Also use this to protect against clean buffers being
6541da177e4SLinus Torvalds  * added to the page after it was set dirty.
6551da177e4SLinus Torvalds  *
6561da177e4SLinus Torvalds  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
6571da177e4SLinus Torvalds  * address_space though.
6581da177e4SLinus Torvalds  */
659e621900aSMatthew Wilcox (Oracle) bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
6601da177e4SLinus Torvalds {
661e621900aSMatthew Wilcox (Oracle) 	struct buffer_head *head;
662e621900aSMatthew Wilcox (Oracle) 	bool newly_dirty;
6631da177e4SLinus Torvalds 
6641da177e4SLinus Torvalds 	spin_lock(&mapping->private_lock);
665e621900aSMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
666e621900aSMatthew Wilcox (Oracle) 	if (head) {
6671da177e4SLinus Torvalds 		struct buffer_head *bh = head;
6681da177e4SLinus Torvalds 
6691da177e4SLinus Torvalds 		do {
6701da177e4SLinus Torvalds 			set_buffer_dirty(bh);
6711da177e4SLinus Torvalds 			bh = bh->b_this_page;
6721da177e4SLinus Torvalds 		} while (bh != head);
6731da177e4SLinus Torvalds 	}
674c4843a75SGreg Thelen 	/*
675bcfe06bfSRoman Gushchin 	 * Lock out page's memcg migration to keep PageDirty
67681f8c3a4SJohannes Weiner 	 * synchronized with per-memcg dirty page counters.
677c4843a75SGreg Thelen 	 */
678e621900aSMatthew Wilcox (Oracle) 	folio_memcg_lock(folio);
679e621900aSMatthew Wilcox (Oracle) 	newly_dirty = !folio_test_set_dirty(folio);
6801da177e4SLinus Torvalds 	spin_unlock(&mapping->private_lock);
6811da177e4SLinus Torvalds 
682a8e7d49aSLinus Torvalds 	if (newly_dirty)
683e621900aSMatthew Wilcox (Oracle) 		__folio_mark_dirty(folio, mapping, 1);
684c4843a75SGreg Thelen 
685e621900aSMatthew Wilcox (Oracle) 	folio_memcg_unlock(folio);
686c4843a75SGreg Thelen 
687c4843a75SGreg Thelen 	if (newly_dirty)
688c4843a75SGreg Thelen 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
689c4843a75SGreg Thelen 
690a8e7d49aSLinus Torvalds 	return newly_dirty;
6911da177e4SLinus Torvalds }
692e621900aSMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_dirty_folio);
6931da177e4SLinus Torvalds 
6941da177e4SLinus Torvalds /*
6951da177e4SLinus Torvalds  * Write out and wait upon a list of buffers.
6961da177e4SLinus Torvalds  *
6971da177e4SLinus Torvalds  * We have conflicting pressures: we want to make sure that all
6981da177e4SLinus Torvalds  * initially dirty buffers get waited on, but that any subsequently
6991da177e4SLinus Torvalds  * dirtied buffers don't.  After all, we don't want fsync to last
7001da177e4SLinus Torvalds  * forever if somebody is actively writing to the file.
7011da177e4SLinus Torvalds  *
7021da177e4SLinus Torvalds  * Do this in two main stages: first we copy dirty buffers to a
7031da177e4SLinus Torvalds  * temporary inode list, queueing the writes as we go.  Then we clean
7041da177e4SLinus Torvalds  * up, waiting for those writes to complete.
7051da177e4SLinus Torvalds  *
7061da177e4SLinus Torvalds  * During this second stage, any subsequent updates to the file may end
7071da177e4SLinus Torvalds  * up refiling the buffer on the original inode's dirty list again, so
7081da177e4SLinus Torvalds  * there is a chance we will end up with a buffer queued for write but
7091da177e4SLinus Torvalds  * not yet completed on that list.  So, as a final cleanup we go through
7101da177e4SLinus Torvalds  * the osync code to catch these locked, dirty buffers without requeuing
7111da177e4SLinus Torvalds  * any newly dirty buffers for write.
7121da177e4SLinus Torvalds  */
7131da177e4SLinus Torvalds static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
7141da177e4SLinus Torvalds {
7151da177e4SLinus Torvalds 	struct buffer_head *bh;
7161da177e4SLinus Torvalds 	struct list_head tmp;
7177eaceaccSJens Axboe 	struct address_space *mapping;
7181da177e4SLinus Torvalds 	int err = 0, err2;
7194ee2491eSJens Axboe 	struct blk_plug plug;
7201da177e4SLinus Torvalds 
7211da177e4SLinus Torvalds 	INIT_LIST_HEAD(&tmp);
7224ee2491eSJens Axboe 	blk_start_plug(&plug);
7231da177e4SLinus Torvalds 
7241da177e4SLinus Torvalds 	spin_lock(lock);
7251da177e4SLinus Torvalds 	while (!list_empty(list)) {
7261da177e4SLinus Torvalds 		bh = BH_ENTRY(list->next);
727535ee2fbSJan Kara 		mapping = bh->b_assoc_map;
72858ff407bSJan Kara 		__remove_assoc_queue(bh);
729535ee2fbSJan Kara 		/* Avoid race with mark_buffer_dirty_inode() which does
730535ee2fbSJan Kara 		 * a lockless check; we rely on seeing the dirty bit */
731535ee2fbSJan Kara 		smp_mb();
7321da177e4SLinus Torvalds 		if (buffer_dirty(bh) || buffer_locked(bh)) {
7331da177e4SLinus Torvalds 			list_add(&bh->b_assoc_buffers, &tmp);
734535ee2fbSJan Kara 			bh->b_assoc_map = mapping;
7351da177e4SLinus Torvalds 			if (buffer_dirty(bh)) {
7361da177e4SLinus Torvalds 				get_bh(bh);
7371da177e4SLinus Torvalds 				spin_unlock(lock);
7381da177e4SLinus Torvalds 				/*
7391da177e4SLinus Torvalds 				 * Ensure any pending I/O completes so that
7409cb569d6SChristoph Hellwig 				 * write_dirty_buffer() actually writes the
7419cb569d6SChristoph Hellwig 				 * current contents - it is a noop if I/O is
7429cb569d6SChristoph Hellwig 				 * still in flight on potentially older
7439cb569d6SChristoph Hellwig 				 * contents.
7441da177e4SLinus Torvalds 				 */
74570fd7614SChristoph Hellwig 				write_dirty_buffer(bh, REQ_SYNC);
7469cf6b720SJens Axboe 
7479cf6b720SJens Axboe 				/*
7489cf6b720SJens Axboe 				 * Kick off IO for the previous mapping. Note
7499cf6b720SJens Axboe 				 * that we will not run the very last mapping,
7509cf6b720SJens Axboe 				 * wait_on_buffer() will do that for us
7519cf6b720SJens Axboe 				 * through sync_buffer().
7529cf6b720SJens Axboe 				 */
7531da177e4SLinus Torvalds 				brelse(bh);
7541da177e4SLinus Torvalds 				spin_lock(lock);
7551da177e4SLinus Torvalds 			}
7561da177e4SLinus Torvalds 		}
7571da177e4SLinus Torvalds 	}
7581da177e4SLinus Torvalds 
7594ee2491eSJens Axboe 	spin_unlock(lock);
7604ee2491eSJens Axboe 	blk_finish_plug(&plug);
7614ee2491eSJens Axboe 	spin_lock(lock);
7624ee2491eSJens Axboe 
7631da177e4SLinus Torvalds 	while (!list_empty(&tmp)) {
7641da177e4SLinus Torvalds 		bh = BH_ENTRY(tmp.prev);
7651da177e4SLinus Torvalds 		get_bh(bh);
766535ee2fbSJan Kara 		mapping = bh->b_assoc_map;
767535ee2fbSJan Kara 		__remove_assoc_queue(bh);
768535ee2fbSJan Kara 		/* Avoid race with mark_buffer_dirty_inode() which does
769535ee2fbSJan Kara 		 * a lockless check; we rely on seeing the dirty bit */
770535ee2fbSJan Kara 		smp_mb();
771535ee2fbSJan Kara 		if (buffer_dirty(bh)) {
772535ee2fbSJan Kara 			list_add(&bh->b_assoc_buffers,
773e3892296SJan Kara 				 &mapping->private_list);
774535ee2fbSJan Kara 			bh->b_assoc_map = mapping;
775535ee2fbSJan Kara 		}
7761da177e4SLinus Torvalds 		spin_unlock(lock);
7771da177e4SLinus Torvalds 		wait_on_buffer(bh);
7781da177e4SLinus Torvalds 		if (!buffer_uptodate(bh))
7791da177e4SLinus Torvalds 			err = -EIO;
7801da177e4SLinus Torvalds 		brelse(bh);
7811da177e4SLinus Torvalds 		spin_lock(lock);
7821da177e4SLinus Torvalds 	}
7831da177e4SLinus Torvalds 
7841da177e4SLinus Torvalds 	spin_unlock(lock);
7851da177e4SLinus Torvalds 	err2 = osync_buffers_list(lock, list);
7861da177e4SLinus Torvalds 	if (err)
7871da177e4SLinus Torvalds 		return err;
7881da177e4SLinus Torvalds 	else
7891da177e4SLinus Torvalds 		return err2;
7901da177e4SLinus Torvalds }
7911da177e4SLinus Torvalds 
7921da177e4SLinus Torvalds /*
7931da177e4SLinus Torvalds  * Invalidate any and all dirty buffers on a given inode.  We are
7941da177e4SLinus Torvalds  * probably unmounting the fs, but that doesn't mean we have already
7951da177e4SLinus Torvalds  * done a sync().  Just drop the buffers from the inode list.
7961da177e4SLinus Torvalds  *
7971da177e4SLinus Torvalds  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
7981da177e4SLinus Torvalds  * assumes that all the buffers are against the blockdev.  Not true
7991da177e4SLinus Torvalds  * for reiserfs.
8001da177e4SLinus Torvalds  */
8011da177e4SLinus Torvalds void invalidate_inode_buffers(struct inode *inode)
8021da177e4SLinus Torvalds {
8031da177e4SLinus Torvalds 	if (inode_has_buffers(inode)) {
8041da177e4SLinus Torvalds 		struct address_space *mapping = &inode->i_data;
8051da177e4SLinus Torvalds 		struct list_head *list = &mapping->private_list;
806252aa6f5SRafael Aquini 		struct address_space *buffer_mapping = mapping->private_data;
8071da177e4SLinus Torvalds 
8081da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
8091da177e4SLinus Torvalds 		while (!list_empty(list))
8101da177e4SLinus Torvalds 			__remove_assoc_queue(BH_ENTRY(list->next));
8111da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
8121da177e4SLinus Torvalds 	}
8131da177e4SLinus Torvalds }
81452b19ac9SJan Kara EXPORT_SYMBOL(invalidate_inode_buffers);
8151da177e4SLinus Torvalds 
8161da177e4SLinus Torvalds /*
8171da177e4SLinus Torvalds  * Remove any clean buffers from the inode's buffer list.  This is called
8181da177e4SLinus Torvalds  * when we're trying to free the inode itself.  Those buffers can pin it.
8191da177e4SLinus Torvalds  *
8201da177e4SLinus Torvalds  * Returns true if all buffers were removed.
8211da177e4SLinus Torvalds  */
8221da177e4SLinus Torvalds int remove_inode_buffers(struct inode *inode)
8231da177e4SLinus Torvalds {
8241da177e4SLinus Torvalds 	int ret = 1;
8251da177e4SLinus Torvalds 
8261da177e4SLinus Torvalds 	if (inode_has_buffers(inode)) {
8271da177e4SLinus Torvalds 		struct address_space *mapping = &inode->i_data;
8281da177e4SLinus Torvalds 		struct list_head *list = &mapping->private_list;
829252aa6f5SRafael Aquini 		struct address_space *buffer_mapping = mapping->private_data;
8301da177e4SLinus Torvalds 
8311da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
8321da177e4SLinus Torvalds 		while (!list_empty(list)) {
8331da177e4SLinus Torvalds 			struct buffer_head *bh = BH_ENTRY(list->next);
8341da177e4SLinus Torvalds 			if (buffer_dirty(bh)) {
8351da177e4SLinus Torvalds 				ret = 0;
8361da177e4SLinus Torvalds 				break;
8371da177e4SLinus Torvalds 			}
8381da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
8391da177e4SLinus Torvalds 		}
8401da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
8411da177e4SLinus Torvalds 	}
8421da177e4SLinus Torvalds 	return ret;
8431da177e4SLinus Torvalds }
8441da177e4SLinus Torvalds 
8451da177e4SLinus Torvalds /*
8461da177e4SLinus Torvalds  * Create the appropriate buffers when given a page for data area and
8471da177e4SLinus Torvalds  * the size of each buffer.  Use the bh->b_this_page linked list to
8481da177e4SLinus Torvalds  * follow the buffers created.  Return NULL if unable to create more
8491da177e4SLinus Torvalds  * buffers.
8501da177e4SLinus Torvalds  *
8511da177e4SLinus Torvalds  * The retry flag is used to differentiate async IO (paging, swapping)
8521da177e4SLinus Torvalds  * which may not fail, from ordinary buffer allocations.
8531da177e4SLinus Torvalds  */
8541da177e4SLinus Torvalds struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
855640ab98fSJens Axboe 		bool retry)
8561da177e4SLinus Torvalds {
8571da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
858f745c6f5SShakeel Butt 	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
8591da177e4SLinus Torvalds 	long offset;
860b87d8cefSRoman Gushchin 	struct mem_cgroup *memcg, *old_memcg;
8611da177e4SLinus Torvalds 
862640ab98fSJens Axboe 	if (retry)
863640ab98fSJens Axboe 		gfp |= __GFP_NOFAIL;
864640ab98fSJens Axboe 
8656eeb104eSJohannes Weiner 	/* The page lock pins the memcg */
8666eeb104eSJohannes Weiner 	memcg = page_memcg(page);
867b87d8cefSRoman Gushchin 	old_memcg = set_active_memcg(memcg);
868f745c6f5SShakeel Butt 
8691da177e4SLinus Torvalds 	head = NULL;
8701da177e4SLinus Torvalds 	offset = PAGE_SIZE;
8711da177e4SLinus Torvalds 	while ((offset -= size) >= 0) {
872640ab98fSJens Axboe 		bh = alloc_buffer_head(gfp);
8731da177e4SLinus Torvalds 		if (!bh)
8741da177e4SLinus Torvalds 			goto no_grow;
8751da177e4SLinus Torvalds 
8761da177e4SLinus Torvalds 		bh->b_this_page = head;
8771da177e4SLinus Torvalds 		bh->b_blocknr = -1;
8781da177e4SLinus Torvalds 		head = bh;
8791da177e4SLinus Torvalds 
8801da177e4SLinus Torvalds 		bh->b_size = size;
8811da177e4SLinus Torvalds 
8821da177e4SLinus Torvalds 		/* Link the buffer to its page */
8831da177e4SLinus Torvalds 		set_bh_page(bh, page, offset);
8841da177e4SLinus Torvalds 	}
885f745c6f5SShakeel Butt out:
886b87d8cefSRoman Gushchin 	set_active_memcg(old_memcg);
8871da177e4SLinus Torvalds 	return head;
8881da177e4SLinus Torvalds /*
8891da177e4SLinus Torvalds  * In case anything failed, we just free everything we got.
8901da177e4SLinus Torvalds  */
8911da177e4SLinus Torvalds no_grow:
8921da177e4SLinus Torvalds 	if (head) {
8931da177e4SLinus Torvalds 		do {
8941da177e4SLinus Torvalds 			bh = head;
8951da177e4SLinus Torvalds 			head = head->b_this_page;
8961da177e4SLinus Torvalds 			free_buffer_head(bh);
8971da177e4SLinus Torvalds 		} while (head);
8981da177e4SLinus Torvalds 	}
8991da177e4SLinus Torvalds 
900f745c6f5SShakeel Butt 	goto out;
9011da177e4SLinus Torvalds }
9021da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(alloc_page_buffers);
9031da177e4SLinus Torvalds 
9041da177e4SLinus Torvalds static inline void
9051da177e4SLinus Torvalds link_dev_buffers(struct page *page, struct buffer_head *head)
9061da177e4SLinus Torvalds {
9071da177e4SLinus Torvalds 	struct buffer_head *bh, *tail;
9081da177e4SLinus Torvalds 
9091da177e4SLinus Torvalds 	bh = head;
9101da177e4SLinus Torvalds 	do {
9111da177e4SLinus Torvalds 		tail = bh;
9121da177e4SLinus Torvalds 		bh = bh->b_this_page;
9131da177e4SLinus Torvalds 	} while (bh);
9141da177e4SLinus Torvalds 	tail->b_this_page = head;
91545dcfc27SGuoqing Jiang 	attach_page_private(page, head);
9161da177e4SLinus Torvalds }
9171da177e4SLinus Torvalds 
918bbec0270SLinus Torvalds static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
919bbec0270SLinus Torvalds {
920bbec0270SLinus Torvalds 	sector_t retval = ~((sector_t)0);
921b86058f9SChristoph Hellwig 	loff_t sz = bdev_nr_bytes(bdev);
922bbec0270SLinus Torvalds 
923bbec0270SLinus Torvalds 	if (sz) {
924bbec0270SLinus Torvalds 		unsigned int sizebits = blksize_bits(size);
925bbec0270SLinus Torvalds 		retval = (sz >> sizebits);
926bbec0270SLinus Torvalds 	}
927bbec0270SLinus Torvalds 	return retval;
928bbec0270SLinus Torvalds }
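
/*
 * Example (editor's note): for a 1 GiB device and 4 KiB blocks,
 * blksize_bits(4096) == 12, so blkdev_max_block() returns
 * (1 << 30) >> 12 == 262144 addressable blocks; an sz of 0 (no media)
 * leaves the "no limit" value of ~0.
 */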
929bbec0270SLinus Torvalds 
9301da177e4SLinus Torvalds /*
9311da177e4SLinus Torvalds  * Initialise the state of a blockdev page's buffers.
9321da177e4SLinus Torvalds  */
933676ce6d5SHugh Dickins static sector_t
9341da177e4SLinus Torvalds init_page_buffers(struct page *page, struct block_device *bdev,
9351da177e4SLinus Torvalds 			sector_t block, int size)
9361da177e4SLinus Torvalds {
9371da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);
9381da177e4SLinus Torvalds 	struct buffer_head *bh = head;
9391da177e4SLinus Torvalds 	int uptodate = PageUptodate(page);
940bcd1d063SChristoph Hellwig 	sector_t end_block = blkdev_max_block(bdev, size);
9411da177e4SLinus Torvalds 
9421da177e4SLinus Torvalds 	do {
9431da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
94401950a34SEric Biggers 			bh->b_end_io = NULL;
94501950a34SEric Biggers 			bh->b_private = NULL;
9461da177e4SLinus Torvalds 			bh->b_bdev = bdev;
9471da177e4SLinus Torvalds 			bh->b_blocknr = block;
9481da177e4SLinus Torvalds 			if (uptodate)
9491da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
950080399aaSJeff Moyer 			if (block < end_block)
9511da177e4SLinus Torvalds 				set_buffer_mapped(bh);
9521da177e4SLinus Torvalds 		}
9531da177e4SLinus Torvalds 		block++;
9541da177e4SLinus Torvalds 		bh = bh->b_this_page;
9551da177e4SLinus Torvalds 	} while (bh != head);
956676ce6d5SHugh Dickins 
957676ce6d5SHugh Dickins 	/*
958676ce6d5SHugh Dickins 	 * Caller needs to validate requested block against end of device.
959676ce6d5SHugh Dickins 	 */
960676ce6d5SHugh Dickins 	return end_block;
9611da177e4SLinus Torvalds }
9621da177e4SLinus Torvalds 
9631da177e4SLinus Torvalds /*
9641da177e4SLinus Torvalds  * Create the page-cache page that contains the requested block.
9651da177e4SLinus Torvalds  *
966676ce6d5SHugh Dickins  * This is used purely for blockdev mappings.
9671da177e4SLinus Torvalds  */
968676ce6d5SHugh Dickins static int
9691da177e4SLinus Torvalds grow_dev_page(struct block_device *bdev, sector_t block,
9703b5e6454SGioh Kim 	      pgoff_t index, int size, int sizebits, gfp_t gfp)
9711da177e4SLinus Torvalds {
9721da177e4SLinus Torvalds 	struct inode *inode = bdev->bd_inode;
9731da177e4SLinus Torvalds 	struct page *page;
9741da177e4SLinus Torvalds 	struct buffer_head *bh;
975676ce6d5SHugh Dickins 	sector_t end_block;
976c4b4c2a7SZhiqiang Liu 	int ret = 0;
97784235de3SJohannes Weiner 	gfp_t gfp_mask;
9781da177e4SLinus Torvalds 
979c62d2555SMichal Hocko 	gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;
9803b5e6454SGioh Kim 
98184235de3SJohannes Weiner 	/*
98284235de3SJohannes Weiner 	 * XXX: __getblk_slow() cannot really deal with failure and
98384235de3SJohannes Weiner 	 * will endlessly loop on improvised global reclaim.  Prefer
98484235de3SJohannes Weiner 	 * looping in the allocator rather than here, at least that
98584235de3SJohannes Weiner 	 * code knows what it's doing.
98684235de3SJohannes Weiner 	 */
98784235de3SJohannes Weiner 	gfp_mask |= __GFP_NOFAIL;
98884235de3SJohannes Weiner 
98984235de3SJohannes Weiner 	page = find_or_create_page(inode->i_mapping, index, gfp_mask);
9901da177e4SLinus Torvalds 
991e827f923SEric Sesterhenn 	BUG_ON(!PageLocked(page));
9921da177e4SLinus Torvalds 
9931da177e4SLinus Torvalds 	if (page_has_buffers(page)) {
9941da177e4SLinus Torvalds 		bh = page_buffers(page);
9951da177e4SLinus Torvalds 		if (bh->b_size == size) {
996676ce6d5SHugh Dickins 			end_block = init_page_buffers(page, bdev,
997f2d5a944SAnton Altaparmakov 						(sector_t)index << sizebits,
998f2d5a944SAnton Altaparmakov 						size);
999676ce6d5SHugh Dickins 			goto done;
10001da177e4SLinus Torvalds 		}
100168189fefSMatthew Wilcox (Oracle) 		if (!try_to_free_buffers(page_folio(page)))
10021da177e4SLinus Torvalds 			goto failed;
10031da177e4SLinus Torvalds 	}
10041da177e4SLinus Torvalds 
10051da177e4SLinus Torvalds 	/*
10061da177e4SLinus Torvalds 	 * Allocate some buffers for this page
10071da177e4SLinus Torvalds 	 */
100894dc24c0SJens Axboe 	bh = alloc_page_buffers(page, size, true);
10091da177e4SLinus Torvalds 
10101da177e4SLinus Torvalds 	/*
10111da177e4SLinus Torvalds 	 * Link the page to the buffers and initialise them.  Take the
10121da177e4SLinus Torvalds 	 * lock to be atomic wrt __find_get_block(), which does not
10131da177e4SLinus Torvalds 	 * run under the page lock.
10141da177e4SLinus Torvalds 	 */
10151da177e4SLinus Torvalds 	spin_lock(&inode->i_mapping->private_lock);
10161da177e4SLinus Torvalds 	link_dev_buffers(page, bh);
1017f2d5a944SAnton Altaparmakov 	end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
1018f2d5a944SAnton Altaparmakov 			size);
10191da177e4SLinus Torvalds 	spin_unlock(&inode->i_mapping->private_lock);
1020676ce6d5SHugh Dickins done:
1021676ce6d5SHugh Dickins 	ret = (block < end_block) ? 1 : -ENXIO;
10221da177e4SLinus Torvalds failed:
10231da177e4SLinus Torvalds 	unlock_page(page);
102409cbfeafSKirill A. Shutemov 	put_page(page);
1025676ce6d5SHugh Dickins 	return ret;
10261da177e4SLinus Torvalds }
10271da177e4SLinus Torvalds 
10281da177e4SLinus Torvalds /*
10291da177e4SLinus Torvalds  * Create buffers for the specified block device block's page.  If
10301da177e4SLinus Torvalds  * that page was dirty, the buffers are set dirty also.
10311da177e4SLinus Torvalds  */
1032858119e1SArjan van de Ven static int
10333b5e6454SGioh Kim grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
10341da177e4SLinus Torvalds {
10351da177e4SLinus Torvalds 	pgoff_t index;
10361da177e4SLinus Torvalds 	int sizebits;
10371da177e4SLinus Torvalds 
103890432e60SMikulas Patocka 	sizebits = PAGE_SHIFT - __ffs(size);
10391da177e4SLinus Torvalds 	index = block >> sizebits;
10401da177e4SLinus Torvalds 
1041e5657933SAndrew Morton 	/*
1042e5657933SAndrew Morton 	 * Check for a block which wants to lie outside our maximum possible
1043e5657933SAndrew Morton 	 * pagecache index.  (this comparison is done using sector_t types).
1044e5657933SAndrew Morton 	 */
1045e5657933SAndrew Morton 	if (unlikely(index != block >> sizebits)) {
1046e5657933SAndrew Morton 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1047a1c6f057SDmitry Monakhov 			"device %pg\n",
10488e24eea7SHarvey Harrison 			__func__, (unsigned long long)block,
1049a1c6f057SDmitry Monakhov 			bdev);
1050e5657933SAndrew Morton 		return -EIO;
1051e5657933SAndrew Morton 	}
1052676ce6d5SHugh Dickins 
10531da177e4SLinus Torvalds 	/* Create a page with the proper size buffers. */
10543b5e6454SGioh Kim 	return grow_dev_page(bdev, block, index, size, sizebits, gfp);
10551da177e4SLinus Torvalds }
10561da177e4SLinus Torvalds 
10570026ba40SEric Biggers static struct buffer_head *
10583b5e6454SGioh Kim __getblk_slow(struct block_device *bdev, sector_t block,
10593b5e6454SGioh Kim 	     unsigned size, gfp_t gfp)
10601da177e4SLinus Torvalds {
10611da177e4SLinus Torvalds 	/* Size must be a multiple of the hard sector size */
1062e1defc4fSMartin K. Petersen 	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
10631da177e4SLinus Torvalds 			(size < 512 || size > PAGE_SIZE))) {
10641da177e4SLinus Torvalds 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
10651da177e4SLinus Torvalds 					size);
1066e1defc4fSMartin K. Petersen 		printk(KERN_ERR "logical block size: %d\n",
1067e1defc4fSMartin K. Petersen 					bdev_logical_block_size(bdev));
10681da177e4SLinus Torvalds 
10691da177e4SLinus Torvalds 		dump_stack();
10701da177e4SLinus Torvalds 		return NULL;
10711da177e4SLinus Torvalds 	}
10721da177e4SLinus Torvalds 
1073676ce6d5SHugh Dickins 	for (;;) {
1074676ce6d5SHugh Dickins 		struct buffer_head *bh;
1075676ce6d5SHugh Dickins 		int ret;
1076676ce6d5SHugh Dickins 
10771da177e4SLinus Torvalds 		bh = __find_get_block(bdev, block, size);
10781da177e4SLinus Torvalds 		if (bh)
10791da177e4SLinus Torvalds 			return bh;
10801da177e4SLinus Torvalds 
10813b5e6454SGioh Kim 		ret = grow_buffers(bdev, block, size, gfp);
1082676ce6d5SHugh Dickins 		if (ret < 0)
108391f68c89SJeff Moyer 			return NULL;
1084676ce6d5SHugh Dickins 	}
10851da177e4SLinus Torvalds }
10861da177e4SLinus Torvalds 
10871da177e4SLinus Torvalds /*
10881da177e4SLinus Torvalds  * The relationship between dirty buffers and dirty pages:
10891da177e4SLinus Torvalds  *
10901da177e4SLinus Torvalds  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1091ec82e1c1SMatthew Wilcox  * the page is tagged dirty in the page cache.
10921da177e4SLinus Torvalds  *
10931da177e4SLinus Torvalds  * At all times, the dirtiness of the buffers represents the dirtiness of
10941da177e4SLinus Torvalds  * subsections of the page.  If the page has buffers, the page dirty bit is
10951da177e4SLinus Torvalds  * merely a hint about the true dirty state.
10961da177e4SLinus Torvalds  *
10971da177e4SLinus Torvalds  * When a page is set dirty in its entirety, all its buffers are marked dirty
10981da177e4SLinus Torvalds  * (if the page has buffers).
10991da177e4SLinus Torvalds  *
11001da177e4SLinus Torvalds  * When a buffer is marked dirty, its page is dirtied, but the page's other
11011da177e4SLinus Torvalds  * buffers are not.
11021da177e4SLinus Torvalds  *
11031da177e4SLinus Torvalds  * Also.  When blockdev buffers are explicitly read with bread(), they
11041da177e4SLinus Torvalds  * individually become uptodate.  But their backing page remains not
11051da177e4SLinus Torvalds  * uptodate - even if all of its buffers are uptodate.  A subsequent
11062c69e205SMatthew Wilcox (Oracle)  * block_read_full_folio() against that folio will discover all the uptodate
11072c69e205SMatthew Wilcox (Oracle)  * buffers, will set the folio uptodate and will perform no I/O.
11081da177e4SLinus Torvalds  */
11091da177e4SLinus Torvalds 
11101da177e4SLinus Torvalds /**
11111da177e4SLinus Torvalds  * mark_buffer_dirty - mark a buffer_head as needing writeout
111267be2dd1SMartin Waitz  * @bh: the buffer_head to mark dirty
11131da177e4SLinus Torvalds  *
1114ec82e1c1SMatthew Wilcox  * mark_buffer_dirty() will set the dirty bit against the buffer, then set
1115ec82e1c1SMatthew Wilcox  * its backing page dirty, then tag the page as dirty in the page cache
1116ec82e1c1SMatthew Wilcox  * and then attach the address_space's inode to its superblock's dirty
11171da177e4SLinus Torvalds  * inode list.
11181da177e4SLinus Torvalds  *
1119abc8a8a2SMatthew Wilcox (Oracle)  * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->private_lock,
1120b93b0163SMatthew Wilcox  * i_pages lock and mapping->host->i_lock.
11211da177e4SLinus Torvalds  */
1122fc9b52cdSHarvey Harrison void mark_buffer_dirty(struct buffer_head *bh)
11231da177e4SLinus Torvalds {
1124787d2214SNick Piggin 	WARN_ON_ONCE(!buffer_uptodate(bh));
11251be62dc1SLinus Torvalds 
11265305cb83STejun Heo 	trace_block_dirty_buffer(bh);
11275305cb83STejun Heo 
11281be62dc1SLinus Torvalds 	/*
11291be62dc1SLinus Torvalds 	 * Very *carefully* optimize the it-is-already-dirty case.
11301be62dc1SLinus Torvalds 	 *
11311be62dc1SLinus Torvalds 	 * Don't let the final "is it dirty" check be reordered to before
11321be62dc1SLinus Torvalds 	 * we have perhaps modified the buffer.
11331be62dc1SLinus Torvalds 	 */
11341be62dc1SLinus Torvalds 	if (buffer_dirty(bh)) {
11351be62dc1SLinus Torvalds 		smp_mb();
11361be62dc1SLinus Torvalds 		if (buffer_dirty(bh))
11371be62dc1SLinus Torvalds 			return;
11381be62dc1SLinus Torvalds 	}
11391be62dc1SLinus Torvalds 
1140a8e7d49aSLinus Torvalds 	if (!test_set_buffer_dirty(bh)) {
1141cf1d3417SMatthew Wilcox (Oracle) 		struct folio *folio = bh->b_folio;
1142c4843a75SGreg Thelen 		struct address_space *mapping = NULL;
1143c4843a75SGreg Thelen 
1144cf1d3417SMatthew Wilcox (Oracle) 		folio_memcg_lock(folio);
1145cf1d3417SMatthew Wilcox (Oracle) 		if (!folio_test_set_dirty(folio)) {
1146cf1d3417SMatthew Wilcox (Oracle) 			mapping = folio->mapping;
11478e9d78edSLinus Torvalds 			if (mapping)
1148cf1d3417SMatthew Wilcox (Oracle) 				__folio_mark_dirty(folio, mapping, 0);
11498e9d78edSLinus Torvalds 		}
1150cf1d3417SMatthew Wilcox (Oracle) 		folio_memcg_unlock(folio);
1151c4843a75SGreg Thelen 		if (mapping)
1152c4843a75SGreg Thelen 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1153a8e7d49aSLinus Torvalds 	}
11541da177e4SLinus Torvalds }
11551fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(mark_buffer_dirty);
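
/*
 * Example (a minimal sketch, not part of fs/buffer.c): the canonical
 * read-modify-write pattern described above.  foo_update_block() and the
 * caller's on-disk layout are hypothetical; sb_bread(), lock_buffer(),
 * mark_buffer_dirty() and brelse() are the stock helpers.
 */
static int foo_update_block(struct super_block *sb, sector_t block,
			    const void *data, size_t len)
{
	struct buffer_head *bh = sb_bread(sb, block);

	if (!bh)
		return -EIO;
	lock_buffer(bh);
	memcpy(bh->b_data, data, min_t(size_t, len, bh->b_size));
	unlock_buffer(bh);
	/* Dirties the buffer, its folio and the backing inode, as above. */
	mark_buffer_dirty(bh);
	brelse(bh);
	return 0;
}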
11561da177e4SLinus Torvalds 
115787354e5dSJeff Layton void mark_buffer_write_io_error(struct buffer_head *bh)
115887354e5dSJeff Layton {
1159485e9605SJeff Layton 	struct super_block *sb;
1160485e9605SJeff Layton 
116187354e5dSJeff Layton 	set_buffer_write_io_error(bh);
116287354e5dSJeff Layton 	/* FIXME: do we need to set this in both places? */
1163abc8a8a2SMatthew Wilcox (Oracle) 	if (bh->b_folio && bh->b_folio->mapping)
1164abc8a8a2SMatthew Wilcox (Oracle) 		mapping_set_error(bh->b_folio->mapping, -EIO);
116587354e5dSJeff Layton 	if (bh->b_assoc_map)
116687354e5dSJeff Layton 		mapping_set_error(bh->b_assoc_map, -EIO);
1167485e9605SJeff Layton 	rcu_read_lock();
1168485e9605SJeff Layton 	sb = READ_ONCE(bh->b_bdev->bd_super);
1169485e9605SJeff Layton 	if (sb)
1170485e9605SJeff Layton 		errseq_set(&sb->s_wb_err, -EIO);
1171485e9605SJeff Layton 	rcu_read_unlock();
117287354e5dSJeff Layton }
117387354e5dSJeff Layton EXPORT_SYMBOL(mark_buffer_write_io_error);
117487354e5dSJeff Layton 
11751da177e4SLinus Torvalds /*
11761da177e4SLinus Torvalds  * Decrement a buffer_head's reference count.  If all buffers against a page
11771da177e4SLinus Torvalds  * have zero reference count, are clean and unlocked, and if the page is clean
11781da177e4SLinus Torvalds  * and unlocked then try_to_free_buffers() may strip the buffers from the page
11791da177e4SLinus Torvalds  * in preparation for freeing it (sometimes, rarely, buffers are removed from
11801da177e4SLinus Torvalds  * a page but it ends up not being freed, and buffers may later be reattached).
11811da177e4SLinus Torvalds  */
11821da177e4SLinus Torvalds void __brelse(struct buffer_head * buf)
11831da177e4SLinus Torvalds {
11841da177e4SLinus Torvalds 	if (atomic_read(&buf->b_count)) {
11851da177e4SLinus Torvalds 		put_bh(buf);
11861da177e4SLinus Torvalds 		return;
11871da177e4SLinus Torvalds 	}
11885c752ad9SArjan van de Ven 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
11891da177e4SLinus Torvalds }
11901fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__brelse);
11911da177e4SLinus Torvalds 
11921da177e4SLinus Torvalds /*
11931da177e4SLinus Torvalds  * bforget() is like brelse(), except it discards any
11941da177e4SLinus Torvalds  * potentially dirty data.
11951da177e4SLinus Torvalds  */
11961da177e4SLinus Torvalds void __bforget(struct buffer_head *bh)
11971da177e4SLinus Torvalds {
11981da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
1199535ee2fbSJan Kara 	if (bh->b_assoc_map) {
1200abc8a8a2SMatthew Wilcox (Oracle) 		struct address_space *buffer_mapping = bh->b_folio->mapping;
12011da177e4SLinus Torvalds 
12021da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
12031da177e4SLinus Torvalds 		list_del_init(&bh->b_assoc_buffers);
120458ff407bSJan Kara 		bh->b_assoc_map = NULL;
12051da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
12061da177e4SLinus Torvalds 	}
12071da177e4SLinus Torvalds 	__brelse(bh);
12081da177e4SLinus Torvalds }
12091fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__bforget);
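
/*
 * Example (a minimal sketch, not part of fs/buffer.c): a hypothetical
 * abort path.  A block that was dirtied speculatively and then abandoned
 * is dropped with bforget() so the stale data is never written back.
 */
static void foo_abandon_block(struct buffer_head *bh)
{
	bforget(bh);	/* clears the dirty bit and drops our reference */
}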
12101da177e4SLinus Torvalds 
12111da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh)
12121da177e4SLinus Torvalds {
12131da177e4SLinus Torvalds 	lock_buffer(bh);
12141da177e4SLinus Torvalds 	if (buffer_uptodate(bh)) {
12151da177e4SLinus Torvalds 		unlock_buffer(bh);
12161da177e4SLinus Torvalds 		return bh;
12171da177e4SLinus Torvalds 	} else {
12181da177e4SLinus Torvalds 		get_bh(bh);
12191da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_read_sync;
12201420c4a5SBart Van Assche 		submit_bh(REQ_OP_READ, bh);
12211da177e4SLinus Torvalds 		wait_on_buffer(bh);
12221da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
12231da177e4SLinus Torvalds 			return bh;
12241da177e4SLinus Torvalds 	}
12251da177e4SLinus Torvalds 	brelse(bh);
12261da177e4SLinus Torvalds 	return NULL;
12271da177e4SLinus Torvalds }
12281da177e4SLinus Torvalds 
12291da177e4SLinus Torvalds /*
12301da177e4SLinus Torvalds  * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
12311da177e4SLinus Torvalds  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
12321da177e4SLinus Torvalds  * refcount elevated by one when they're in an LRU.  A buffer can only appear
12331da177e4SLinus Torvalds  * once in a particular CPU's LRU.  A single buffer can be present in multiple
12341da177e4SLinus Torvalds  * CPU's LRUs at the same time.
12351da177e4SLinus Torvalds  *
12361da177e4SLinus Torvalds  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
12371da177e4SLinus Torvalds  * sb_find_get_block().
12381da177e4SLinus Torvalds  *
12391da177e4SLinus Torvalds  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
12401da177e4SLinus Torvalds  * a local interrupt disable for that.
12411da177e4SLinus Torvalds  */
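
/*
 * Example (a minimal sketch, not part of fs/buffer.c): the per-cpu LRU
 * above is what makes a repeated cache probe like this hypothetical
 * helper cheap.  It returns an uptodate cached buffer, or NULL without
 * starting any I/O.
 */
static struct buffer_head *foo_peek_block(struct super_block *sb,
					  sector_t block)
{
	struct buffer_head *bh = sb_find_get_block(sb, block);

	if (bh && !buffer_uptodate(bh)) {
		brelse(bh);
		bh = NULL;
	}
	return bh;
}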
12421da177e4SLinus Torvalds 
124386cf78d7SSebastien Buisson #define BH_LRU_SIZE	16
12441da177e4SLinus Torvalds 
12451da177e4SLinus Torvalds struct bh_lru {
12461da177e4SLinus Torvalds 	struct buffer_head *bhs[BH_LRU_SIZE];
12471da177e4SLinus Torvalds };
12481da177e4SLinus Torvalds 
12491da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
12501da177e4SLinus Torvalds 
12511da177e4SLinus Torvalds #ifdef CONFIG_SMP
12521da177e4SLinus Torvalds #define bh_lru_lock()	local_irq_disable()
12531da177e4SLinus Torvalds #define bh_lru_unlock()	local_irq_enable()
12541da177e4SLinus Torvalds #else
12551da177e4SLinus Torvalds #define bh_lru_lock()	preempt_disable()
12561da177e4SLinus Torvalds #define bh_lru_unlock()	preempt_enable()
12571da177e4SLinus Torvalds #endif
12581da177e4SLinus Torvalds 
12591da177e4SLinus Torvalds static inline void check_irqs_on(void)
12601da177e4SLinus Torvalds {
12611da177e4SLinus Torvalds #ifdef irqs_disabled
12621da177e4SLinus Torvalds 	BUG_ON(irqs_disabled());
12631da177e4SLinus Torvalds #endif
12641da177e4SLinus Torvalds }
12651da177e4SLinus Torvalds 
12661da177e4SLinus Torvalds /*
1267241f01fbSEric Biggers  * Install a buffer_head into this CPU's LRU.  If it is not already in the
1268241f01fbSEric Biggers  * LRU it is inserted at the front, evicting the buffer_head at the back,
1269241f01fbSEric Biggers  * if any.  If it is already in the LRU, it is moved to the front.
12701da177e4SLinus Torvalds  */
12711da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh)
12721da177e4SLinus Torvalds {
1273241f01fbSEric Biggers 	struct buffer_head *evictee = bh;
1274241f01fbSEric Biggers 	struct bh_lru *b;
1275241f01fbSEric Biggers 	int i;
12761da177e4SLinus Torvalds 
12771da177e4SLinus Torvalds 	check_irqs_on();
1278c0226eb8SMinchan Kim 	bh_lru_lock();
1279c0226eb8SMinchan Kim 
12808cc621d2SMinchan Kim 	/*
12818cc621d2SMinchan Kim 	 * The refcount of a buffer_head in the bh_lru prevents the
12828cc621d2SMinchan Kim 	 * attached page from being dropped (i.e., by try_to_free_buffers),
12838cc621d2SMinchan Kim 	 * which can make page migration fail.
12848cc621d2SMinchan Kim 	 * Skip putting the upcoming bh into the bh_lru until migration is done.
12858cc621d2SMinchan Kim 	 */
1286c0226eb8SMinchan Kim 	if (lru_cache_disabled()) {
1287c0226eb8SMinchan Kim 		bh_lru_unlock();
12888cc621d2SMinchan Kim 		return;
1289c0226eb8SMinchan Kim 	}
1290241f01fbSEric Biggers 
1291241f01fbSEric Biggers 	b = this_cpu_ptr(&bh_lrus);
1292241f01fbSEric Biggers 	for (i = 0; i < BH_LRU_SIZE; i++) {
1293241f01fbSEric Biggers 		swap(evictee, b->bhs[i]);
1294241f01fbSEric Biggers 		if (evictee == bh) {
1295241f01fbSEric Biggers 			bh_lru_unlock();
1296241f01fbSEric Biggers 			return;
1297241f01fbSEric Biggers 		}
1298241f01fbSEric Biggers 	}
12991da177e4SLinus Torvalds 
13001da177e4SLinus Torvalds 	get_bh(bh);
13011da177e4SLinus Torvalds 	bh_lru_unlock();
1302241f01fbSEric Biggers 	brelse(evictee);
13031da177e4SLinus Torvalds }
13041da177e4SLinus Torvalds 
13051da177e4SLinus Torvalds /*
13061da177e4SLinus Torvalds  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
13071da177e4SLinus Torvalds  */
1308858119e1SArjan van de Ven static struct buffer_head *
13093991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
13101da177e4SLinus Torvalds {
13111da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
13123991d3bdSTomasz Kvarsin 	unsigned int i;
13131da177e4SLinus Torvalds 
13141da177e4SLinus Torvalds 	check_irqs_on();
13151da177e4SLinus Torvalds 	bh_lru_lock();
13161da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
1317c7b92516SChristoph Lameter 		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
13181da177e4SLinus Torvalds 
13199470dd5dSZach Brown 		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
13209470dd5dSZach Brown 		    bh->b_size == size) {
13211da177e4SLinus Torvalds 			if (i) {
13221da177e4SLinus Torvalds 				while (i) {
1323c7b92516SChristoph Lameter 					__this_cpu_write(bh_lrus.bhs[i],
1324c7b92516SChristoph Lameter 						__this_cpu_read(bh_lrus.bhs[i - 1]));
13251da177e4SLinus Torvalds 					i--;
13261da177e4SLinus Torvalds 				}
1327c7b92516SChristoph Lameter 				__this_cpu_write(bh_lrus.bhs[0], bh);
13281da177e4SLinus Torvalds 			}
13291da177e4SLinus Torvalds 			get_bh(bh);
13301da177e4SLinus Torvalds 			ret = bh;
13311da177e4SLinus Torvalds 			break;
13321da177e4SLinus Torvalds 		}
13331da177e4SLinus Torvalds 	}
13341da177e4SLinus Torvalds 	bh_lru_unlock();
13351da177e4SLinus Torvalds 	return ret;
13361da177e4SLinus Torvalds }
13371da177e4SLinus Torvalds 
13381da177e4SLinus Torvalds /*
13391da177e4SLinus Torvalds  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
13401da177e4SLinus Torvalds  * it in the LRU and mark it as accessed.  If it is not present then return
13411da177e4SLinus Torvalds  * NULL.
13421da177e4SLinus Torvalds  */
13431da177e4SLinus Torvalds struct buffer_head *
13443991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
13451da177e4SLinus Torvalds {
13461da177e4SLinus Torvalds 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
13471da177e4SLinus Torvalds 
13481da177e4SLinus Torvalds 	if (bh == NULL) {
13492457aec6SMel Gorman 		/* __find_get_block_slow will mark the page accessed */
1350385fd4c5SCoywolf Qi Hunt 		bh = __find_get_block_slow(bdev, block);
13511da177e4SLinus Torvalds 		if (bh)
13521da177e4SLinus Torvalds 			bh_lru_install(bh);
13532457aec6SMel Gorman 	} else
13541da177e4SLinus Torvalds 		touch_buffer(bh);
13552457aec6SMel Gorman 
13561da177e4SLinus Torvalds 	return bh;
13571da177e4SLinus Torvalds }
13581da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block);
13591da177e4SLinus Torvalds 
13601da177e4SLinus Torvalds /*
13613b5e6454SGioh Kim  * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
13621da177e4SLinus Torvalds  * which corresponds to the passed block_device, block and size. The
13631da177e4SLinus Torvalds  * returned buffer has its reference count incremented.
13641da177e4SLinus Torvalds  *
13653b5e6454SGioh Kim  * __getblk_gfp() will lock up the machine if grow_dev_page's
13663b5e6454SGioh Kim  * try_to_free_buffers() attempt keeps failing.  FIXME, perhaps?
13671da177e4SLinus Torvalds  */
13681da177e4SLinus Torvalds struct buffer_head *
13693b5e6454SGioh Kim __getblk_gfp(struct block_device *bdev, sector_t block,
13703b5e6454SGioh Kim 	     unsigned size, gfp_t gfp)
13711da177e4SLinus Torvalds {
13721da177e4SLinus Torvalds 	struct buffer_head *bh = __find_get_block(bdev, block, size);
13731da177e4SLinus Torvalds 
13741da177e4SLinus Torvalds 	might_sleep();
13751da177e4SLinus Torvalds 	if (bh == NULL)
13763b5e6454SGioh Kim 		bh = __getblk_slow(bdev, block, size, gfp);
13771da177e4SLinus Torvalds 	return bh;
13781da177e4SLinus Torvalds }
13793b5e6454SGioh Kim EXPORT_SYMBOL(__getblk_gfp);
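
/*
 * Example (a minimal sketch, not part of fs/buffer.c): when a freshly
 * allocated block will be overwritten in full there is nothing worth
 * reading first, so sb_getblk() (a wrapper around __getblk_gfp()) is
 * preferred over sb_bread().  foo_init_block() is hypothetical.
 */
static struct buffer_head *foo_init_block(struct super_block *sb,
					  sector_t block)
{
	struct buffer_head *bh = sb_getblk(sb, block);

	if (!bh)
		return NULL;
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);
	return bh;
}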
13801da177e4SLinus Torvalds 
13811da177e4SLinus Torvalds /*
13821da177e4SLinus Torvalds  * Do async read-ahead on a buffer..
13831da177e4SLinus Torvalds  * Do async read-ahead on a buffer.
13843991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
13851da177e4SLinus Torvalds {
13861da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
1387a3e713b5SAndrew Morton 	if (likely(bh)) {
1388e7ea1129SZhang Yi 		bh_readahead(bh, REQ_RAHEAD);
13891da177e4SLinus Torvalds 		brelse(bh);
13901da177e4SLinus Torvalds 	}
1391a3e713b5SAndrew Morton }
13921da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead);
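
/*
 * Example (a minimal sketch, not part of fs/buffer.c): a hypothetical
 * sequential scan priming the cache a few blocks ahead of each
 * synchronous read.  The read-ahead I/O is asynchronous; only the
 * final sb_bread() blocks.
 */
static struct buffer_head *foo_bread_ra(struct super_block *sb,
					sector_t block, unsigned int ra)
{
	unsigned int n;

	for (n = 1; n <= ra; n++)
		__breadahead(sb->s_bdev, block + n, sb->s_blocksize);
	return sb_bread(sb, block);
}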
13931da177e4SLinus Torvalds 
13941da177e4SLinus Torvalds /**
13953b5e6454SGioh Kim  *  __bread_gfp() - reads a specified block and returns the bh
139667be2dd1SMartin Waitz  *  @bdev: the block_device to read from
13971da177e4SLinus Torvalds  *  @block: number of block
13981da177e4SLinus Torvalds  *  @size: size (in bytes) to read
13993b5e6454SGioh Kim  *  @gfp: page allocation flag
14001da177e4SLinus Torvalds  *
14011da177e4SLinus Torvalds  *  Reads a specified block, and returns the buffer head that contains it.
14023b5e6454SGioh Kim  *  If @gfp is zero, the page cache page is allocated from the non-movable
14033b5e6454SGioh Kim  *  area so that it does not prevent page migration.
14041da177e4SLinus Torvalds  *  It returns NULL if the block was unreadable.
14051da177e4SLinus Torvalds  */
14061da177e4SLinus Torvalds struct buffer_head *
14073b5e6454SGioh Kim __bread_gfp(struct block_device *bdev, sector_t block,
14083b5e6454SGioh Kim 		   unsigned size, gfp_t gfp)
14091da177e4SLinus Torvalds {
14103b5e6454SGioh Kim 	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
14111da177e4SLinus Torvalds 
1412a3e713b5SAndrew Morton 	if (likely(bh) && !buffer_uptodate(bh))
14131da177e4SLinus Torvalds 		bh = __bread_slow(bh);
14141da177e4SLinus Torvalds 	return bh;
14151da177e4SLinus Torvalds }
14163b5e6454SGioh Kim EXPORT_SYMBOL(__bread_gfp);
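
/*
 * Example (a minimal sketch, not part of fs/buffer.c): a buffer that is
 * held for the whole lifetime of a mount (e.g. an on-disk superblock)
 * would pin a movable page indefinitely, so such callers pass gfp == 0
 * as described above; sb_bread_unmovable() is the stock wrapper for it.
 */
static struct buffer_head *foo_load_sb_block(struct super_block *sb,
					     sector_t block)
{
	/* Equivalent to __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0). */
	return sb_bread_unmovable(sb, block);
}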
14171da177e4SLinus Torvalds 
14188cc621d2SMinchan Kim static void __invalidate_bh_lrus(struct bh_lru *b)
14198cc621d2SMinchan Kim {
14208cc621d2SMinchan Kim 	int i;
14218cc621d2SMinchan Kim 
14228cc621d2SMinchan Kim 	for (i = 0; i < BH_LRU_SIZE; i++) {
14238cc621d2SMinchan Kim 		brelse(b->bhs[i]);
14248cc621d2SMinchan Kim 		b->bhs[i] = NULL;
14258cc621d2SMinchan Kim 	}
14268cc621d2SMinchan Kim }
14271da177e4SLinus Torvalds /*
14281da177e4SLinus Torvalds  * invalidate_bh_lrus() is called rarely - but not only at unmount.
14291da177e4SLinus Torvalds  * This doesn't race because it runs on each CPU either in irq
14301da177e4SLinus Torvalds  * context or with preemption disabled.
14311da177e4SLinus Torvalds  */
14321da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg)
14331da177e4SLinus Torvalds {
14341da177e4SLinus Torvalds 	struct bh_lru *b = &get_cpu_var(bh_lrus);
14351da177e4SLinus Torvalds 
14368cc621d2SMinchan Kim 	__invalidate_bh_lrus(b);
14371da177e4SLinus Torvalds 	put_cpu_var(bh_lrus);
14381da177e4SLinus Torvalds }
14391da177e4SLinus Torvalds 
14408cc621d2SMinchan Kim bool has_bh_in_lru(int cpu, void *dummy)
144142be35d0SGilad Ben-Yossef {
144242be35d0SGilad Ben-Yossef 	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
144342be35d0SGilad Ben-Yossef 	int i;
144442be35d0SGilad Ben-Yossef 
144542be35d0SGilad Ben-Yossef 	for (i = 0; i < BH_LRU_SIZE; i++) {
144642be35d0SGilad Ben-Yossef 		if (b->bhs[i])
14471d706679SSaurav Girepunje 			return true;
144842be35d0SGilad Ben-Yossef 	}
144942be35d0SGilad Ben-Yossef 
14501d706679SSaurav Girepunje 	return false;
145142be35d0SGilad Ben-Yossef }
145242be35d0SGilad Ben-Yossef 
1453f9a14399SPeter Zijlstra void invalidate_bh_lrus(void)
14541da177e4SLinus Torvalds {
1455cb923159SSebastian Andrzej Siewior 	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
14561da177e4SLinus Torvalds }
14579db5579bSNick Piggin EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
14581da177e4SLinus Torvalds 
1459243418e3SMinchan Kim /*
1460243418e3SMinchan Kim  * It's called from workqueue context so we need a bh_lru_lock to close
1461243418e3SMinchan Kim  * the race with preemption/irq.
1462243418e3SMinchan Kim  */
1463243418e3SMinchan Kim void invalidate_bh_lrus_cpu(void)
14648cc621d2SMinchan Kim {
14658cc621d2SMinchan Kim 	struct bh_lru *b;
14668cc621d2SMinchan Kim 
14678cc621d2SMinchan Kim 	bh_lru_lock();
1468243418e3SMinchan Kim 	b = this_cpu_ptr(&bh_lrus);
14698cc621d2SMinchan Kim 	__invalidate_bh_lrus(b);
14708cc621d2SMinchan Kim 	bh_lru_unlock();
14718cc621d2SMinchan Kim }
14728cc621d2SMinchan Kim 
14731da177e4SLinus Torvalds void set_bh_page(struct buffer_head *bh,
14741da177e4SLinus Torvalds 		struct page *page, unsigned long offset)
14751da177e4SLinus Torvalds {
14761da177e4SLinus Torvalds 	bh->b_page = page;
1477e827f923SEric Sesterhenn 	BUG_ON(offset >= PAGE_SIZE);
14781da177e4SLinus Torvalds 	if (PageHighMem(page))
14791da177e4SLinus Torvalds 		/*
14801da177e4SLinus Torvalds 		 * This catches illegal uses and preserves the offset:
14811da177e4SLinus Torvalds 		 */
14821da177e4SLinus Torvalds 		bh->b_data = (char *)(0 + offset);
14831da177e4SLinus Torvalds 	else
14841da177e4SLinus Torvalds 		bh->b_data = page_address(page) + offset;
14851da177e4SLinus Torvalds }
14861da177e4SLinus Torvalds EXPORT_SYMBOL(set_bh_page);
14871da177e4SLinus Torvalds 
14881da177e4SLinus Torvalds /*
14891da177e4SLinus Torvalds  * Called when truncating a buffer on a page completely.
14901da177e4SLinus Torvalds  */
1491e7470ee8SMel Gorman 
1492e7470ee8SMel Gorman /* Bits that are cleared during an invalidate */
1493e7470ee8SMel Gorman #define BUFFER_FLAGS_DISCARD \
1494e7470ee8SMel Gorman 	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
1495e7470ee8SMel Gorman 	 1 << BH_Delay | 1 << BH_Unwritten)
1496e7470ee8SMel Gorman 
1497858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh)
14981da177e4SLinus Torvalds {
1499b0192296SUros Bizjak 	unsigned long b_state;
1500e7470ee8SMel Gorman 
15011da177e4SLinus Torvalds 	lock_buffer(bh);
15021da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
15031da177e4SLinus Torvalds 	bh->b_bdev = NULL;
1504b0192296SUros Bizjak 	b_state = READ_ONCE(bh->b_state);
1505b0192296SUros Bizjak 	do {
1506b0192296SUros Bizjak 	} while (!try_cmpxchg(&bh->b_state, &b_state,
1507b0192296SUros Bizjak 			      b_state & ~BUFFER_FLAGS_DISCARD));
15081da177e4SLinus Torvalds 	unlock_buffer(bh);
15091da177e4SLinus Torvalds }
15101da177e4SLinus Torvalds 
15111da177e4SLinus Torvalds /**
15127ba13abbSMatthew Wilcox (Oracle)  * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
15137ba13abbSMatthew Wilcox (Oracle)  * @folio: The folio which is affected.
1514d47992f8SLukas Czerner  * @offset: start of the range to invalidate
1515d47992f8SLukas Czerner  * @length: length of the range to invalidate
15161da177e4SLinus Torvalds  *
15177ba13abbSMatthew Wilcox (Oracle)  * block_invalidate_folio() is called when all or part of the folio has been
15181da177e4SLinus Torvalds  * invalidated by a truncate operation.
15191da177e4SLinus Torvalds  *
15207ba13abbSMatthew Wilcox (Oracle)  * block_invalidate_folio() does not have to release all buffers, but it must
15211da177e4SLinus Torvalds  * ensure that no dirty buffer is left outside @offset and that no I/O
15221da177e4SLinus Torvalds  * is underway against any of the blocks which are outside the truncation
15231da177e4SLinus Torvalds  * point.  Because the caller is about to free (and possibly reuse) those
15241da177e4SLinus Torvalds  * blocks on-disk.
15251da177e4SLinus Torvalds  */
15267ba13abbSMatthew Wilcox (Oracle) void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
15271da177e4SLinus Torvalds {
15281da177e4SLinus Torvalds 	struct buffer_head *head, *bh, *next;
15297ba13abbSMatthew Wilcox (Oracle) 	size_t curr_off = 0;
15307ba13abbSMatthew Wilcox (Oracle) 	size_t stop = length + offset;
15311da177e4SLinus Torvalds 
15327ba13abbSMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
15331da177e4SLinus Torvalds 
1534d47992f8SLukas Czerner 	/*
1535d47992f8SLukas Czerner 	 * Check for overflow
1536d47992f8SLukas Czerner 	 */
15377ba13abbSMatthew Wilcox (Oracle) 	BUG_ON(stop > folio_size(folio) || stop < length);
1538d47992f8SLukas Czerner 
15397ba13abbSMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
15407ba13abbSMatthew Wilcox (Oracle) 	if (!head)
15417ba13abbSMatthew Wilcox (Oracle) 		return;
15427ba13abbSMatthew Wilcox (Oracle) 
15431da177e4SLinus Torvalds 	bh = head;
15441da177e4SLinus Torvalds 	do {
15457ba13abbSMatthew Wilcox (Oracle) 		size_t next_off = curr_off + bh->b_size;
15461da177e4SLinus Torvalds 		next = bh->b_this_page;
15471da177e4SLinus Torvalds 
15481da177e4SLinus Torvalds 		/*
1549d47992f8SLukas Czerner 		 * Are we still fully in range ?
1550d47992f8SLukas Czerner 		 * Are we still fully in range?
1551d47992f8SLukas Czerner 		if (next_off > stop)
1552d47992f8SLukas Czerner 			goto out;
1553d47992f8SLukas Czerner 
1554d47992f8SLukas Czerner 		/*
15551da177e4SLinus Torvalds 		 * is this block fully invalidated?
15561da177e4SLinus Torvalds 		 */
15571da177e4SLinus Torvalds 		if (offset <= curr_off)
15581da177e4SLinus Torvalds 			discard_buffer(bh);
15591da177e4SLinus Torvalds 		curr_off = next_off;
15601da177e4SLinus Torvalds 		bh = next;
15611da177e4SLinus Torvalds 	} while (bh != head);
15621da177e4SLinus Torvalds 
15631da177e4SLinus Torvalds 	/*
15647ba13abbSMatthew Wilcox (Oracle) 	 * We release buffers only if the entire folio is being invalidated.
15651da177e4SLinus Torvalds 	 * The get_block cached value has been unconditionally invalidated,
15661da177e4SLinus Torvalds 	 * so real IO is not possible anymore.
15671da177e4SLinus Torvalds 	 */
15687ba13abbSMatthew Wilcox (Oracle) 	if (length == folio_size(folio))
15697ba13abbSMatthew Wilcox (Oracle) 		filemap_release_folio(folio, 0);
15701da177e4SLinus Torvalds out:
15712ff28e22SNeilBrown 	return;
15721da177e4SLinus Torvalds }
15737ba13abbSMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_invalidate_folio);
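
/*
 * Example (a minimal sketch, not part of fs/buffer.c):
 * block_invalidate_folio() is normally wired straight into a
 * buffer-backed filesystem's address_space_operations next to the other
 * buffer_head helpers.  All foo_* names, including foo_get_block, are
 * hypothetical.
 */
static int foo_get_block(struct inode *inode, sector_t iblock,
			 struct buffer_head *bh, int create);

static int foo_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, foo_get_block);
}

static const struct address_space_operations foo_aops = {
	.dirty_folio		= block_dirty_folio,
	.invalidate_folio	= block_invalidate_folio,
	.read_folio		= foo_read_folio,
};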
15741da177e4SLinus Torvalds 
1575d47992f8SLukas Czerner 
15761da177e4SLinus Torvalds /*
15771da177e4SLinus Torvalds  * We attach and possibly dirty the buffers atomically wrt
1578e621900aSMatthew Wilcox (Oracle)  * block_dirty_folio() via private_lock.  try_to_free_buffers
15791da177e4SLinus Torvalds  * is already excluded via the page lock.
15801da177e4SLinus Torvalds  */
15811da177e4SLinus Torvalds void create_empty_buffers(struct page *page,
15821da177e4SLinus Torvalds 			unsigned long blocksize, unsigned long b_state)
15831da177e4SLinus Torvalds {
15841da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *tail;
15851da177e4SLinus Torvalds 
1586640ab98fSJens Axboe 	head = alloc_page_buffers(page, blocksize, true);
15871da177e4SLinus Torvalds 	bh = head;
15881da177e4SLinus Torvalds 	do {
15891da177e4SLinus Torvalds 		bh->b_state |= b_state;
15901da177e4SLinus Torvalds 		tail = bh;
15911da177e4SLinus Torvalds 		bh = bh->b_this_page;
15921da177e4SLinus Torvalds 	} while (bh);
15931da177e4SLinus Torvalds 	tail->b_this_page = head;
15941da177e4SLinus Torvalds 
15951da177e4SLinus Torvalds 	spin_lock(&page->mapping->private_lock);
15961da177e4SLinus Torvalds 	if (PageUptodate(page) || PageDirty(page)) {
15971da177e4SLinus Torvalds 		bh = head;
15981da177e4SLinus Torvalds 		do {
15991da177e4SLinus Torvalds 			if (PageDirty(page))
16001da177e4SLinus Torvalds 				set_buffer_dirty(bh);
16011da177e4SLinus Torvalds 			if (PageUptodate(page))
16021da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
16031da177e4SLinus Torvalds 			bh = bh->b_this_page;
16041da177e4SLinus Torvalds 		} while (bh != head);
16051da177e4SLinus Torvalds 	}
160645dcfc27SGuoqing Jiang 	attach_page_private(page, head);
16071da177e4SLinus Torvalds 	spin_unlock(&page->mapping->private_lock);
16081da177e4SLinus Torvalds }
16091da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers);
16101da177e4SLinus Torvalds 
161129f3ad7dSJan Kara /**
161229f3ad7dSJan Kara  * clean_bdev_aliases - clean a range of buffers in a block device
161329f3ad7dSJan Kara  * @bdev: Block device to clean buffers in
161429f3ad7dSJan Kara  * @block: Start of a range of blocks to clean
161529f3ad7dSJan Kara  * @len: Number of blocks to clean
16161da177e4SLinus Torvalds  *
161729f3ad7dSJan Kara  * We are taking a range of blocks for data and we don't want writeback of any
161829f3ad7dSJan Kara  * buffer-cache aliases from the moment this function returns until the
161929f3ad7dSJan Kara  * moment something explicitly marks the buffer dirty (hopefully that
162029f3ad7dSJan Kara  * will not happen until we free that block ;-) We don't even need to mark
162129f3ad7dSJan Kara  * it not-uptodate - nobody can expect anything from a newly allocated buffer
162229f3ad7dSJan Kara  * anyway. We used to use unmap_buffer() for such invalidation, but that was
162329f3ad7dSJan Kara  * wrong. We definitely don't want to mark the alias unmapped, for example - it
162429f3ad7dSJan Kara  * would confuse anyone who might pick it with bread() afterwards...
162529f3ad7dSJan Kara  *
162629f3ad7dSJan Kara  * Also..  Note that bforget() doesn't lock the buffer.  So there can be
162729f3ad7dSJan Kara  * writeout I/O going on against recently-freed buffers.  We don't wait on that
162829f3ad7dSJan Kara  * I/O in bforget() - it's more efficient to wait on the I/O only if we really
162929f3ad7dSJan Kara  * need to.  That happens here.
16301da177e4SLinus Torvalds  */
163129f3ad7dSJan Kara void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
16321da177e4SLinus Torvalds {
163329f3ad7dSJan Kara 	struct inode *bd_inode = bdev->bd_inode;
163429f3ad7dSJan Kara 	struct address_space *bd_mapping = bd_inode->i_mapping;
16359e0b6f31SMatthew Wilcox (Oracle) 	struct folio_batch fbatch;
163629f3ad7dSJan Kara 	pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
163729f3ad7dSJan Kara 	pgoff_t end;
1638c10f778dSJan Kara 	int i, count;
163929f3ad7dSJan Kara 	struct buffer_head *bh;
164029f3ad7dSJan Kara 	struct buffer_head *head;
16411da177e4SLinus Torvalds 
164229f3ad7dSJan Kara 	end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
16439e0b6f31SMatthew Wilcox (Oracle) 	folio_batch_init(&fbatch);
16449e0b6f31SMatthew Wilcox (Oracle) 	while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
16459e0b6f31SMatthew Wilcox (Oracle) 		count = folio_batch_count(&fbatch);
1646c10f778dSJan Kara 		for (i = 0; i < count; i++) {
16479e0b6f31SMatthew Wilcox (Oracle) 			struct folio *folio = fbatch.folios[i];
16481da177e4SLinus Torvalds 
16499e0b6f31SMatthew Wilcox (Oracle) 			if (!folio_buffers(folio))
165029f3ad7dSJan Kara 				continue;
165129f3ad7dSJan Kara 			/*
16529e0b6f31SMatthew Wilcox (Oracle) 			 * We use folio lock instead of bd_mapping->private_lock
165329f3ad7dSJan Kara 			 * to pin buffers here since we can afford to sleep and
165429f3ad7dSJan Kara 			 * it scales better than a global spinlock.
165529f3ad7dSJan Kara 			 */
16569e0b6f31SMatthew Wilcox (Oracle) 			folio_lock(folio);
16579e0b6f31SMatthew Wilcox (Oracle) 			/* Recheck when the folio is locked which pins bhs */
16589e0b6f31SMatthew Wilcox (Oracle) 			head = folio_buffers(folio);
16599e0b6f31SMatthew Wilcox (Oracle) 			if (!head)
166029f3ad7dSJan Kara 				goto unlock_page;
166129f3ad7dSJan Kara 			bh = head;
166229f3ad7dSJan Kara 			do {
16636c006a9dSChandan Rajendra 				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
166429f3ad7dSJan Kara 					goto next;
166529f3ad7dSJan Kara 				if (bh->b_blocknr >= block + len)
166629f3ad7dSJan Kara 					break;
166729f3ad7dSJan Kara 				clear_buffer_dirty(bh);
166829f3ad7dSJan Kara 				wait_on_buffer(bh);
166929f3ad7dSJan Kara 				clear_buffer_req(bh);
167029f3ad7dSJan Kara next:
167129f3ad7dSJan Kara 				bh = bh->b_this_page;
167229f3ad7dSJan Kara 			} while (bh != head);
167329f3ad7dSJan Kara unlock_page:
16749e0b6f31SMatthew Wilcox (Oracle) 			folio_unlock(folio);
167529f3ad7dSJan Kara 		}
16769e0b6f31SMatthew Wilcox (Oracle) 		folio_batch_release(&fbatch);
167729f3ad7dSJan Kara 		cond_resched();
1678c10f778dSJan Kara 		/* End of range already reached? */
1679c10f778dSJan Kara 		if (index > end || !index)
1680c10f778dSJan Kara 			break;
16811da177e4SLinus Torvalds 	}
16821da177e4SLinus Torvalds }
168329f3ad7dSJan Kara EXPORT_SYMBOL(clean_bdev_aliases);
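
/*
 * Example (a minimal sketch, not part of fs/buffer.c): a hypothetical
 * allocator hook.  Freshly allocated data blocks get their stale
 * blockdev aliases invalidated before any new I/O is issued, just as
 * __block_write_full_page() below does per-buffer via
 * clean_bdev_bh_alias().
 */
static void foo_blocks_allocated(struct block_device *bdev,
				 sector_t first, sector_t count)
{
	clean_bdev_aliases(bdev, first, count);
}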
16841da177e4SLinus Torvalds 
16851da177e4SLinus Torvalds /*
168645bce8f3SLinus Torvalds  * Size is a power-of-two in the range 512..PAGE_SIZE,
168745bce8f3SLinus Torvalds  * and the case we care about most is PAGE_SIZE.
168845bce8f3SLinus Torvalds  *
168945bce8f3SLinus Torvalds  * So this *could* possibly be written with those
169045bce8f3SLinus Torvalds  * constraints in mind (relevant mostly if some
169145bce8f3SLinus Torvalds  * architecture has a slow bit-scan instruction)
169245bce8f3SLinus Torvalds  */
169345bce8f3SLinus Torvalds static inline int block_size_bits(unsigned int blocksize)
169445bce8f3SLinus Torvalds {
169545bce8f3SLinus Torvalds 	return ilog2(blocksize);
169645bce8f3SLinus Torvalds }
169745bce8f3SLinus Torvalds 
169845bce8f3SLinus Torvalds static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
169945bce8f3SLinus Torvalds {
170045bce8f3SLinus Torvalds 	BUG_ON(!PageLocked(page));
170145bce8f3SLinus Torvalds 
170245bce8f3SLinus Torvalds 	if (!page_has_buffers(page))
17036aa7de05SMark Rutland 		create_empty_buffers(page, 1 << READ_ONCE(inode->i_blkbits),
17046aa7de05SMark Rutland 				     b_state);
170545bce8f3SLinus Torvalds 	return page_buffers(page);
170645bce8f3SLinus Torvalds }
170745bce8f3SLinus Torvalds 
170845bce8f3SLinus Torvalds /*
17091da177e4SLinus Torvalds  * NOTE! All mapped/uptodate combinations are valid:
17101da177e4SLinus Torvalds  *
17111da177e4SLinus Torvalds  *	Mapped	Uptodate	Meaning
17121da177e4SLinus Torvalds  *
17131da177e4SLinus Torvalds  *	No	No		"unknown" - must do get_block()
17141da177e4SLinus Torvalds  *	No	Yes		"hole" - zero-filled
17151da177e4SLinus Torvalds  *	Yes	No		"allocated" - allocated on disk, not read in
17161da177e4SLinus Torvalds  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
17171da177e4SLinus Torvalds  *
17181da177e4SLinus Torvalds  * "Dirty" is valid only with the last case (mapped+uptodate).
17191da177e4SLinus Torvalds  */
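
/*
 * Example (a minimal sketch, not part of fs/buffer.c): the first row of
 * the table above, expressed with the real bh test macros; a
 * hypothetical predicate deciding whether get_block() must still be
 * consulted.
 */
static bool foo_bh_state_unknown(struct buffer_head *bh)
{
	/* Neither a hole (unmapped + uptodate) nor allocated (mapped). */
	return !buffer_mapped(bh) && !buffer_uptodate(bh);
}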
17201da177e4SLinus Torvalds 
17211da177e4SLinus Torvalds /*
17221da177e4SLinus Torvalds  * While block_write_full_page is writing back the dirty buffers under
17231da177e4SLinus Torvalds  * the page lock, whoever dirtied the buffers may decide to clean them
17241da177e4SLinus Torvalds  * again at any time.  We handle that by only looking at the buffer
17251da177e4SLinus Torvalds  * state inside lock_buffer().
17261da177e4SLinus Torvalds  *
17271da177e4SLinus Torvalds  * If block_write_full_page() is called for regular writeback
17281da177e4SLinus Torvalds  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
17291da177e4SLinus Torvalds  * locked buffer.  This can only happen if someone has written the buffer
17301da177e4SLinus Torvalds  * directly, with submit_bh().  At the address_space level PageWriteback
17311da177e4SLinus Torvalds  * prevents this contention from occurring.
17326e34eeddSTheodore Ts'o  *
17336e34eeddSTheodore Ts'o  * If block_write_full_page() is called with wbc->sync_mode ==
173470fd7614SChristoph Hellwig  * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1735721a9602SJens Axboe  * causes the writes to be flagged as synchronous writes.
17361da177e4SLinus Torvalds  */
1737b4bba389SBenjamin Marzinski int __block_write_full_page(struct inode *inode, struct page *page,
173835c80d5fSChris Mason 			get_block_t *get_block, struct writeback_control *wbc,
173935c80d5fSChris Mason 			bh_end_io_t *handler)
17401da177e4SLinus Torvalds {
17411da177e4SLinus Torvalds 	int err;
17421da177e4SLinus Torvalds 	sector_t block;
17431da177e4SLinus Torvalds 	sector_t last_block;
1744f0fbd5fcSAndrew Morton 	struct buffer_head *bh, *head;
174545bce8f3SLinus Torvalds 	unsigned int blocksize, bbits;
17461da177e4SLinus Torvalds 	int nr_underway = 0;
17473ae72869SBart Van Assche 	blk_opf_t write_flags = wbc_to_write_flags(wbc);
17481da177e4SLinus Torvalds 
174945bce8f3SLinus Torvalds 	head = create_page_buffers(page, inode,
17501da177e4SLinus Torvalds 					(1 << BH_Dirty)|(1 << BH_Uptodate));
17511da177e4SLinus Torvalds 
17521da177e4SLinus Torvalds 	/*
1753e621900aSMatthew Wilcox (Oracle) 	 * Be very careful.  We have no exclusion from block_dirty_folio
17541da177e4SLinus Torvalds 	 * here, and the (potentially unmapped) buffers may become dirty at
17551da177e4SLinus Torvalds 	 * any time.  If a buffer becomes dirty here after we've inspected it
17561da177e4SLinus Torvalds 	 * then we just miss that fact, and the page stays dirty.
17571da177e4SLinus Torvalds 	 *
1758e621900aSMatthew Wilcox (Oracle) 	 * Buffers outside i_size may be dirtied by block_dirty_folio;
17591da177e4SLinus Torvalds 	 * handle that here by just cleaning them.
17601da177e4SLinus Torvalds 	 */
17611da177e4SLinus Torvalds 
17621da177e4SLinus Torvalds 	bh = head;
176345bce8f3SLinus Torvalds 	blocksize = bh->b_size;
176445bce8f3SLinus Torvalds 	bbits = block_size_bits(blocksize);
176545bce8f3SLinus Torvalds 
176609cbfeafSKirill A. Shutemov 	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
176745bce8f3SLinus Torvalds 	last_block = (i_size_read(inode) - 1) >> bbits;
17681da177e4SLinus Torvalds 
17691da177e4SLinus Torvalds 	/*
17701da177e4SLinus Torvalds 	 * Get all the dirty buffers mapped to disk addresses and
17711da177e4SLinus Torvalds 	 * handle any aliases from the underlying blockdev's mapping.
17721da177e4SLinus Torvalds 	 */
17731da177e4SLinus Torvalds 	do {
17741da177e4SLinus Torvalds 		if (block > last_block) {
17751da177e4SLinus Torvalds 			/*
17761da177e4SLinus Torvalds 			 * mapped buffers outside i_size will occur, because
17771da177e4SLinus Torvalds 			 * this page can be outside i_size when there is a
17781da177e4SLinus Torvalds 			 * truncate in progress.
17791da177e4SLinus Torvalds 			 */
17801da177e4SLinus Torvalds 			/*
17811da177e4SLinus Torvalds 			 * The buffer was zeroed by block_write_full_page()
17821da177e4SLinus Torvalds 			 */
17831da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
17841da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
178529a814d2SAlex Tomas 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
178629a814d2SAlex Tomas 			   buffer_dirty(bh)) {
1787b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
17881da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
17891da177e4SLinus Torvalds 			if (err)
17901da177e4SLinus Torvalds 				goto recover;
179129a814d2SAlex Tomas 			clear_buffer_delay(bh);
17921da177e4SLinus Torvalds 			if (buffer_new(bh)) {
17931da177e4SLinus Torvalds 				/* blockdev mappings never come here */
17941da177e4SLinus Torvalds 				clear_buffer_new(bh);
1795e64855c6SJan Kara 				clean_bdev_bh_alias(bh);
17961da177e4SLinus Torvalds 			}
17971da177e4SLinus Torvalds 		}
17981da177e4SLinus Torvalds 		bh = bh->b_this_page;
17991da177e4SLinus Torvalds 		block++;
18001da177e4SLinus Torvalds 	} while (bh != head);
18011da177e4SLinus Torvalds 
18021da177e4SLinus Torvalds 	do {
18031da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
18041da177e4SLinus Torvalds 			continue;
18051da177e4SLinus Torvalds 		/*
18061da177e4SLinus Torvalds 		 * If it's a fully non-blocking write attempt and we cannot
18071da177e4SLinus Torvalds 		 * lock the buffer then redirty the page.  Note that this can
18085b0830cbSJens Axboe 		 * potentially cause a busy-wait loop from writeback threads
18095b0830cbSJens Axboe 		 * and kswapd activity, but those code paths have their own
18105b0830cbSJens Axboe 		 * higher-level throttling.
18111da177e4SLinus Torvalds 		 */
18121b430beeSWu Fengguang 		if (wbc->sync_mode != WB_SYNC_NONE) {
18131da177e4SLinus Torvalds 			lock_buffer(bh);
1814ca5de404SNick Piggin 		} else if (!trylock_buffer(bh)) {
18151da177e4SLinus Torvalds 			redirty_page_for_writepage(wbc, page);
18161da177e4SLinus Torvalds 			continue;
18171da177e4SLinus Torvalds 		}
18181da177e4SLinus Torvalds 		if (test_clear_buffer_dirty(bh)) {
181935c80d5fSChris Mason 			mark_buffer_async_write_endio(bh, handler);
18201da177e4SLinus Torvalds 		} else {
18211da177e4SLinus Torvalds 			unlock_buffer(bh);
18221da177e4SLinus Torvalds 		}
18231da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
18241da177e4SLinus Torvalds 
18251da177e4SLinus Torvalds 	/*
18261da177e4SLinus Torvalds 	 * The page and its buffers are protected by PageWriteback(), so we can
18271da177e4SLinus Torvalds 	 * drop the bh refcounts early.
18281da177e4SLinus Torvalds 	 */
18291da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
18301da177e4SLinus Torvalds 	set_page_writeback(page);
18311da177e4SLinus Torvalds 
18321da177e4SLinus Torvalds 	do {
18331da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
18341da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
18351420c4a5SBart Van Assche 			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
18361da177e4SLinus Torvalds 			nr_underway++;
1837ad576e63SNick Piggin 		}
18381da177e4SLinus Torvalds 		bh = next;
18391da177e4SLinus Torvalds 	} while (bh != head);
184005937baaSAndrew Morton 	unlock_page(page);
18411da177e4SLinus Torvalds 
18421da177e4SLinus Torvalds 	err = 0;
18431da177e4SLinus Torvalds done:
18441da177e4SLinus Torvalds 	if (nr_underway == 0) {
18451da177e4SLinus Torvalds 		/*
18461da177e4SLinus Torvalds 		 * The page was marked dirty, but the buffers were
18471da177e4SLinus Torvalds 		 * clean.  Someone wrote them back by hand with
184879f59784SZhang Yi 		 * write_dirty_buffer/submit_bh.  A rare case.
18491da177e4SLinus Torvalds 		 */
18501da177e4SLinus Torvalds 		end_page_writeback(page);
18513d67f2d7SNick Piggin 
18521da177e4SLinus Torvalds 		/*
18531da177e4SLinus Torvalds 		 * The page and buffer_heads can be released at any time from
18541da177e4SLinus Torvalds 		 * here on.
18551da177e4SLinus Torvalds 		 */
18561da177e4SLinus Torvalds 	}
18571da177e4SLinus Torvalds 	return err;
18581da177e4SLinus Torvalds 
18591da177e4SLinus Torvalds recover:
18601da177e4SLinus Torvalds 	/*
18611da177e4SLinus Torvalds 	 * ENOSPC, or some other error.  We may already have added some
18621da177e4SLinus Torvalds 	 * blocks to the file, so we need to write these out to avoid
18631da177e4SLinus Torvalds 	 * exposing stale data.
18641da177e4SLinus Torvalds 	 * The page is currently locked and not marked for writeback
18651da177e4SLinus Torvalds 	 */
18661da177e4SLinus Torvalds 	bh = head;
18671da177e4SLinus Torvalds 	/* Recovery: lock and submit the mapped buffers */
18681da177e4SLinus Torvalds 	do {
186929a814d2SAlex Tomas 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
187029a814d2SAlex Tomas 		    !buffer_delay(bh)) {
18711da177e4SLinus Torvalds 			lock_buffer(bh);
187235c80d5fSChris Mason 			mark_buffer_async_write_endio(bh, handler);
18731da177e4SLinus Torvalds 		} else {
18741da177e4SLinus Torvalds 			/*
18751da177e4SLinus Torvalds 			 * The buffer may have been set dirty during
18761da177e4SLinus Torvalds 			 * attachment to a dirty page.
18771da177e4SLinus Torvalds 			 */
18781da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
18791da177e4SLinus Torvalds 		}
18801da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
18811da177e4SLinus Torvalds 	SetPageError(page);
18821da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
18837e4c3690SAndrew Morton 	mapping_set_error(page->mapping, err);
18841da177e4SLinus Torvalds 	set_page_writeback(page);
18851da177e4SLinus Torvalds 	do {
18861da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
18871da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
18881da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
18891420c4a5SBart Van Assche 			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
18901da177e4SLinus Torvalds 			nr_underway++;
1891ad576e63SNick Piggin 		}
18921da177e4SLinus Torvalds 		bh = next;
18931da177e4SLinus Torvalds 	} while (bh != head);
1894ffda9d30SNick Piggin 	unlock_page(page);
18951da177e4SLinus Torvalds 	goto done;
18961da177e4SLinus Torvalds }
1897b4bba389SBenjamin Marzinski EXPORT_SYMBOL(__block_write_full_page);
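
/*
 * Example (a minimal sketch, not part of fs/buffer.c): filesystems
 * rarely call __block_write_full_page() directly; the usual aops entry
 * point wraps block_write_full_page() with the filesystem's own
 * get_block.  The foo_* names are hypothetical.
 */
static int foo_get_block(struct inode *inode, sector_t iblock,
			 struct buffer_head *bh, int create);

static int foo_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, foo_get_block, wbc);
}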
18981da177e4SLinus Torvalds 
1899afddba49SNick Piggin /*
1900afddba49SNick Piggin  * If a page has any new buffers, zero them out here, and mark them uptodate
1901afddba49SNick Piggin  * and dirty so they'll be written out (in order to prevent uninitialised
1902afddba49SNick Piggin  * block data from leaking). And clear the new bit.
1903afddba49SNick Piggin  */
1904afddba49SNick Piggin void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1905afddba49SNick Piggin {
1906afddba49SNick Piggin 	unsigned int block_start, block_end;
1907afddba49SNick Piggin 	struct buffer_head *head, *bh;
1908afddba49SNick Piggin 
1909afddba49SNick Piggin 	BUG_ON(!PageLocked(page));
1910afddba49SNick Piggin 	if (!page_has_buffers(page))
1911afddba49SNick Piggin 		return;
1912afddba49SNick Piggin 
1913afddba49SNick Piggin 	bh = head = page_buffers(page);
1914afddba49SNick Piggin 	block_start = 0;
1915afddba49SNick Piggin 	do {
1916afddba49SNick Piggin 		block_end = block_start + bh->b_size;
1917afddba49SNick Piggin 
1918afddba49SNick Piggin 		if (buffer_new(bh)) {
1919afddba49SNick Piggin 			if (block_end > from && block_start < to) {
1920afddba49SNick Piggin 				if (!PageUptodate(page)) {
1921afddba49SNick Piggin 					unsigned start, size;
1922afddba49SNick Piggin 
1923afddba49SNick Piggin 					start = max(from, block_start);
1924afddba49SNick Piggin 					size = min(to, block_end) - start;
1925afddba49SNick Piggin 
1926eebd2aa3SChristoph Lameter 					zero_user(page, start, size);
1927afddba49SNick Piggin 					set_buffer_uptodate(bh);
1928afddba49SNick Piggin 				}
1929afddba49SNick Piggin 
1930afddba49SNick Piggin 				clear_buffer_new(bh);
1931afddba49SNick Piggin 				mark_buffer_dirty(bh);
1932afddba49SNick Piggin 			}
1933afddba49SNick Piggin 		}
1934afddba49SNick Piggin 
1935afddba49SNick Piggin 		block_start = block_end;
1936afddba49SNick Piggin 		bh = bh->b_this_page;
1937afddba49SNick Piggin 	} while (bh != head);
1938afddba49SNick Piggin }
1939afddba49SNick Piggin EXPORT_SYMBOL(page_zero_new_buffers);
1940afddba49SNick Piggin 
1941ae259a9cSChristoph Hellwig static void
1942ae259a9cSChristoph Hellwig iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
19436d49cc85SChristoph Hellwig 		const struct iomap *iomap)
1944ae259a9cSChristoph Hellwig {
1945ae259a9cSChristoph Hellwig 	loff_t offset = block << inode->i_blkbits;
1946ae259a9cSChristoph Hellwig 
1947ae259a9cSChristoph Hellwig 	bh->b_bdev = iomap->bdev;
1948ae259a9cSChristoph Hellwig 
1949ae259a9cSChristoph Hellwig 	/*
1950ae259a9cSChristoph Hellwig 	 * The block number points to an offset in the file we need to map;
1951ae259a9cSChristoph Hellwig 	 * the iomap contains the offset at which the map starts.  If the map
1952ae259a9cSChristoph Hellwig 	 * ends before the current block, then do not map the buffer and let
1953ae259a9cSChristoph Hellwig 	 * the caller handle it.
1954ae259a9cSChristoph Hellwig 	 */
1955ae259a9cSChristoph Hellwig 	BUG_ON(offset >= iomap->offset + iomap->length);
1956ae259a9cSChristoph Hellwig 
1957ae259a9cSChristoph Hellwig 	switch (iomap->type) {
1958ae259a9cSChristoph Hellwig 	case IOMAP_HOLE:
1959ae259a9cSChristoph Hellwig 		/*
1960ae259a9cSChristoph Hellwig 		 * If the buffer is not up to date or beyond the current EOF,
1961ae259a9cSChristoph Hellwig 		 * we need to mark it as new to ensure sub-block zeroing is
1962ae259a9cSChristoph Hellwig 		 * executed if necessary.
1963ae259a9cSChristoph Hellwig 		 */
1964ae259a9cSChristoph Hellwig 		if (!buffer_uptodate(bh) ||
1965ae259a9cSChristoph Hellwig 		    (offset >= i_size_read(inode)))
1966ae259a9cSChristoph Hellwig 			set_buffer_new(bh);
1967ae259a9cSChristoph Hellwig 		break;
1968ae259a9cSChristoph Hellwig 	case IOMAP_DELALLOC:
1969ae259a9cSChristoph Hellwig 		if (!buffer_uptodate(bh) ||
1970ae259a9cSChristoph Hellwig 		    (offset >= i_size_read(inode)))
1971ae259a9cSChristoph Hellwig 			set_buffer_new(bh);
1972ae259a9cSChristoph Hellwig 		set_buffer_uptodate(bh);
1973ae259a9cSChristoph Hellwig 		set_buffer_mapped(bh);
1974ae259a9cSChristoph Hellwig 		set_buffer_delay(bh);
1975ae259a9cSChristoph Hellwig 		break;
1976ae259a9cSChristoph Hellwig 	case IOMAP_UNWRITTEN:
1977ae259a9cSChristoph Hellwig 		/*
19783d7b6b21SAndreas Gruenbacher 		 * For unwritten regions, we always need to ensure that regions
19793d7b6b21SAndreas Gruenbacher 		 * in the block we are not writing to are zeroed. Mark the
19803d7b6b21SAndreas Gruenbacher 		 * buffer as new to ensure this.
1981ae259a9cSChristoph Hellwig 		 */
1982ae259a9cSChristoph Hellwig 		set_buffer_new(bh);
1983ae259a9cSChristoph Hellwig 		set_buffer_unwritten(bh);
1984df561f66SGustavo A. R. Silva 		fallthrough;
1985ae259a9cSChristoph Hellwig 	case IOMAP_MAPPED:
19863d7b6b21SAndreas Gruenbacher 		if ((iomap->flags & IOMAP_F_NEW) ||
19873d7b6b21SAndreas Gruenbacher 		    offset >= i_size_read(inode))
1988ae259a9cSChristoph Hellwig 			set_buffer_new(bh);
198919fe5f64SAndreas Gruenbacher 		bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
199019fe5f64SAndreas Gruenbacher 				inode->i_blkbits;
1991ae259a9cSChristoph Hellwig 		set_buffer_mapped(bh);
1992ae259a9cSChristoph Hellwig 		break;
1993ae259a9cSChristoph Hellwig 	}
1994ae259a9cSChristoph Hellwig }
1995ae259a9cSChristoph Hellwig 
1996d1bd0b4eSMatthew Wilcox (Oracle) int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
19976d49cc85SChristoph Hellwig 		get_block_t *get_block, const struct iomap *iomap)
19981da177e4SLinus Torvalds {
199909cbfeafSKirill A. Shutemov 	unsigned from = pos & (PAGE_SIZE - 1);
2000ebdec241SChristoph Hellwig 	unsigned to = from + len;
2001d1bd0b4eSMatthew Wilcox (Oracle) 	struct inode *inode = folio->mapping->host;
20021da177e4SLinus Torvalds 	unsigned block_start, block_end;
20031da177e4SLinus Torvalds 	sector_t block;
20041da177e4SLinus Torvalds 	int err = 0;
20051da177e4SLinus Torvalds 	unsigned blocksize, bbits;
20061da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
20071da177e4SLinus Torvalds 
2008d1bd0b4eSMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
200909cbfeafSKirill A. Shutemov 	BUG_ON(from > PAGE_SIZE);
201009cbfeafSKirill A. Shutemov 	BUG_ON(to > PAGE_SIZE);
20111da177e4SLinus Torvalds 	BUG_ON(from > to);
20121da177e4SLinus Torvalds 
2013d1bd0b4eSMatthew Wilcox (Oracle) 	head = create_page_buffers(&folio->page, inode, 0);
201445bce8f3SLinus Torvalds 	blocksize = head->b_size;
201545bce8f3SLinus Torvalds 	bbits = block_size_bits(blocksize);
20161da177e4SLinus Torvalds 
2017d1bd0b4eSMatthew Wilcox (Oracle) 	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
20181da177e4SLinus Torvalds 
20191da177e4SLinus Torvalds 	for(bh = head, block_start = 0; bh != head || !block_start;
20201da177e4SLinus Torvalds 	    block++, block_start=block_end, bh = bh->b_this_page) {
20211da177e4SLinus Torvalds 		block_end = block_start + blocksize;
20221da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
2023d1bd0b4eSMatthew Wilcox (Oracle) 			if (folio_test_uptodate(folio)) {
20241da177e4SLinus Torvalds 				if (!buffer_uptodate(bh))
20251da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
20261da177e4SLinus Torvalds 			}
20271da177e4SLinus Torvalds 			continue;
20281da177e4SLinus Torvalds 		}
20291da177e4SLinus Torvalds 		if (buffer_new(bh))
20301da177e4SLinus Torvalds 			clear_buffer_new(bh);
20311da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
2032b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
2033ae259a9cSChristoph Hellwig 			if (get_block) {
20341da177e4SLinus Torvalds 				err = get_block(inode, block, bh, 1);
20351da177e4SLinus Torvalds 				if (err)
2036f3ddbdc6SNick Piggin 					break;
2037ae259a9cSChristoph Hellwig 			} else {
2038ae259a9cSChristoph Hellwig 				iomap_to_bh(inode, block, bh, iomap);
2039ae259a9cSChristoph Hellwig 			}
2040ae259a9cSChristoph Hellwig 
20411da177e4SLinus Torvalds 			if (buffer_new(bh)) {
2042e64855c6SJan Kara 				clean_bdev_bh_alias(bh);
2043d1bd0b4eSMatthew Wilcox (Oracle) 				if (folio_test_uptodate(folio)) {
2044637aff46SNick Piggin 					clear_buffer_new(bh);
20451da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
2046637aff46SNick Piggin 					mark_buffer_dirty(bh);
20471da177e4SLinus Torvalds 					continue;
20481da177e4SLinus Torvalds 				}
2049eebd2aa3SChristoph Lameter 				if (block_end > to || block_start < from)
2050d1bd0b4eSMatthew Wilcox (Oracle) 					folio_zero_segments(folio,
2051eebd2aa3SChristoph Lameter 						to, block_end,
2052eebd2aa3SChristoph Lameter 						block_start, from);
20531da177e4SLinus Torvalds 				continue;
20541da177e4SLinus Torvalds 			}
20551da177e4SLinus Torvalds 		}
2056d1bd0b4eSMatthew Wilcox (Oracle) 		if (folio_test_uptodate(folio)) {
20571da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
20581da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
20591da177e4SLinus Torvalds 			continue;
20601da177e4SLinus Torvalds 		}
20611da177e4SLinus Torvalds 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
206233a266ddSDavid Chinner 		    !buffer_unwritten(bh) &&
20631da177e4SLinus Torvalds 		     (block_start < from || block_end > to)) {
2064e7ea1129SZhang Yi 			bh_read_nowait(bh, 0);
20651da177e4SLinus Torvalds 			*wait_bh++=bh;
20661da177e4SLinus Torvalds 		}
20671da177e4SLinus Torvalds 	}
20681da177e4SLinus Torvalds 	/*
20691da177e4SLinus Torvalds 	 * If we issued read requests - let them complete.
20701da177e4SLinus Torvalds 	 */
20711da177e4SLinus Torvalds 	while(wait_bh > wait) {
20721da177e4SLinus Torvalds 		wait_on_buffer(*--wait_bh);
20731da177e4SLinus Torvalds 		if (!buffer_uptodate(*wait_bh))
2074f3ddbdc6SNick Piggin 			err = -EIO;
20751da177e4SLinus Torvalds 	}
2076f9f07b6cSJan Kara 	if (unlikely(err))
2077d1bd0b4eSMatthew Wilcox (Oracle) 		page_zero_new_buffers(&folio->page, from, to);
20781da177e4SLinus Torvalds 	return err;
20791da177e4SLinus Torvalds }
2080ae259a9cSChristoph Hellwig 
2081ae259a9cSChristoph Hellwig int __block_write_begin(struct page *page, loff_t pos, unsigned len,
2082ae259a9cSChristoph Hellwig 		get_block_t *get_block)
2083ae259a9cSChristoph Hellwig {
2084d1bd0b4eSMatthew Wilcox (Oracle) 	return __block_write_begin_int(page_folio(page), pos, len, get_block,
2085d1bd0b4eSMatthew Wilcox (Oracle) 				       NULL);
2086ae259a9cSChristoph Hellwig }
2087ebdec241SChristoph Hellwig EXPORT_SYMBOL(__block_write_begin);
20881da177e4SLinus Torvalds 
20891da177e4SLinus Torvalds static int __block_commit_write(struct inode *inode, struct page *page,
20901da177e4SLinus Torvalds 		unsigned from, unsigned to)
20911da177e4SLinus Torvalds {
20921da177e4SLinus Torvalds 	unsigned block_start, block_end;
20931da177e4SLinus Torvalds 	int partial = 0;
20941da177e4SLinus Torvalds 	unsigned blocksize;
20951da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
20961da177e4SLinus Torvalds 
209745bce8f3SLinus Torvalds 	bh = head = page_buffers(page);
209845bce8f3SLinus Torvalds 	blocksize = bh->b_size;
20991da177e4SLinus Torvalds 
210045bce8f3SLinus Torvalds 	block_start = 0;
210145bce8f3SLinus Torvalds 	do {
21021da177e4SLinus Torvalds 		block_end = block_start + blocksize;
21031da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
21041da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
21051da177e4SLinus Torvalds 				partial = 1;
21061da177e4SLinus Torvalds 		} else {
21071da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
21081da177e4SLinus Torvalds 			mark_buffer_dirty(bh);
21091da177e4SLinus Torvalds 		}
21104ebd3aecSYang Guo 		if (buffer_new(bh))
2111afddba49SNick Piggin 			clear_buffer_new(bh);
211245bce8f3SLinus Torvalds 
211345bce8f3SLinus Torvalds 		block_start = block_end;
211445bce8f3SLinus Torvalds 		bh = bh->b_this_page;
211545bce8f3SLinus Torvalds 	} while (bh != head);
21161da177e4SLinus Torvalds 
21171da177e4SLinus Torvalds 	/*
21181da177e4SLinus Torvalds 	 * If this is a partial write which happened to make all buffers
21192c69e205SMatthew Wilcox (Oracle) 	 * uptodate then we can optimize away a bogus read_folio() for
21201da177e4SLinus Torvalds 	 * the next read(). Here we 'discover' whether the page went
21211da177e4SLinus Torvalds 	 * uptodate as a result of this (potentially partial) write.
21221da177e4SLinus Torvalds 	 */
21231da177e4SLinus Torvalds 	if (!partial)
21241da177e4SLinus Torvalds 		SetPageUptodate(page);
21251da177e4SLinus Torvalds 	return 0;
21261da177e4SLinus Torvalds }
21271da177e4SLinus Torvalds 
21281da177e4SLinus Torvalds /*
2129155130a4SChristoph Hellwig  * block_write_begin takes care of the basic task of block allocation and
2130155130a4SChristoph Hellwig  * bringing partial write blocks uptodate first.
2131155130a4SChristoph Hellwig  *
21327bb46a67Snpiggin@suse.de  * The filesystem needs to handle block truncation upon failure.
2133afddba49SNick Piggin  */
2134155130a4SChristoph Hellwig int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2135b3992d1eSMatthew Wilcox (Oracle) 		struct page **pagep, get_block_t *get_block)
2136afddba49SNick Piggin {
213709cbfeafSKirill A. Shutemov 	pgoff_t index = pos >> PAGE_SHIFT;
2138afddba49SNick Piggin 	struct page *page;
21396e1db88dSChristoph Hellwig 	int status;
2140afddba49SNick Piggin 
2141b7446e7cSMatthew Wilcox (Oracle) 	page = grab_cache_page_write_begin(mapping, index);
21426e1db88dSChristoph Hellwig 	if (!page)
21436e1db88dSChristoph Hellwig 		return -ENOMEM;
2144afddba49SNick Piggin 
21456e1db88dSChristoph Hellwig 	status = __block_write_begin(page, pos, len, get_block);
2146afddba49SNick Piggin 	if (unlikely(status)) {
2147afddba49SNick Piggin 		unlock_page(page);
214809cbfeafSKirill A. Shutemov 		put_page(page);
21496e1db88dSChristoph Hellwig 		page = NULL;
2150afddba49SNick Piggin 	}
2151afddba49SNick Piggin 
21526e1db88dSChristoph Hellwig 	*pagep = page;
2153afddba49SNick Piggin 	return status;
2154afddba49SNick Piggin }
2155afddba49SNick Piggin EXPORT_SYMBOL(block_write_begin);
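
/*
 * Example (illustrative sketch, not upstream code; the myfs_* names are
 * hypothetical): a filesystem with a conventional get_block routine can
 * implement ->write_begin by delegating here, pairing it with
 * generic_write_end() below.
 */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, pagep, myfs_get_block);
}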
2156afddba49SNick Piggin 
2157afddba49SNick Piggin int block_write_end(struct file *file, struct address_space *mapping,
2158afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2159afddba49SNick Piggin 			struct page *page, void *fsdata)
2160afddba49SNick Piggin {
2161afddba49SNick Piggin 	struct inode *inode = mapping->host;
2162afddba49SNick Piggin 	unsigned start;
2163afddba49SNick Piggin 
216409cbfeafSKirill A. Shutemov 	start = pos & (PAGE_SIZE - 1);
2165afddba49SNick Piggin 
2166afddba49SNick Piggin 	if (unlikely(copied < len)) {
2167afddba49SNick Piggin 		/*
21682c69e205SMatthew Wilcox (Oracle) 		 * The buffers that were written will now be uptodate, so
21692c69e205SMatthew Wilcox (Oracle) 		 * we don't have to worry about a read_folio reading them
21702c69e205SMatthew Wilcox (Oracle) 		 * and overwriting a partial write. However if we have
21712c69e205SMatthew Wilcox (Oracle) 		 * encountered a short write and only partially written
21722c69e205SMatthew Wilcox (Oracle) 		 * into a buffer, it will not be marked uptodate, so a
21732c69e205SMatthew Wilcox (Oracle) 		 * read_folio might come in and destroy our partial write.
2174afddba49SNick Piggin 		 *
2175afddba49SNick Piggin 		 * Do the simplest thing, and just treat any short write to a
2176afddba49SNick Piggin 		 * non uptodate page as a zero-length write, and force the
2177afddba49SNick Piggin 		 * caller to redo the whole thing.
2178afddba49SNick Piggin 		 */
2179afddba49SNick Piggin 		if (!PageUptodate(page))
2180afddba49SNick Piggin 			copied = 0;
2181afddba49SNick Piggin 
2182afddba49SNick Piggin 		page_zero_new_buffers(page, start+copied, start+len);
2183afddba49SNick Piggin 	}
2184afddba49SNick Piggin 	flush_dcache_page(page);
2185afddba49SNick Piggin 
2186afddba49SNick Piggin 	/* This could be a short (even 0-length) commit */
2187afddba49SNick Piggin 	__block_commit_write(inode, page, start, start+copied);
2188afddba49SNick Piggin 
2189afddba49SNick Piggin 	return copied;
2190afddba49SNick Piggin }
2191afddba49SNick Piggin EXPORT_SYMBOL(block_write_end);
2192afddba49SNick Piggin 
2193afddba49SNick Piggin int generic_write_end(struct file *file, struct address_space *mapping,
2194afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2195afddba49SNick Piggin 			struct page *page, void *fsdata)
2196afddba49SNick Piggin {
21978af54f29SChristoph Hellwig 	struct inode *inode = mapping->host;
21988af54f29SChristoph Hellwig 	loff_t old_size = inode->i_size;
21998af54f29SChristoph Hellwig 	bool i_size_changed = false;
22008af54f29SChristoph Hellwig 
2201afddba49SNick Piggin 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
22028af54f29SChristoph Hellwig 
22038af54f29SChristoph Hellwig 	/*
22048af54f29SChristoph Hellwig 	 * No need to use i_size_read() here, the i_size cannot change under us
22058af54f29SChristoph Hellwig 	 * because we hold i_rwsem.
22068af54f29SChristoph Hellwig 	 *
22078af54f29SChristoph Hellwig 	 * But it's important to update i_size while still holding page lock:
22088af54f29SChristoph Hellwig 	 * page writeout could otherwise come in and zero beyond i_size.
22098af54f29SChristoph Hellwig 	 */
22108af54f29SChristoph Hellwig 	if (pos + copied > inode->i_size) {
22118af54f29SChristoph Hellwig 		i_size_write(inode, pos + copied);
22128af54f29SChristoph Hellwig 		i_size_changed = true;
22138af54f29SChristoph Hellwig 	}
22148af54f29SChristoph Hellwig 
22158af54f29SChristoph Hellwig 	unlock_page(page);
22167a77dad7SAndreas Gruenbacher 	put_page(page);
22178af54f29SChristoph Hellwig 
22188af54f29SChristoph Hellwig 	if (old_size < pos)
22198af54f29SChristoph Hellwig 		pagecache_isize_extended(inode, old_size, pos);
22208af54f29SChristoph Hellwig 	/*
22218af54f29SChristoph Hellwig 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
22228af54f29SChristoph Hellwig 	 * makes the holding time of page lock longer. Second, it forces lock
22238af54f29SChristoph Hellwig 	 * ordering of page lock and transaction start for journaling
22248af54f29SChristoph Hellwig 	 * filesystems.
22258af54f29SChristoph Hellwig 	 */
22268af54f29SChristoph Hellwig 	if (i_size_changed)
22278af54f29SChristoph Hellwig 		mark_inode_dirty(inode);
222826ddb1f4SAndreas Gruenbacher 	return copied;
2229afddba49SNick Piggin }
2230afddba49SNick Piggin EXPORT_SYMBOL(generic_write_end);
2231afddba49SNick Piggin 
2232afddba49SNick Piggin /*
22332e7e80f7SMatthew Wilcox (Oracle)  * block_is_partially_uptodate checks whether buffers within a folio are
22348ab22b9aSHisashi Hifumi  * uptodate or not.
22358ab22b9aSHisashi Hifumi  *
22362e7e80f7SMatthew Wilcox (Oracle)  * Returns true if all buffers which correspond to the specified part
22372e7e80f7SMatthew Wilcox (Oracle)  * of the folio are uptodate.
22388ab22b9aSHisashi Hifumi  */
22392e7e80f7SMatthew Wilcox (Oracle) bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
22408ab22b9aSHisashi Hifumi {
22418ab22b9aSHisashi Hifumi 	unsigned block_start, block_end, blocksize;
22428ab22b9aSHisashi Hifumi 	unsigned to;
22438ab22b9aSHisashi Hifumi 	struct buffer_head *bh, *head;
22442e7e80f7SMatthew Wilcox (Oracle) 	bool ret = true;
22458ab22b9aSHisashi Hifumi 
22462e7e80f7SMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
22472e7e80f7SMatthew Wilcox (Oracle) 	if (!head)
22482e7e80f7SMatthew Wilcox (Oracle) 		return false;
224945bce8f3SLinus Torvalds 	blocksize = head->b_size;
22502e7e80f7SMatthew Wilcox (Oracle) 	to = min_t(unsigned, folio_size(folio) - from, count);
22518ab22b9aSHisashi Hifumi 	to = from + to;
22522e7e80f7SMatthew Wilcox (Oracle) 	if (from < blocksize && to > folio_size(folio) - blocksize)
22532e7e80f7SMatthew Wilcox (Oracle) 		return false;
22548ab22b9aSHisashi Hifumi 
22558ab22b9aSHisashi Hifumi 	bh = head;
22568ab22b9aSHisashi Hifumi 	block_start = 0;
22578ab22b9aSHisashi Hifumi 	do {
22588ab22b9aSHisashi Hifumi 		block_end = block_start + blocksize;
22598ab22b9aSHisashi Hifumi 		if (block_end > from && block_start < to) {
22608ab22b9aSHisashi Hifumi 			if (!buffer_uptodate(bh)) {
22612e7e80f7SMatthew Wilcox (Oracle) 				ret = false;
22628ab22b9aSHisashi Hifumi 				break;
22638ab22b9aSHisashi Hifumi 			}
22648ab22b9aSHisashi Hifumi 			if (block_end >= to)
22658ab22b9aSHisashi Hifumi 				break;
22668ab22b9aSHisashi Hifumi 		}
22678ab22b9aSHisashi Hifumi 		block_start = block_end;
22688ab22b9aSHisashi Hifumi 		bh = bh->b_this_page;
22698ab22b9aSHisashi Hifumi 	} while (bh != head);
22708ab22b9aSHisashi Hifumi 
22718ab22b9aSHisashi Hifumi 	return ret;
22728ab22b9aSHisashi Hifumi }
22738ab22b9aSHisashi Hifumi EXPORT_SYMBOL(block_is_partially_uptodate);
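
/*
 * Example (illustrative sketch; myfs_aops is hypothetical): filesystems
 * normally plug this helper straight into their address_space_operations
 * so partially-uptodate folios can satisfy reads without extra I/O.
 */
static const struct address_space_operations myfs_aops = {
	.is_partially_uptodate	= block_is_partially_uptodate,
	/* ... read and write paths elided ... */
};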
22748ab22b9aSHisashi Hifumi 
22758ab22b9aSHisashi Hifumi /*
22762c69e205SMatthew Wilcox (Oracle)  * Generic "read_folio" function for block devices that have the normal
22771da177e4SLinus Torvalds  * get_block functionality. This is most of the block device filesystems.
22782c69e205SMatthew Wilcox (Oracle)  * Reads the folio asynchronously --- the unlock_buffer() and
22791da177e4SLinus Torvalds  * set/clear_buffer_uptodate() functions propagate buffer state into the
22802c69e205SMatthew Wilcox (Oracle)  * folio once IO has completed.
22811da177e4SLinus Torvalds  */
22822c69e205SMatthew Wilcox (Oracle) int block_read_full_folio(struct folio *folio, get_block_t *get_block)
22831da177e4SLinus Torvalds {
22842c69e205SMatthew Wilcox (Oracle) 	struct inode *inode = folio->mapping->host;
22851da177e4SLinus Torvalds 	sector_t iblock, lblock;
22861da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
228745bce8f3SLinus Torvalds 	unsigned int blocksize, bbits;
22881da177e4SLinus Torvalds 	int nr, i;
22891da177e4SLinus Torvalds 	int fully_mapped = 1;
2290b7a6eb22SMatthew Wilcox (Oracle) 	bool page_error = false;
22914fa512ceSEric Biggers 	loff_t limit = i_size_read(inode);
22924fa512ceSEric Biggers 
22934fa512ceSEric Biggers 	/* This is needed for ext4. */
22944fa512ceSEric Biggers 	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
22954fa512ceSEric Biggers 		limit = inode->i_sb->s_maxbytes;
22961da177e4SLinus Torvalds 
22972c69e205SMatthew Wilcox (Oracle) 	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
22982c69e205SMatthew Wilcox (Oracle) 
22992c69e205SMatthew Wilcox (Oracle) 	head = create_page_buffers(&folio->page, inode, 0);
230045bce8f3SLinus Torvalds 	blocksize = head->b_size;
230145bce8f3SLinus Torvalds 	bbits = block_size_bits(blocksize);
23021da177e4SLinus Torvalds 
23032c69e205SMatthew Wilcox (Oracle) 	iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits);
23044fa512ceSEric Biggers 	lblock = (limit+blocksize-1) >> bbits;
23051da177e4SLinus Torvalds 	bh = head;
23061da177e4SLinus Torvalds 	nr = 0;
23071da177e4SLinus Torvalds 	i = 0;
23081da177e4SLinus Torvalds 
23091da177e4SLinus Torvalds 	do {
23101da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
23111da177e4SLinus Torvalds 			continue;
23121da177e4SLinus Torvalds 
23131da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
2314c64610baSAndrew Morton 			int err = 0;
2315c64610baSAndrew Morton 
23161da177e4SLinus Torvalds 			fully_mapped = 0;
23171da177e4SLinus Torvalds 			if (iblock < lblock) {
2318b0cf2321SBadari Pulavarty 				WARN_ON(bh->b_size != blocksize);
2319c64610baSAndrew Morton 				err = get_block(inode, iblock, bh, 0);
2320b7a6eb22SMatthew Wilcox (Oracle) 				if (err) {
23212c69e205SMatthew Wilcox (Oracle) 					folio_set_error(folio);
2322b7a6eb22SMatthew Wilcox (Oracle) 					page_error = true;
2323b7a6eb22SMatthew Wilcox (Oracle) 				}
23241da177e4SLinus Torvalds 			}
23251da177e4SLinus Torvalds 			if (!buffer_mapped(bh)) {
23262c69e205SMatthew Wilcox (Oracle) 				folio_zero_range(folio, i * blocksize,
23272c69e205SMatthew Wilcox (Oracle) 						blocksize);
2328c64610baSAndrew Morton 				if (!err)
23291da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
23301da177e4SLinus Torvalds 				continue;
23311da177e4SLinus Torvalds 			}
23321da177e4SLinus Torvalds 			/*
23331da177e4SLinus Torvalds 			 * get_block() might have updated the buffer
23341da177e4SLinus Torvalds 			 * synchronously
23351da177e4SLinus Torvalds 			 */
23361da177e4SLinus Torvalds 			if (buffer_uptodate(bh))
23371da177e4SLinus Torvalds 				continue;
23381da177e4SLinus Torvalds 		}
23391da177e4SLinus Torvalds 		arr[nr++] = bh;
23401da177e4SLinus Torvalds 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
23411da177e4SLinus Torvalds 
23421da177e4SLinus Torvalds 	if (fully_mapped)
23432c69e205SMatthew Wilcox (Oracle) 		folio_set_mappedtodisk(folio);
23441da177e4SLinus Torvalds 
23451da177e4SLinus Torvalds 	if (!nr) {
23461da177e4SLinus Torvalds 		/*
23472c69e205SMatthew Wilcox (Oracle) 		 * All buffers are uptodate - we can set the folio uptodate
23481da177e4SLinus Torvalds 		 * as well. But not if get_block() returned an error.
23491da177e4SLinus Torvalds 		 */
2350b7a6eb22SMatthew Wilcox (Oracle) 		if (!page_error)
23512c69e205SMatthew Wilcox (Oracle) 			folio_mark_uptodate(folio);
23522c69e205SMatthew Wilcox (Oracle) 		folio_unlock(folio);
23531da177e4SLinus Torvalds 		return 0;
23541da177e4SLinus Torvalds 	}
23551da177e4SLinus Torvalds 
23561da177e4SLinus Torvalds 	/* Stage two: lock the buffers */
23571da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
23581da177e4SLinus Torvalds 		bh = arr[i];
23591da177e4SLinus Torvalds 		lock_buffer(bh);
23601da177e4SLinus Torvalds 		mark_buffer_async_read(bh);
23611da177e4SLinus Torvalds 	}
23621da177e4SLinus Torvalds 
23631da177e4SLinus Torvalds 	/*
23641da177e4SLinus Torvalds 	 * Stage 3: start the IO.  Check for uptodateness
23651da177e4SLinus Torvalds 	 * inside the buffer lock in case another process reading
23661da177e4SLinus Torvalds 	 * the underlying blockdev brought it uptodate (the sct fix).
23671da177e4SLinus Torvalds 	 */
23681da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
23691da177e4SLinus Torvalds 		bh = arr[i];
23701da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
23711da177e4SLinus Torvalds 			end_buffer_async_read(bh, 1);
23721da177e4SLinus Torvalds 		else
23731420c4a5SBart Van Assche 			submit_bh(REQ_OP_READ, bh);
23741da177e4SLinus Torvalds 	}
23751da177e4SLinus Torvalds 	return 0;
23761da177e4SLinus Torvalds }
23772c69e205SMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_read_full_folio);
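
/*
 * Example (illustrative sketch; myfs_get_block is a hypothetical
 * get_block_t implementation): wiring block_read_full_folio() into a
 * filesystem's ->read_folio method.
 */
static int myfs_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, myfs_get_block);
}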
23781da177e4SLinus Torvalds 
23791da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding
238089e10787SNick Piggin  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
23811da177e4SLinus Torvalds  * deal with the hole.
23821da177e4SLinus Torvalds  */
238389e10787SNick Piggin int generic_cont_expand_simple(struct inode *inode, loff_t size)
23841da177e4SLinus Torvalds {
23851da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
238653b524b8SMatthew Wilcox (Oracle) 	const struct address_space_operations *aops = mapping->a_ops;
23871da177e4SLinus Torvalds 	struct page *page;
23881468c6f4SAlexander Potapenko 	void *fsdata = NULL;
23891da177e4SLinus Torvalds 	int err;
23901da177e4SLinus Torvalds 
2391c08d3b0eSnpiggin@suse.de 	err = inode_newsize_ok(inode, size);
2392c08d3b0eSnpiggin@suse.de 	if (err)
23931da177e4SLinus Torvalds 		goto out;
23941da177e4SLinus Torvalds 
239553b524b8SMatthew Wilcox (Oracle) 	err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
239689e10787SNick Piggin 	if (err)
239705eb0b51SOGAWA Hirofumi 		goto out;
239805eb0b51SOGAWA Hirofumi 
239953b524b8SMatthew Wilcox (Oracle) 	err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
240089e10787SNick Piggin 	BUG_ON(err > 0);
240105eb0b51SOGAWA Hirofumi 
240205eb0b51SOGAWA Hirofumi out:
240305eb0b51SOGAWA Hirofumi 	return err;
240405eb0b51SOGAWA Hirofumi }
24051fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_cont_expand_simple);
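
/*
 * Example (illustrative sketch; the myfs_* names are hypothetical): an
 * expanding truncate in a filesystem that cannot represent holes.  On
 * success the tail has been zero-filled and i_size updated.
 */
static int myfs_expand(struct inode *inode, loff_t new_size)
{
	return generic_cont_expand_simple(inode, new_size);
}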
240605eb0b51SOGAWA Hirofumi 
2407f1e3af72SAdrian Bunk static int cont_expand_zero(struct file *file, struct address_space *mapping,
240889e10787SNick Piggin 			    loff_t pos, loff_t *bytes)
240905eb0b51SOGAWA Hirofumi {
241089e10787SNick Piggin 	struct inode *inode = mapping->host;
241153b524b8SMatthew Wilcox (Oracle) 	const struct address_space_operations *aops = mapping->a_ops;
241293407472SFabian Frederick 	unsigned int blocksize = i_blocksize(inode);
241389e10787SNick Piggin 	struct page *page;
24141468c6f4SAlexander Potapenko 	void *fsdata = NULL;
241589e10787SNick Piggin 	pgoff_t index, curidx;
241689e10787SNick Piggin 	loff_t curpos;
241789e10787SNick Piggin 	unsigned zerofrom, offset, len;
241889e10787SNick Piggin 	int err = 0;
241905eb0b51SOGAWA Hirofumi 
242009cbfeafSKirill A. Shutemov 	index = pos >> PAGE_SHIFT;
242109cbfeafSKirill A. Shutemov 	offset = pos & ~PAGE_MASK;
242289e10787SNick Piggin 
242309cbfeafSKirill A. Shutemov 	while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
242409cbfeafSKirill A. Shutemov 		zerofrom = curpos & ~PAGE_MASK;
242589e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
242689e10787SNick Piggin 			*bytes |= (blocksize-1);
242789e10787SNick Piggin 			(*bytes)++;
242889e10787SNick Piggin 		}
242909cbfeafSKirill A. Shutemov 		len = PAGE_SIZE - zerofrom;
243089e10787SNick Piggin 
243153b524b8SMatthew Wilcox (Oracle) 		err = aops->write_begin(file, mapping, curpos, len,
243289e10787SNick Piggin 					    &page, &fsdata);
243389e10787SNick Piggin 		if (err)
243489e10787SNick Piggin 			goto out;
2435eebd2aa3SChristoph Lameter 		zero_user(page, zerofrom, len);
243653b524b8SMatthew Wilcox (Oracle) 		err = aops->write_end(file, mapping, curpos, len, len,
243789e10787SNick Piggin 						page, fsdata);
243889e10787SNick Piggin 		if (err < 0)
243989e10787SNick Piggin 			goto out;
244089e10787SNick Piggin 		BUG_ON(err != len);
244189e10787SNick Piggin 		err = 0;
2442061e9746SOGAWA Hirofumi 
2443061e9746SOGAWA Hirofumi 		balance_dirty_pages_ratelimited(mapping);
2444c2ca0fcdSMikulas Patocka 
244508d405c8SDavidlohr Bueso 		if (fatal_signal_pending(current)) {
2446c2ca0fcdSMikulas Patocka 			err = -EINTR;
2447c2ca0fcdSMikulas Patocka 			goto out;
2448c2ca0fcdSMikulas Patocka 		}
244989e10787SNick Piggin 	}
245089e10787SNick Piggin 
245189e10787SNick Piggin 	/* page covers the boundary, find the boundary offset */
245289e10787SNick Piggin 	if (index == curidx) {
245309cbfeafSKirill A. Shutemov 		zerofrom = curpos & ~PAGE_MASK;
245489e10787SNick Piggin 		/* if we are expanding the file, the last block will be filled */
245589e10787SNick Piggin 		if (offset <= zerofrom) {
245689e10787SNick Piggin 			goto out;
245789e10787SNick Piggin 		}
245889e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
245989e10787SNick Piggin 			*bytes |= (blocksize-1);
246089e10787SNick Piggin 			(*bytes)++;
246189e10787SNick Piggin 		}
246289e10787SNick Piggin 		len = offset - zerofrom;
246389e10787SNick Piggin 
246453b524b8SMatthew Wilcox (Oracle) 		err = aops->write_begin(file, mapping, curpos, len,
246589e10787SNick Piggin 					    &page, &fsdata);
246689e10787SNick Piggin 		if (err)
246789e10787SNick Piggin 			goto out;
2468eebd2aa3SChristoph Lameter 		zero_user(page, zerofrom, len);
246953b524b8SMatthew Wilcox (Oracle) 		err = aops->write_end(file, mapping, curpos, len, len,
247089e10787SNick Piggin 						page, fsdata);
247189e10787SNick Piggin 		if (err < 0)
247289e10787SNick Piggin 			goto out;
247389e10787SNick Piggin 		BUG_ON(err != len);
247489e10787SNick Piggin 		err = 0;
247589e10787SNick Piggin 	}
247689e10787SNick Piggin out:
247789e10787SNick Piggin 	return err;
24781da177e4SLinus Torvalds }
24791da177e4SLinus Torvalds 
24801da177e4SLinus Torvalds /*
24811da177e4SLinus Torvalds  * For moronic filesystems that do not allow holes in files.
24821da177e4SLinus Torvalds  * We may have to extend the file.
24831da177e4SLinus Torvalds  */
2484282dc178SChristoph Hellwig int cont_write_begin(struct file *file, struct address_space *mapping,
2485be3bbbc5SMatthew Wilcox (Oracle) 			loff_t pos, unsigned len,
248689e10787SNick Piggin 			struct page **pagep, void **fsdata,
248789e10787SNick Piggin 			get_block_t *get_block, loff_t *bytes)
24881da177e4SLinus Torvalds {
24891da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
249093407472SFabian Frederick 	unsigned int blocksize = i_blocksize(inode);
249193407472SFabian Frederick 	unsigned int zerofrom;
249289e10787SNick Piggin 	int err;
24931da177e4SLinus Torvalds 
249489e10787SNick Piggin 	err = cont_expand_zero(file, mapping, pos, bytes);
249589e10787SNick Piggin 	if (err)
2496155130a4SChristoph Hellwig 		return err;
24971da177e4SLinus Torvalds 
249809cbfeafSKirill A. Shutemov 	zerofrom = *bytes & ~PAGE_MASK;
249989e10787SNick Piggin 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
25001da177e4SLinus Torvalds 		*bytes |= (blocksize-1);
25011da177e4SLinus Torvalds 		(*bytes)++;
25021da177e4SLinus Torvalds 	}
25031da177e4SLinus Torvalds 
2504b3992d1eSMatthew Wilcox (Oracle) 	return block_write_begin(mapping, pos, len, pagep, get_block);
25051da177e4SLinus Torvalds }
25061fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(cont_write_begin);
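
/*
 * Example (illustrative sketch, modelled on FAT-style callers; the
 * myfs_* names and the per-inode mmu_private field are hypothetical):
 * the filesystem hands over a pointer to its "bytes allocated so far"
 * counter so the gap up to @pos can be zeroed first.
 */
static int myfs_cont_write_begin(struct file *file,
				 struct address_space *mapping, loff_t pos,
				 unsigned len, struct page **pagep,
				 void **fsdata)
{
	return cont_write_begin(file, mapping, pos, len, pagep, fsdata,
				myfs_get_block,
				&MYFS_I(mapping->host)->mmu_private);
}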
25071da177e4SLinus Torvalds 
25081da177e4SLinus Torvalds int block_commit_write(struct page *page, unsigned from, unsigned to)
25091da177e4SLinus Torvalds {
25101da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
25111da177e4SLinus Torvalds 	__block_commit_write(inode, page, from, to);
25121da177e4SLinus Torvalds 	return 0;
25131da177e4SLinus Torvalds }
25141fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_commit_write);
25151da177e4SLinus Torvalds 
251654171690SDavid Chinner /*
251754171690SDavid Chinner  * block_page_mkwrite() is not allowed to change the file size as it gets
251854171690SDavid Chinner  * called from a page fault handler when a page is first dirtied. Hence we must
251954171690SDavid Chinner  * be careful to check for EOF conditions here. We set the page up correctly
252054171690SDavid Chinner  * for a written page which means we get ENOSPC checking when writing into
252154171690SDavid Chinner  * holes and correct delalloc and unwritten extent mapping on filesystems that
252254171690SDavid Chinner  * support these features.
252354171690SDavid Chinner  *
252454171690SDavid Chinner  * We are not allowed to take the i_rwsem here so we have to play games to
252554171690SDavid Chinner  * protect against truncate races as the page could now be beyond EOF.  Because
25267bb46a67Snpiggin@suse.de  * truncate writes the inode size before removing pages, once we have the
252754171690SDavid Chinner  * page lock we can determine safely if the page is beyond EOF. If it is not
252854171690SDavid Chinner  * beyond EOF, then the page is guaranteed safe against truncation until we
252954171690SDavid Chinner  * unlock the page.
2530ea13a864SJan Kara  *
253114da9200SJan Kara  * Direct callers of this function should protect against filesystem freezing
25325c500029SRoss Zwisler  * using sb_start_pagefault() - sb_end_pagefault() functions.
253354171690SDavid Chinner  */
25345c500029SRoss Zwisler int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
253554171690SDavid Chinner 			 get_block_t get_block)
253654171690SDavid Chinner {
2537c2ec175cSNick Piggin 	struct page *page = vmf->page;
2538496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
253954171690SDavid Chinner 	unsigned long end;
254054171690SDavid Chinner 	loff_t size;
254124da4fabSJan Kara 	int ret;
254254171690SDavid Chinner 
254354171690SDavid Chinner 	lock_page(page);
254454171690SDavid Chinner 	size = i_size_read(inode);
254554171690SDavid Chinner 	if ((page->mapping != inode->i_mapping) ||
254618336338SNick Piggin 	    (page_offset(page) > size)) {
254724da4fabSJan Kara 		/* We overload EFAULT to mean page got truncated */
254824da4fabSJan Kara 		ret = -EFAULT;
254924da4fabSJan Kara 		goto out_unlock;
255054171690SDavid Chinner 	}
255154171690SDavid Chinner 
255254171690SDavid Chinner 	/* page is wholly or partially inside EOF */
255309cbfeafSKirill A. Shutemov 	if (((page->index + 1) << PAGE_SHIFT) > size)
255409cbfeafSKirill A. Shutemov 		end = size & ~PAGE_MASK;
255554171690SDavid Chinner 	else
255609cbfeafSKirill A. Shutemov 		end = PAGE_SIZE;
255754171690SDavid Chinner 
2558ebdec241SChristoph Hellwig 	ret = __block_write_begin(page, 0, end, get_block);
255954171690SDavid Chinner 	if (!ret)
256054171690SDavid Chinner 		ret = block_commit_write(page, 0, end);
256154171690SDavid Chinner 
256224da4fabSJan Kara 	if (unlikely(ret < 0))
256324da4fabSJan Kara 		goto out_unlock;
2564ea13a864SJan Kara 	set_page_dirty(page);
25651d1d1a76SDarrick J. Wong 	wait_for_stable_page(page);
256624da4fabSJan Kara 	return 0;
256724da4fabSJan Kara out_unlock:
2568b827e496SNick Piggin 	unlock_page(page);
256954171690SDavid Chinner 	return ret;
257054171690SDavid Chinner }
25711fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_page_mkwrite);
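
/*
 * Example (illustrative sketch; myfs_get_block is hypothetical): a
 * ->page_mkwrite handler adding the freeze protection that direct
 * callers of block_page_mkwrite() are asked to provide.
 */
static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int err;

	sb_start_pagefault(inode->i_sb);
	err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(err);
}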
25721da177e4SLinus Torvalds 
25731da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping,
25741da177e4SLinus Torvalds 			loff_t from, get_block_t *get_block)
25751da177e4SLinus Torvalds {
257609cbfeafSKirill A. Shutemov 	pgoff_t index = from >> PAGE_SHIFT;
257709cbfeafSKirill A. Shutemov 	unsigned offset = from & (PAGE_SIZE-1);
25781da177e4SLinus Torvalds 	unsigned blocksize;
257954b21a79SAndrew Morton 	sector_t iblock;
25801da177e4SLinus Torvalds 	unsigned length, pos;
25811da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
25821da177e4SLinus Torvalds 	struct page *page;
25831da177e4SLinus Torvalds 	struct buffer_head *bh;
25841da177e4SLinus Torvalds 	int err;
25851da177e4SLinus Torvalds 
258693407472SFabian Frederick 	blocksize = i_blocksize(inode);
25871da177e4SLinus Torvalds 	length = offset & (blocksize - 1);
25881da177e4SLinus Torvalds 
25891da177e4SLinus Torvalds 	/* Block boundary? Nothing to do */
25901da177e4SLinus Torvalds 	if (!length)
25911da177e4SLinus Torvalds 		return 0;
25921da177e4SLinus Torvalds 
25931da177e4SLinus Torvalds 	length = blocksize - length;
259409cbfeafSKirill A. Shutemov 	iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
25951da177e4SLinus Torvalds 
25961da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
25971da177e4SLinus Torvalds 	err = -ENOMEM;
25981da177e4SLinus Torvalds 	if (!page)
25991da177e4SLinus Torvalds 		goto out;
26001da177e4SLinus Torvalds 
26011da177e4SLinus Torvalds 	if (!page_has_buffers(page))
26021da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
26031da177e4SLinus Torvalds 
26041da177e4SLinus Torvalds 	/* Find the buffer that contains "offset" */
26051da177e4SLinus Torvalds 	bh = page_buffers(page);
26061da177e4SLinus Torvalds 	pos = blocksize;
26071da177e4SLinus Torvalds 	while (offset >= pos) {
26081da177e4SLinus Torvalds 		bh = bh->b_this_page;
26091da177e4SLinus Torvalds 		iblock++;
26101da177e4SLinus Torvalds 		pos += blocksize;
26111da177e4SLinus Torvalds 	}
26121da177e4SLinus Torvalds 
26131da177e4SLinus Torvalds 	err = 0;
26141da177e4SLinus Torvalds 	if (!buffer_mapped(bh)) {
2615b0cf2321SBadari Pulavarty 		WARN_ON(bh->b_size != blocksize);
26161da177e4SLinus Torvalds 		err = get_block(inode, iblock, bh, 0);
26171da177e4SLinus Torvalds 		if (err)
26181da177e4SLinus Torvalds 			goto unlock;
26191da177e4SLinus Torvalds 		/* unmapped? It's a hole - nothing to do */
26201da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
26211da177e4SLinus Torvalds 			goto unlock;
26221da177e4SLinus Torvalds 	}
26231da177e4SLinus Torvalds 
26241da177e4SLinus Torvalds 	/* Ok, it's mapped. Make sure it's up-to-date */
26251da177e4SLinus Torvalds 	if (PageUptodate(page))
26261da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
26271da177e4SLinus Torvalds 
262833a266ddSDavid Chinner 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2629e7ea1129SZhang Yi 		err = bh_read(bh, 0);
26301da177e4SLinus Torvalds 		/* Uhhuh. Read error. Complain and punt. */
2631e7ea1129SZhang Yi 		if (err < 0)
26321da177e4SLinus Torvalds 			goto unlock;
26331da177e4SLinus Torvalds 	}
26341da177e4SLinus Torvalds 
2635eebd2aa3SChristoph Lameter 	zero_user(page, offset, length);
26361da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
26371da177e4SLinus Torvalds 	err = 0;
26381da177e4SLinus Torvalds 
26391da177e4SLinus Torvalds unlock:
26401da177e4SLinus Torvalds 	unlock_page(page);
264109cbfeafSKirill A. Shutemov 	put_page(page);
26421da177e4SLinus Torvalds out:
26431da177e4SLinus Torvalds 	return err;
26441da177e4SLinus Torvalds }
26451fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_truncate_page);
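
/*
 * Example (illustrative sketch; the myfs_* names are hypothetical):
 * zeroing the partial tail block from a filesystem's truncate path, so
 * stale data beyond the new EOF never becomes visible.
 */
static void myfs_truncate_blocks(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	/* ... then release the blocks beyond i_size ... */
}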
26461da177e4SLinus Torvalds 
26471da177e4SLinus Torvalds /*
26481da177e4SLinus Torvalds  * The generic ->writepage function for buffer-backed address_spaces
26491da177e4SLinus Torvalds  */
26501b938c08SMatthew Wilcox int block_write_full_page(struct page *page, get_block_t *get_block,
26511b938c08SMatthew Wilcox 			struct writeback_control *wbc)
26521da177e4SLinus Torvalds {
26531da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
26541da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
265509cbfeafSKirill A. Shutemov 	const pgoff_t end_index = i_size >> PAGE_SHIFT;
26561da177e4SLinus Torvalds 	unsigned offset;
26571da177e4SLinus Torvalds 
26581da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
26591da177e4SLinus Torvalds 	if (page->index < end_index)
266035c80d5fSChris Mason 		return __block_write_full_page(inode, page, get_block, wbc,
26611b938c08SMatthew Wilcox 					       end_buffer_async_write);
26621da177e4SLinus Torvalds 
26631da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
266409cbfeafSKirill A. Shutemov 	offset = i_size & (PAGE_SIZE-1);
26651da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
26661da177e4SLinus Torvalds 		unlock_page(page);
26671da177e4SLinus Torvalds 		return 0; /* don't care */
26681da177e4SLinus Torvalds 	}
26691da177e4SLinus Torvalds 
26701da177e4SLinus Torvalds 	/*
26711da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
26722a61aa40SAdam Buchbinder 	 * writepage invocation because it may be mmapped.  "A file is mapped
26731da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
26741da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
26751da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
26761da177e4SLinus Torvalds 	 */
267709cbfeafSKirill A. Shutemov 	zero_user_segment(page, offset, PAGE_SIZE);
26781b938c08SMatthew Wilcox 	return __block_write_full_page(inode, page, get_block, wbc,
267935c80d5fSChris Mason 							end_buffer_async_write);
268035c80d5fSChris Mason }
26811fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_write_full_page);
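
/*
 * Example (illustrative sketch; myfs_get_block is hypothetical): the
 * classic ->writepage implementation for a get_block-based filesystem.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}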
268235c80d5fSChris Mason 
26831da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
26841da177e4SLinus Torvalds 			    get_block_t *get_block)
26851da177e4SLinus Torvalds {
26861da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
26872a527d68SAlexander Potapenko 	struct buffer_head tmp = {
26882a527d68SAlexander Potapenko 		.b_size = i_blocksize(inode),
26892a527d68SAlexander Potapenko 	};
26902a527d68SAlexander Potapenko 
26911da177e4SLinus Torvalds 	get_block(inode, block, &tmp, 0);
26921da177e4SLinus Torvalds 	return tmp.b_blocknr;
26931da177e4SLinus Torvalds }
26941fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_block_bmap);
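
/*
 * Example (illustrative sketch; myfs_get_block is hypothetical):
 * exposing the FIBMAP ioctl through the ->bmap method.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}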
26951da177e4SLinus Torvalds 
26964246a0b6SChristoph Hellwig static void end_bio_bh_io_sync(struct bio *bio)
26971da177e4SLinus Torvalds {
26981da177e4SLinus Torvalds 	struct buffer_head *bh = bio->bi_private;
26991da177e4SLinus Torvalds 
2700b7c44ed9SJens Axboe 	if (unlikely(bio_flagged(bio, BIO_QUIET)))
270108bafc03SKeith Mannthey 		set_bit(BH_Quiet, &bh->b_state);
270208bafc03SKeith Mannthey 
27034e4cbee9SChristoph Hellwig 	bh->b_end_io(bh, !bio->bi_status);
27041da177e4SLinus Torvalds 	bio_put(bio);
27051da177e4SLinus Torvalds }
27061da177e4SLinus Torvalds 
27075bdf402aSRitesh Harjani (IBM) static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
27081420c4a5SBart Van Assche 			  struct writeback_control *wbc)
27091da177e4SLinus Torvalds {
27101420c4a5SBart Van Assche 	const enum req_op op = opf & REQ_OP_MASK;
27111da177e4SLinus Torvalds 	struct bio *bio;
27121da177e4SLinus Torvalds 
27131da177e4SLinus Torvalds 	BUG_ON(!buffer_locked(bh));
27141da177e4SLinus Torvalds 	BUG_ON(!buffer_mapped(bh));
27151da177e4SLinus Torvalds 	BUG_ON(!bh->b_end_io);
27168fb0e342SAneesh Kumar K.V 	BUG_ON(buffer_delay(bh));
27178fb0e342SAneesh Kumar K.V 	BUG_ON(buffer_unwritten(bh));
27181da177e4SLinus Torvalds 
271948fd4f93SJens Axboe 	/*
272048fd4f93SJens Axboe 	 * Only clear out a write error when rewriting
27211da177e4SLinus Torvalds 	 */
27222a222ca9SMike Christie 	if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
27231da177e4SLinus Torvalds 		clear_buffer_write_io_error(bh);
27241da177e4SLinus Torvalds 
272507888c66SChristoph Hellwig 	if (buffer_meta(bh))
27261420c4a5SBart Van Assche 		opf |= REQ_META;
272707888c66SChristoph Hellwig 	if (buffer_prio(bh))
27281420c4a5SBart Van Assche 		opf |= REQ_PRIO;
272907888c66SChristoph Hellwig 
27301420c4a5SBart Van Assche 	bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
27311da177e4SLinus Torvalds 
27324f74d15fSEric Biggers 	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
27334f74d15fSEric Biggers 
27344f024f37SKent Overstreet 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
27351da177e4SLinus Torvalds 
27366cf66b4cSKent Overstreet 	bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
27376cf66b4cSKent Overstreet 	BUG_ON(bio->bi_iter.bi_size != bh->b_size);
27381da177e4SLinus Torvalds 
27391da177e4SLinus Torvalds 	bio->bi_end_io = end_bio_bh_io_sync;
27401da177e4SLinus Torvalds 	bio->bi_private = bh;
27411da177e4SLinus Torvalds 
274283c9c547SMing Lei 	/* Take care of bh's that straddle the end of the device */
274383c9c547SMing Lei 	guard_bio_eod(bio);
274483c9c547SMing Lei 
2745fd42df30SDennis Zhou 	if (wbc) {
2746fd42df30SDennis Zhou 		wbc_init_bio(wbc, bio);
274734e51a5eSTejun Heo 		wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
2748fd42df30SDennis Zhou 	}
2749fd42df30SDennis Zhou 
27504e49ea4aSMike Christie 	submit_bio(bio);
27511da177e4SLinus Torvalds }
2752bafc0dbaSTejun Heo 
27535bdf402aSRitesh Harjani (IBM) void submit_bh(blk_opf_t opf, struct buffer_head *bh)
275471368511SDarrick J. Wong {
27555bdf402aSRitesh Harjani (IBM) 	submit_bh_wbc(opf, bh, NULL);
275671368511SDarrick J. Wong }
27571fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(submit_bh);
27581da177e4SLinus Torvalds 
27593ae72869SBart Van Assche void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
27609cb569d6SChristoph Hellwig {
27619cb569d6SChristoph Hellwig 	lock_buffer(bh);
27629cb569d6SChristoph Hellwig 	if (!test_clear_buffer_dirty(bh)) {
27639cb569d6SChristoph Hellwig 		unlock_buffer(bh);
27649cb569d6SChristoph Hellwig 		return;
27659cb569d6SChristoph Hellwig 	}
27669cb569d6SChristoph Hellwig 	bh->b_end_io = end_buffer_write_sync;
27679cb569d6SChristoph Hellwig 	get_bh(bh);
27681420c4a5SBart Van Assche 	submit_bh(REQ_OP_WRITE | op_flags, bh);
27699cb569d6SChristoph Hellwig }
27709cb569d6SChristoph Hellwig EXPORT_SYMBOL(write_dirty_buffer);
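
/*
 * Example (illustrative sketch; myfs_flush_buffers is hypothetical):
 * kick off writeback of several dirty buffers before waiting on any of
 * them, so the writes can proceed in parallel.  The caller is assumed
 * to hold a reference on each buffer.
 */
static int myfs_flush_buffers(struct buffer_head **bhs, int nr)
{
	int i, err = 0;

	for (i = 0; i < nr; i++)
		write_dirty_buffer(bhs[i], REQ_SYNC);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}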
27719cb569d6SChristoph Hellwig 
27721da177e4SLinus Torvalds /*
27731da177e4SLinus Torvalds  * For a data-integrity writeout, we need to wait upon any in-progress I/O
27741da177e4SLinus Torvalds  * and then start new I/O and then wait upon it.  The caller must have a ref on
27751da177e4SLinus Torvalds  * the buffer_head.
27761da177e4SLinus Torvalds  */
27773ae72869SBart Van Assche int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
27781da177e4SLinus Torvalds {
27791da177e4SLinus Torvalds 	WARN_ON(atomic_read(&bh->b_count) < 1);
27801da177e4SLinus Torvalds 	lock_buffer(bh);
27811da177e4SLinus Torvalds 	if (test_clear_buffer_dirty(bh)) {
2782377254b2SXianting Tian 		/*
2783377254b2SXianting Tian 		 * The bh should be mapped, but it might not be if the
2784377254b2SXianting Tian 		 * device was hot-removed. Not much we can do but fail the I/O.
2785377254b2SXianting Tian 		 */
2786377254b2SXianting Tian 		if (!buffer_mapped(bh)) {
2787377254b2SXianting Tian 			unlock_buffer(bh);
2788377254b2SXianting Tian 			return -EIO;
2789377254b2SXianting Tian 		}
2790377254b2SXianting Tian 
27911da177e4SLinus Torvalds 		get_bh(bh);
27921da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_write_sync;
2793ab620620SRitesh Harjani (IBM) 		submit_bh(REQ_OP_WRITE | op_flags, bh);
27941da177e4SLinus Torvalds 		wait_on_buffer(bh);
2795ab620620SRitesh Harjani (IBM) 		if (!buffer_uptodate(bh))
2796ab620620SRitesh Harjani (IBM) 			return -EIO;
27971da177e4SLinus Torvalds 	} else {
27981da177e4SLinus Torvalds 		unlock_buffer(bh);
27991da177e4SLinus Torvalds 	}
2800ab620620SRitesh Harjani (IBM) 	return 0;
28011da177e4SLinus Torvalds }
280287e99511SChristoph Hellwig EXPORT_SYMBOL(__sync_dirty_buffer);
280387e99511SChristoph Hellwig 
280487e99511SChristoph Hellwig int sync_dirty_buffer(struct buffer_head *bh)
280587e99511SChristoph Hellwig {
280670fd7614SChristoph Hellwig 	return __sync_dirty_buffer(bh, REQ_SYNC);
280787e99511SChristoph Hellwig }
28081fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(sync_dirty_buffer);
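
/*
 * Example (illustrative sketch; myfs_commit_super is hypothetical):
 * durably updating a single critical metadata block, such as a
 * superblock, with a synchronous write.
 */
static int myfs_commit_super(struct buffer_head *bh)
{
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);	/* waits; -EIO on write failure */
}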
28091da177e4SLinus Torvalds 
28101da177e4SLinus Torvalds /*
281168189fefSMatthew Wilcox (Oracle)  * try_to_free_buffers() checks if all the buffers on this particular folio
28121da177e4SLinus Torvalds  * are unused, and releases them if so.
28131da177e4SLinus Torvalds  *
28141da177e4SLinus Torvalds  * Exclusion against try_to_free_buffers may be obtained by either
281568189fefSMatthew Wilcox (Oracle)  * locking the folio or by holding its mapping's private_lock.
28161da177e4SLinus Torvalds  *
281768189fefSMatthew Wilcox (Oracle)  * If the folio is dirty but all the buffers are clean then we need to
281868189fefSMatthew Wilcox (Oracle)  * be sure to mark the folio clean as well.  This is because the folio
28191da177e4SLinus Torvalds  * may be against a block device, and a later reattachment of buffers
282068189fefSMatthew Wilcox (Oracle)  * to a dirty folio will set *all* buffers dirty.  Which would corrupt
28211da177e4SLinus Torvalds  * filesystem data on the same device.
28221da177e4SLinus Torvalds  *
282368189fefSMatthew Wilcox (Oracle)  * The same applies to regular filesystem folios: if all the buffers are
282468189fefSMatthew Wilcox (Oracle)  * clean then we set the folio clean and proceed.  To do that, we require
2825e621900aSMatthew Wilcox (Oracle)  * total exclusion from block_dirty_folio().  That is obtained with
28261da177e4SLinus Torvalds  * private_lock.
28271da177e4SLinus Torvalds  *
28281da177e4SLinus Torvalds  * try_to_free_buffers() is non-blocking.
28291da177e4SLinus Torvalds  */
28301da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh)
28311da177e4SLinus Torvalds {
28321da177e4SLinus Torvalds 	return atomic_read(&bh->b_count) |
28331da177e4SLinus Torvalds 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
28341da177e4SLinus Torvalds }
28351da177e4SLinus Torvalds 
283664394763SMatthew Wilcox (Oracle) static bool
283764394763SMatthew Wilcox (Oracle) drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
28381da177e4SLinus Torvalds {
283964394763SMatthew Wilcox (Oracle) 	struct buffer_head *head = folio_buffers(folio);
28401da177e4SLinus Torvalds 	struct buffer_head *bh;
28411da177e4SLinus Torvalds 
28421da177e4SLinus Torvalds 	bh = head;
28431da177e4SLinus Torvalds 	do {
28441da177e4SLinus Torvalds 		if (buffer_busy(bh))
28451da177e4SLinus Torvalds 			goto failed;
28461da177e4SLinus Torvalds 		bh = bh->b_this_page;
28471da177e4SLinus Torvalds 	} while (bh != head);
28481da177e4SLinus Torvalds 
28491da177e4SLinus Torvalds 	do {
28501da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
28511da177e4SLinus Torvalds 
2852535ee2fbSJan Kara 		if (bh->b_assoc_map)
28531da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
28541da177e4SLinus Torvalds 		bh = next;
28551da177e4SLinus Torvalds 	} while (bh != head);
28561da177e4SLinus Torvalds 	*buffers_to_free = head;
285764394763SMatthew Wilcox (Oracle) 	folio_detach_private(folio);
285864394763SMatthew Wilcox (Oracle) 	return true;
28591da177e4SLinus Torvalds failed:
286064394763SMatthew Wilcox (Oracle) 	return false;
28611da177e4SLinus Torvalds }
28621da177e4SLinus Torvalds 
286368189fefSMatthew Wilcox (Oracle) bool try_to_free_buffers(struct folio *folio)
28641da177e4SLinus Torvalds {
286568189fefSMatthew Wilcox (Oracle) 	struct address_space * const mapping = folio->mapping;
28661da177e4SLinus Torvalds 	struct buffer_head *buffers_to_free = NULL;
286768189fefSMatthew Wilcox (Oracle) 	bool ret = false;
28681da177e4SLinus Torvalds 
286968189fefSMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
287068189fefSMatthew Wilcox (Oracle) 	if (folio_test_writeback(folio))
287168189fefSMatthew Wilcox (Oracle) 		return false;
28721da177e4SLinus Torvalds 
28731da177e4SLinus Torvalds 	if (mapping == NULL) {		/* can this still happen? */
287464394763SMatthew Wilcox (Oracle) 		ret = drop_buffers(folio, &buffers_to_free);
28751da177e4SLinus Torvalds 		goto out;
28761da177e4SLinus Torvalds 	}
28771da177e4SLinus Torvalds 
28781da177e4SLinus Torvalds 	spin_lock(&mapping->private_lock);
287964394763SMatthew Wilcox (Oracle) 	ret = drop_buffers(folio, &buffers_to_free);
2880ecdfc978SLinus Torvalds 
2881ecdfc978SLinus Torvalds 	/*
2882ecdfc978SLinus Torvalds 	 * If the filesystem writes its buffers by hand (eg ext3)
288368189fefSMatthew Wilcox (Oracle) 	 * then we can have clean buffers against a dirty folio.  We
288468189fefSMatthew Wilcox (Oracle) 	 * clean the folio here; otherwise the VM will never notice
2885ecdfc978SLinus Torvalds 	 * that the filesystem did any IO at all.
2886ecdfc978SLinus Torvalds 	 *
2887ecdfc978SLinus Torvalds 	 * Also, during truncate, discard_buffer will have marked all
288868189fefSMatthew Wilcox (Oracle) 	 * the folio's buffers clean.  We discover that here and clean
288968189fefSMatthew Wilcox (Oracle) 	 * the folio also.
289087df7241SNick Piggin 	 *
289187df7241SNick Piggin 	 * private_lock must be held over this entire operation in order
2892e621900aSMatthew Wilcox (Oracle) 	 * to synchronise against block_dirty_folio and prevent the
289387df7241SNick Piggin 	 * dirty bit from being lost.
2894ecdfc978SLinus Torvalds 	 */
289511f81becSTejun Heo 	if (ret)
289668189fefSMatthew Wilcox (Oracle) 		folio_cancel_dirty(folio);
289787df7241SNick Piggin 	spin_unlock(&mapping->private_lock);
28981da177e4SLinus Torvalds out:
28991da177e4SLinus Torvalds 	if (buffers_to_free) {
29001da177e4SLinus Torvalds 		struct buffer_head *bh = buffers_to_free;
29011da177e4SLinus Torvalds 
29021da177e4SLinus Torvalds 		do {
29031da177e4SLinus Torvalds 			struct buffer_head *next = bh->b_this_page;
29041da177e4SLinus Torvalds 			free_buffer_head(bh);
29051da177e4SLinus Torvalds 			bh = next;
29061da177e4SLinus Torvalds 		} while (bh != buffers_to_free);
29071da177e4SLinus Torvalds 	}
29081da177e4SLinus Torvalds 	return ret;
29091da177e4SLinus Torvalds }
29101da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers);
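
/*
 * Example (illustrative sketch; myfs_release_folio is hypothetical): a
 * minimal ->release_folio for a buffer-backed filesystem with no
 * private state of its own to check first.
 */
static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
{
	return try_to_free_buffers(folio);
}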
29111da177e4SLinus Torvalds 
29121da177e4SLinus Torvalds /*
29131da177e4SLinus Torvalds  * Buffer-head allocation
29141da177e4SLinus Torvalds  */
2915a0a9b043SShai Fultheim static struct kmem_cache *bh_cachep __read_mostly;
29161da177e4SLinus Torvalds 
29171da177e4SLinus Torvalds /*
29181da177e4SLinus Torvalds  * Once the number of bh's in the machine exceeds this level, we start
29191da177e4SLinus Torvalds  * stripping them in writeback.
29201da177e4SLinus Torvalds  */
292143be594aSZhang Yanfei static unsigned long max_buffer_heads;
29221da177e4SLinus Torvalds 
29231da177e4SLinus Torvalds int buffer_heads_over_limit;
29241da177e4SLinus Torvalds 
29251da177e4SLinus Torvalds struct bh_accounting {
29261da177e4SLinus Torvalds 	int nr;			/* Number of live bh's */
29271da177e4SLinus Torvalds 	int ratelimit;		/* Limit cacheline bouncing */
29281da177e4SLinus Torvalds };
29291da177e4SLinus Torvalds 
29301da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
29311da177e4SLinus Torvalds 
29321da177e4SLinus Torvalds static void recalc_bh_state(void)
29331da177e4SLinus Torvalds {
29341da177e4SLinus Torvalds 	int i;
29351da177e4SLinus Torvalds 	int tot = 0;
29361da177e4SLinus Torvalds 
2937ee1be862SChristoph Lameter 	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
29381da177e4SLinus Torvalds 		return;
2939c7b92516SChristoph Lameter 	__this_cpu_write(bh_accounting.ratelimit, 0);
29408a143426SEric Dumazet 	for_each_online_cpu(i)
29411da177e4SLinus Torvalds 		tot += per_cpu(bh_accounting, i).nr;
29421da177e4SLinus Torvalds 	buffer_heads_over_limit = (tot > max_buffer_heads);
29431da177e4SLinus Torvalds }
29441da177e4SLinus Torvalds 
2945dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
29461da177e4SLinus Torvalds {
2947019b4d12SRichard Kennedy 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
29481da177e4SLinus Torvalds 	if (ret) {
2949a35afb83SChristoph Lameter 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
2950f1e67e35SThomas Gleixner 		spin_lock_init(&ret->b_uptodate_lock);
2951c7b92516SChristoph Lameter 		preempt_disable();
2952c7b92516SChristoph Lameter 		__this_cpu_inc(bh_accounting.nr);
29531da177e4SLinus Torvalds 		recalc_bh_state();
2954c7b92516SChristoph Lameter 		preempt_enable();
29551da177e4SLinus Torvalds 	}
29561da177e4SLinus Torvalds 	return ret;
29571da177e4SLinus Torvalds }
29581da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head);
29591da177e4SLinus Torvalds 
29601da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh)
29611da177e4SLinus Torvalds {
29621da177e4SLinus Torvalds 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
29631da177e4SLinus Torvalds 	kmem_cache_free(bh_cachep, bh);
2964c7b92516SChristoph Lameter 	preempt_disable();
2965c7b92516SChristoph Lameter 	__this_cpu_dec(bh_accounting.nr);
29661da177e4SLinus Torvalds 	recalc_bh_state();
2967c7b92516SChristoph Lameter 	preempt_enable();
29681da177e4SLinus Torvalds }
29691da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head);
29701da177e4SLinus Torvalds 
2971fc4d24c9SSebastian Andrzej Siewior static int buffer_exit_cpu_dead(unsigned int cpu)
29721da177e4SLinus Torvalds {
29731da177e4SLinus Torvalds 	int i;
29741da177e4SLinus Torvalds 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
29751da177e4SLinus Torvalds 
29761da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
29771da177e4SLinus Torvalds 		brelse(b->bhs[i]);
29781da177e4SLinus Torvalds 		b->bhs[i] = NULL;
29791da177e4SLinus Torvalds 	}
2980c7b92516SChristoph Lameter 	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
29818a143426SEric Dumazet 	per_cpu(bh_accounting, cpu).nr = 0;
2982fc4d24c9SSebastian Andrzej Siewior 	return 0;
29831da177e4SLinus Torvalds }
29841da177e4SLinus Torvalds 
2985389d1b08SAneesh Kumar K.V /**
2986a6b91919SRandy Dunlap  * bh_uptodate_or_lock - Test whether the buffer is uptodate
2987389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head
2988389d1b08SAneesh Kumar K.V  *
2989389d1b08SAneesh Kumar K.V  * Return true if the buffer is up-to-date; otherwise
2990389d1b08SAneesh Kumar K.V  * return false with the buffer locked.
2991389d1b08SAneesh Kumar K.V  */
2992389d1b08SAneesh Kumar K.V int bh_uptodate_or_lock(struct buffer_head *bh)
2993389d1b08SAneesh Kumar K.V {
2994389d1b08SAneesh Kumar K.V 	if (!buffer_uptodate(bh)) {
2995389d1b08SAneesh Kumar K.V 		lock_buffer(bh);
2996389d1b08SAneesh Kumar K.V 		if (!buffer_uptodate(bh))
2997389d1b08SAneesh Kumar K.V 			return 0;
2998389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
2999389d1b08SAneesh Kumar K.V 	}
3000389d1b08SAneesh Kumar K.V 	return 1;
3001389d1b08SAneesh Kumar K.V }
3002389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_uptodate_or_lock);
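
/*
 * Example (illustrative sketch; myfs_read_bh is hypothetical): the
 * read-if-needed pattern this helper enables.  When it returns zero the
 * buffer comes back locked, which is exactly the state __bh_read()
 * below requires.
 */
static int myfs_read_bh(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already uptodate, nothing to do */
	return __bh_read(bh, 0, true);	/* locked: submit read and wait */
}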
3003389d1b08SAneesh Kumar K.V 
3004389d1b08SAneesh Kumar K.V /**
3005fdee117eSZhang Yi  * __bh_read - Submit read for a locked buffer
3006389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head
3007fdee117eSZhang Yi  * @op_flags: extra REQ_* flags to combine with REQ_OP_READ
3008fdee117eSZhang Yi  * @wait: wait until the read completes
3009389d1b08SAneesh Kumar K.V  *
3010fdee117eSZhang Yi  * Returns zero on success, or -EIO on error.  If @wait is false, zero
3010fdee117eSZhang Yi  * is returned as soon as the read has been submitted.
3011389d1b08SAneesh Kumar K.V  */
3012fdee117eSZhang Yi int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3013389d1b08SAneesh Kumar K.V {
3014fdee117eSZhang Yi 	int ret = 0;
3015389d1b08SAneesh Kumar K.V 
3016fdee117eSZhang Yi 	BUG_ON(!buffer_locked(bh));
3017389d1b08SAneesh Kumar K.V 
3018389d1b08SAneesh Kumar K.V 	get_bh(bh);
3019389d1b08SAneesh Kumar K.V 	bh->b_end_io = end_buffer_read_sync;
3020fdee117eSZhang Yi 	submit_bh(REQ_OP_READ | op_flags, bh);
3021fdee117eSZhang Yi 	if (wait) {
3022389d1b08SAneesh Kumar K.V 		wait_on_buffer(bh);
3023fdee117eSZhang Yi 		if (!buffer_uptodate(bh))
3024fdee117eSZhang Yi 			ret = -EIO;
3025389d1b08SAneesh Kumar K.V 	}
3026fdee117eSZhang Yi 	return ret;
3027fdee117eSZhang Yi }
3028fdee117eSZhang Yi EXPORT_SYMBOL(__bh_read);
3029fdee117eSZhang Yi 
3030fdee117eSZhang Yi /**
3031fdee117eSZhang Yi  * __bh_read_batch - Submit read for a batch of unlocked buffers
3032fdee117eSZhang Yi  * @nr: number of buffers in the batch
3033fdee117eSZhang Yi  * @bhs: a batch of struct buffer_head
3034fdee117eSZhang Yi  * @op_flags: extra REQ_* flags to combine with REQ_OP_READ
3035fdee117eSZhang Yi  * @force_lock: if set, block until each buffer's lock is acquired;
3036fdee117eSZhang Yi  *              otherwise skip any buffer that cannot be locked immediately.
3037fdee117eSZhang Yi  *
3038fdee117eSZhang Yi  * Reads are submitted asynchronously and already-uptodate buffers are
3038fdee117eSZhang Yi  * skipped; callers that need the data must wait on each buffer themselves.
3039fdee117eSZhang Yi  */
3040fdee117eSZhang Yi void __bh_read_batch(int nr, struct buffer_head *bhs[],
3041fdee117eSZhang Yi 		     blk_opf_t op_flags, bool force_lock)
3042fdee117eSZhang Yi {
3043fdee117eSZhang Yi 	int i;
3044fdee117eSZhang Yi 
3045fdee117eSZhang Yi 	for (i = 0; i < nr; i++) {
3046fdee117eSZhang Yi 		struct buffer_head *bh = bhs[i];
3047fdee117eSZhang Yi 
3048fdee117eSZhang Yi 		if (buffer_uptodate(bh))
3049fdee117eSZhang Yi 			continue;
3050fdee117eSZhang Yi 
3051fdee117eSZhang Yi 		if (force_lock)
3052fdee117eSZhang Yi 			lock_buffer(bh);
3053fdee117eSZhang Yi 		else
3054fdee117eSZhang Yi 			if (!trylock_buffer(bh))
3055fdee117eSZhang Yi 				continue;
3056fdee117eSZhang Yi 
3057fdee117eSZhang Yi 		if (buffer_uptodate(bh)) {
3058fdee117eSZhang Yi 			unlock_buffer(bh);
3059fdee117eSZhang Yi 			continue;
3060fdee117eSZhang Yi 		}
3061fdee117eSZhang Yi 
3062fdee117eSZhang Yi 		bh->b_end_io = end_buffer_read_sync;
3063fdee117eSZhang Yi 		get_bh(bh);
3064fdee117eSZhang Yi 		submit_bh(REQ_OP_READ | op_flags, bh);
3065fdee117eSZhang Yi 	}
3066fdee117eSZhang Yi }
3067fdee117eSZhang Yi EXPORT_SYMBOL(__bh_read_batch);
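
/*
 * Example (illustrative sketch; the myfs_* names are hypothetical):
 * opportunistic readahead over a batch of buffers - already-uptodate or
 * contended buffers are skipped - followed by a blocking read of the
 * one buffer actually needed.
 */
static int myfs_read_with_readahead(struct buffer_head *bh,
				    struct buffer_head *ra[], int nr)
{
	__bh_read_batch(nr, ra, REQ_RAHEAD, false);
	return bh_read(bh, 0);	/* <0 on error, 0 or 1 on success */
}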
3068389d1b08SAneesh Kumar K.V 
30691da177e4SLinus Torvalds void __init buffer_init(void)
30701da177e4SLinus Torvalds {
307143be594aSZhang Yanfei 	unsigned long nrpages;
3072fc4d24c9SSebastian Andrzej Siewior 	int ret;
30731da177e4SLinus Torvalds 
3074b98938c3SChristoph Lameter 	bh_cachep = kmem_cache_create("buffer_head",
3075b98938c3SChristoph Lameter 			sizeof(struct buffer_head), 0,
3076b98938c3SChristoph Lameter 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3077b98938c3SChristoph Lameter 				SLAB_MEM_SPREAD),
3078019b4d12SRichard Kennedy 				NULL);
30791da177e4SLinus Torvalds 
30801da177e4SLinus Torvalds 	/*
30811da177e4SLinus Torvalds 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
30821da177e4SLinus Torvalds 	 */
30831da177e4SLinus Torvalds 	nrpages = (nr_free_buffer_pages() * 10) / 100;
30841da177e4SLinus Torvalds 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3085fc4d24c9SSebastian Andrzej Siewior 	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3086fc4d24c9SSebastian Andrzej Siewior 					NULL, buffer_exit_cpu_dead);
3087fc4d24c9SSebastian Andrzej Siewior 	WARN_ON(ret < 0);
30881da177e4SLinus Torvalds }
3089