xref: /linux/fs/buffer.c (revision ab620620bf426af110093c5921888c0a4bf86ab4)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  *  linux/fs/buffer.c
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
61da177e4SLinus Torvalds  */
71da177e4SLinus Torvalds 
81da177e4SLinus Torvalds /*
91da177e4SLinus Torvalds  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Removed a lot of unnecessary code and simplified things now that
121da177e4SLinus Torvalds  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
131da177e4SLinus Torvalds  *
141da177e4SLinus Torvalds  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
151da177e4SLinus Torvalds  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
161da177e4SLinus Torvalds  *
171da177e4SLinus Torvalds  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
181da177e4SLinus Torvalds  *
191da177e4SLinus Torvalds  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
201da177e4SLinus Torvalds  */
211da177e4SLinus Torvalds 
221da177e4SLinus Torvalds #include <linux/kernel.h>
23f361bf4aSIngo Molnar #include <linux/sched/signal.h>
241da177e4SLinus Torvalds #include <linux/syscalls.h>
251da177e4SLinus Torvalds #include <linux/fs.h>
26ae259a9cSChristoph Hellwig #include <linux/iomap.h>
271da177e4SLinus Torvalds #include <linux/mm.h>
281da177e4SLinus Torvalds #include <linux/percpu.h>
291da177e4SLinus Torvalds #include <linux/slab.h>
3016f7e0feSRandy Dunlap #include <linux/capability.h>
311da177e4SLinus Torvalds #include <linux/blkdev.h>
321da177e4SLinus Torvalds #include <linux/file.h>
331da177e4SLinus Torvalds #include <linux/quotaops.h>
341da177e4SLinus Torvalds #include <linux/highmem.h>
35630d9c47SPaul Gortmaker #include <linux/export.h>
36bafc0dbaSTejun Heo #include <linux/backing-dev.h>
371da177e4SLinus Torvalds #include <linux/writeback.h>
381da177e4SLinus Torvalds #include <linux/hash.h>
391da177e4SLinus Torvalds #include <linux/suspend.h>
401da177e4SLinus Torvalds #include <linux/buffer_head.h>
4155e829afSAndrew Morton #include <linux/task_io_accounting_ops.h>
421da177e4SLinus Torvalds #include <linux/bio.h>
431da177e4SLinus Torvalds #include <linux/cpu.h>
441da177e4SLinus Torvalds #include <linux/bitops.h>
451da177e4SLinus Torvalds #include <linux/mpage.h>
46fb1c8f93SIngo Molnar #include <linux/bit_spinlock.h>
4729f3ad7dSJan Kara #include <linux/pagevec.h>
48f745c6f5SShakeel Butt #include <linux/sched/mm.h>
495305cb83STejun Heo #include <trace/events/block.h>
5031fb992cSEric Biggers #include <linux/fscrypt.h>
511da177e4SLinus Torvalds 
522b211dc0SBen Dooks #include "internal.h"
532b211dc0SBen Dooks 
541da177e4SLinus Torvalds static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
551420c4a5SBart Van Assche static int submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
561420c4a5SBart Van Assche 			 struct writeback_control *wbc);
571da177e4SLinus Torvalds 
581da177e4SLinus Torvalds #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
591da177e4SLinus Torvalds 
60f0059afdSTejun Heo inline void touch_buffer(struct buffer_head *bh)
61f0059afdSTejun Heo {
625305cb83STejun Heo 	trace_block_touch_buffer(bh);
63f0059afdSTejun Heo 	mark_page_accessed(bh->b_page);
64f0059afdSTejun Heo }
65f0059afdSTejun Heo EXPORT_SYMBOL(touch_buffer);
66f0059afdSTejun Heo 
67fc9b52cdSHarvey Harrison void __lock_buffer(struct buffer_head *bh)
681da177e4SLinus Torvalds {
6974316201SNeilBrown 	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
701da177e4SLinus Torvalds }
711da177e4SLinus Torvalds EXPORT_SYMBOL(__lock_buffer);
721da177e4SLinus Torvalds 
73fc9b52cdSHarvey Harrison void unlock_buffer(struct buffer_head *bh)
741da177e4SLinus Torvalds {
7551b07fc3SNick Piggin 	clear_bit_unlock(BH_Lock, &bh->b_state);
764e857c58SPeter Zijlstra 	smp_mb__after_atomic();
771da177e4SLinus Torvalds 	wake_up_bit(&bh->b_state, BH_Lock);
781da177e4SLinus Torvalds }
791fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(unlock_buffer);
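
/*
 * Illustrative usage sketch (hedged; not lifted from any particular
 * caller): the usual pattern is to lock a buffer before modifying its
 * contents and to unlock it afterwards, waking waiters via the
 * wake_up_bit() above:
 *
 *	lock_buffer(bh);
 *	memcpy(bh->b_data, src, bh->b_size);
 *	mark_buffer_dirty(bh);
 *	unlock_buffer(bh);
 */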
801da177e4SLinus Torvalds 
811da177e4SLinus Torvalds /*
82520f301cSMatthew Wilcox (Oracle)  * Returns whether the folio has dirty or writeback buffers. If all the buffers
83520f301cSMatthew Wilcox (Oracle)  * are unlocked and clean then the folio_test_dirty information is stale. If
84520f301cSMatthew Wilcox (Oracle)  * any of the buffers are locked, it is assumed they are locked for IO.
85b4597226SMel Gorman  */
86520f301cSMatthew Wilcox (Oracle) void buffer_check_dirty_writeback(struct folio *folio,
87b4597226SMel Gorman 				     bool *dirty, bool *writeback)
88b4597226SMel Gorman {
89b4597226SMel Gorman 	struct buffer_head *head, *bh;
90b4597226SMel Gorman 	*dirty = false;
91b4597226SMel Gorman 	*writeback = false;
92b4597226SMel Gorman 
93520f301cSMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
94b4597226SMel Gorman 
95520f301cSMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
96520f301cSMatthew Wilcox (Oracle) 	if (!head)
97b4597226SMel Gorman 		return;
98b4597226SMel Gorman 
99520f301cSMatthew Wilcox (Oracle) 	if (folio_test_writeback(folio))
100b4597226SMel Gorman 		*writeback = true;
101b4597226SMel Gorman 
102b4597226SMel Gorman 	bh = head;
103b4597226SMel Gorman 	do {
104b4597226SMel Gorman 		if (buffer_locked(bh))
105b4597226SMel Gorman 			*writeback = true;
106b4597226SMel Gorman 
107b4597226SMel Gorman 		if (buffer_dirty(bh))
108b4597226SMel Gorman 			*dirty = true;
109b4597226SMel Gorman 
110b4597226SMel Gorman 		bh = bh->b_this_page;
111b4597226SMel Gorman 	} while (bh != head);
112b4597226SMel Gorman }
113b4597226SMel Gorman EXPORT_SYMBOL(buffer_check_dirty_writeback);
114b4597226SMel Gorman 
115b4597226SMel Gorman /*
1161da177e4SLinus Torvalds  * Block until a buffer comes unlocked.  This doesn't stop it
1171da177e4SLinus Torvalds  * from becoming locked again - you have to lock it yourself
1181da177e4SLinus Torvalds  * if you want to preserve its state.
1191da177e4SLinus Torvalds  */
1201da177e4SLinus Torvalds void __wait_on_buffer(struct buffer_head * bh)
1211da177e4SLinus Torvalds {
12274316201SNeilBrown 	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
1231da177e4SLinus Torvalds }
1241fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__wait_on_buffer);
1251da177e4SLinus Torvalds 
126b744c2acSRobert Elliott static void buffer_io_error(struct buffer_head *bh, char *msg)
1271da177e4SLinus Torvalds {
128432f16e6SRobert Elliott 	if (!test_bit(BH_Quiet, &bh->b_state))
129432f16e6SRobert Elliott 		printk_ratelimited(KERN_ERR
130a1c6f057SDmitry Monakhov 			"Buffer I/O error on dev %pg, logical block %llu%s\n",
131a1c6f057SDmitry Monakhov 			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
1321da177e4SLinus Torvalds }
1331da177e4SLinus Torvalds 
1341da177e4SLinus Torvalds /*
13568671f35SDmitry Monakhov  * End-of-IO handler helper function which does not touch the bh after
13668671f35SDmitry Monakhov  * unlocking it.
13768671f35SDmitry Monakhov  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
13868671f35SDmitry Monakhov  * a race there is benign: unlock_buffer() only uses the bh's address for
13968671f35SDmitry Monakhov  * hashing after unlocking the buffer, so it doesn't actually touch the bh
14068671f35SDmitry Monakhov  * itself.
1411da177e4SLinus Torvalds  */
14268671f35SDmitry Monakhov static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
1431da177e4SLinus Torvalds {
1441da177e4SLinus Torvalds 	if (uptodate) {
1451da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
1461da177e4SLinus Torvalds 	} else {
14770246286SChristoph Hellwig 		/* This happens due to failed read-ahead attempts. */
1481da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
1491da177e4SLinus Torvalds 	}
1501da177e4SLinus Torvalds 	unlock_buffer(bh);
15168671f35SDmitry Monakhov }
15268671f35SDmitry Monakhov 
15368671f35SDmitry Monakhov /*
15468671f35SDmitry Monakhov  * Default synchronous end-of-IO handler.  Just mark it up-to-date and
15568671f35SDmitry Monakhov  * unlock the buffer. This is what ll_rw_block uses too.
15668671f35SDmitry Monakhov  */
15768671f35SDmitry Monakhov void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
15868671f35SDmitry Monakhov {
15968671f35SDmitry Monakhov 	__end_buffer_read_notouch(bh, uptodate);
1601da177e4SLinus Torvalds 	put_bh(bh);
1611da177e4SLinus Torvalds }
1621fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(end_buffer_read_sync);
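
/*
 * Minimal synchronous-read sketch using end_buffer_read_sync()
 * (hedged; this mirrors what __bread_slow() does later in this file):
 *
 *	lock_buffer(bh);
 *	if (buffer_uptodate(bh)) {
 *		unlock_buffer(bh);
 *	} else {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_read_sync;
 *		submit_bh(REQ_OP_READ, bh);
 *		wait_on_buffer(bh);
 *	}
 *
 * On completion end_buffer_read_sync() has dropped the extra ref and
 * unlocked the buffer, so buffer_uptodate(bh) then holds the result.
 */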
1631da177e4SLinus Torvalds 
1641da177e4SLinus Torvalds void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
1651da177e4SLinus Torvalds {
1661da177e4SLinus Torvalds 	if (uptodate) {
1671da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
1681da177e4SLinus Torvalds 	} else {
169b744c2acSRobert Elliott 		buffer_io_error(bh, ", lost sync page write");
17087354e5dSJeff Layton 		mark_buffer_write_io_error(bh);
1711da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
1721da177e4SLinus Torvalds 	}
1731da177e4SLinus Torvalds 	unlock_buffer(bh);
1741da177e4SLinus Torvalds 	put_bh(bh);
1751da177e4SLinus Torvalds }
1761fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(end_buffer_write_sync);
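
/*
 * The matching synchronous-write sketch (hedged; loosely modelled on
 * __sync_dirty_buffer() elsewhere in this file):
 *
 *	lock_buffer(bh);
 *	if (test_clear_buffer_dirty(bh)) {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_write_sync;
 *		submit_bh(REQ_OP_WRITE, bh);
 *		wait_on_buffer(bh);
 *	} else
 *		unlock_buffer(bh);
 */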
1771da177e4SLinus Torvalds 
1781da177e4SLinus Torvalds /*
1791da177e4SLinus Torvalds  * Various filesystems appear to want __find_get_block to be non-blocking.
1801da177e4SLinus Torvalds  * But it's the page lock which protects the buffers.  To get around this,
1811da177e4SLinus Torvalds  * we get exclusion from try_to_free_buffers with the blockdev mapping's
1821da177e4SLinus Torvalds  * private_lock.
1831da177e4SLinus Torvalds  *
184b93b0163SMatthew Wilcox  * Hack idea: for the blockdev mapping, private_lock contention
1851da177e4SLinus Torvalds  * may be quite high.  This code could TryLock the page, and if that
186b93b0163SMatthew Wilcox  * succeeds, there is no need to take private_lock.
1871da177e4SLinus Torvalds  */
1881da177e4SLinus Torvalds static struct buffer_head *
189385fd4c5SCoywolf Qi Hunt __find_get_block_slow(struct block_device *bdev, sector_t block)
1901da177e4SLinus Torvalds {
1911da177e4SLinus Torvalds 	struct inode *bd_inode = bdev->bd_inode;
1921da177e4SLinus Torvalds 	struct address_space *bd_mapping = bd_inode->i_mapping;
1931da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
1941da177e4SLinus Torvalds 	pgoff_t index;
1951da177e4SLinus Torvalds 	struct buffer_head *bh;
1961da177e4SLinus Torvalds 	struct buffer_head *head;
1971da177e4SLinus Torvalds 	struct page *page;
1981da177e4SLinus Torvalds 	int all_mapped = 1;
19943636c80STetsuo Handa 	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
2001da177e4SLinus Torvalds 
20109cbfeafSKirill A. Shutemov 	index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
2022457aec6SMel Gorman 	page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
2031da177e4SLinus Torvalds 	if (!page)
2041da177e4SLinus Torvalds 		goto out;
2051da177e4SLinus Torvalds 
2061da177e4SLinus Torvalds 	spin_lock(&bd_mapping->private_lock);
2071da177e4SLinus Torvalds 	if (!page_has_buffers(page))
2081da177e4SLinus Torvalds 		goto out_unlock;
2091da177e4SLinus Torvalds 	head = page_buffers(page);
2101da177e4SLinus Torvalds 	bh = head;
2111da177e4SLinus Torvalds 	do {
21297f76d3dSNikanth Karthikesan 		if (!buffer_mapped(bh))
21397f76d3dSNikanth Karthikesan 			all_mapped = 0;
21497f76d3dSNikanth Karthikesan 		else if (bh->b_blocknr == block) {
2151da177e4SLinus Torvalds 			ret = bh;
2161da177e4SLinus Torvalds 			get_bh(bh);
2171da177e4SLinus Torvalds 			goto out_unlock;
2181da177e4SLinus Torvalds 		}
2191da177e4SLinus Torvalds 		bh = bh->b_this_page;
2201da177e4SLinus Torvalds 	} while (bh != head);
2211da177e4SLinus Torvalds 
2221da177e4SLinus Torvalds 	/* we might be here because some of the buffers on this page are
2231da177e4SLinus Torvalds 	 * not mapped.  This is due to various races between
2241da177e4SLinus Torvalds 	 * file io on the block device and getblk.  It gets dealt with
2251da177e4SLinus Torvalds 	 * elsewhere, don't buffer_error if we had some unmapped buffers
2261da177e4SLinus Torvalds 	 */
22743636c80STetsuo Handa 	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
22843636c80STetsuo Handa 	if (all_mapped && __ratelimit(&last_warned)) {
22943636c80STetsuo Handa 		printk("__find_get_block_slow() failed. block=%llu, "
23043636c80STetsuo Handa 		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
23143636c80STetsuo Handa 		       "device %pg blocksize: %d\n",
232205f87f6SBadari Pulavarty 		       (unsigned long long)block,
23343636c80STetsuo Handa 		       (unsigned long long)bh->b_blocknr,
23443636c80STetsuo Handa 		       bh->b_state, bh->b_size, bdev,
23572a2ebd8STao Ma 		       1 << bd_inode->i_blkbits);
2361da177e4SLinus Torvalds 	}
2371da177e4SLinus Torvalds out_unlock:
2381da177e4SLinus Torvalds 	spin_unlock(&bd_mapping->private_lock);
23909cbfeafSKirill A. Shutemov 	put_page(page);
2401da177e4SLinus Torvalds out:
2411da177e4SLinus Torvalds 	return ret;
2421da177e4SLinus Torvalds }
2431da177e4SLinus Torvalds 
2441da177e4SLinus Torvalds static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
2451da177e4SLinus Torvalds {
2461da177e4SLinus Torvalds 	unsigned long flags;
247a3972203SNick Piggin 	struct buffer_head *first;
2481da177e4SLinus Torvalds 	struct buffer_head *tmp;
2491da177e4SLinus Torvalds 	struct page *page;
2501da177e4SLinus Torvalds 	int page_uptodate = 1;
2511da177e4SLinus Torvalds 
2521da177e4SLinus Torvalds 	BUG_ON(!buffer_async_read(bh));
2531da177e4SLinus Torvalds 
2541da177e4SLinus Torvalds 	page = bh->b_page;
2551da177e4SLinus Torvalds 	if (uptodate) {
2561da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
2571da177e4SLinus Torvalds 	} else {
2581da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
259b744c2acSRobert Elliott 		buffer_io_error(bh, ", async page read");
2601da177e4SLinus Torvalds 		SetPageError(page);
2611da177e4SLinus Torvalds 	}
2621da177e4SLinus Torvalds 
2631da177e4SLinus Torvalds 	/*
2641da177e4SLinus Torvalds 	 * Be _very_ careful from here on. Bad things can happen if
2651da177e4SLinus Torvalds 	 * two buffer heads end IO at almost the same time and both
2661da177e4SLinus Torvalds 	 * decide that the page is now completely done.
2671da177e4SLinus Torvalds 	 */
268a3972203SNick Piggin 	first = page_buffers(page);
269f1e67e35SThomas Gleixner 	spin_lock_irqsave(&first->b_uptodate_lock, flags);
2701da177e4SLinus Torvalds 	clear_buffer_async_read(bh);
2711da177e4SLinus Torvalds 	unlock_buffer(bh);
2721da177e4SLinus Torvalds 	tmp = bh;
2731da177e4SLinus Torvalds 	do {
2741da177e4SLinus Torvalds 		if (!buffer_uptodate(tmp))
2751da177e4SLinus Torvalds 			page_uptodate = 0;
2761da177e4SLinus Torvalds 		if (buffer_async_read(tmp)) {
2771da177e4SLinus Torvalds 			BUG_ON(!buffer_locked(tmp));
2781da177e4SLinus Torvalds 			goto still_busy;
2791da177e4SLinus Torvalds 		}
2801da177e4SLinus Torvalds 		tmp = tmp->b_this_page;
2811da177e4SLinus Torvalds 	} while (tmp != bh);
282f1e67e35SThomas Gleixner 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
2831da177e4SLinus Torvalds 
2841da177e4SLinus Torvalds 	/*
2856e8e79fcSMatthew Wilcox (Oracle) 	 * If all of the buffers are uptodate then we can set the page
2866e8e79fcSMatthew Wilcox (Oracle) 	 * uptodate.
2871da177e4SLinus Torvalds 	 */
2886e8e79fcSMatthew Wilcox (Oracle) 	if (page_uptodate)
2891da177e4SLinus Torvalds 		SetPageUptodate(page);
2901da177e4SLinus Torvalds 	unlock_page(page);
2911da177e4SLinus Torvalds 	return;
2921da177e4SLinus Torvalds 
2931da177e4SLinus Torvalds still_busy:
294f1e67e35SThomas Gleixner 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
2951da177e4SLinus Torvalds 	return;
2961da177e4SLinus Torvalds }
2971da177e4SLinus Torvalds 
29831fb992cSEric Biggers struct decrypt_bh_ctx {
29931fb992cSEric Biggers 	struct work_struct work;
30031fb992cSEric Biggers 	struct buffer_head *bh;
30131fb992cSEric Biggers };
30231fb992cSEric Biggers 
30331fb992cSEric Biggers static void decrypt_bh(struct work_struct *work)
30431fb992cSEric Biggers {
30531fb992cSEric Biggers 	struct decrypt_bh_ctx *ctx =
30631fb992cSEric Biggers 		container_of(work, struct decrypt_bh_ctx, work);
30731fb992cSEric Biggers 	struct buffer_head *bh = ctx->bh;
30831fb992cSEric Biggers 	int err;
30931fb992cSEric Biggers 
31031fb992cSEric Biggers 	err = fscrypt_decrypt_pagecache_blocks(bh->b_page, bh->b_size,
31131fb992cSEric Biggers 					       bh_offset(bh));
31231fb992cSEric Biggers 	end_buffer_async_read(bh, err == 0);
31331fb992cSEric Biggers 	kfree(ctx);
31431fb992cSEric Biggers }
31531fb992cSEric Biggers 
31631fb992cSEric Biggers /*
3172c69e205SMatthew Wilcox (Oracle)  * I/O completion handler for block_read_full_folio() - pages
31831fb992cSEric Biggers  * which come unlocked at the end of I/O.
31931fb992cSEric Biggers  */
32031fb992cSEric Biggers static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
32131fb992cSEric Biggers {
32231fb992cSEric Biggers 	/* Decrypt if needed */
3234f74d15fSEric Biggers 	if (uptodate &&
3244f74d15fSEric Biggers 	    fscrypt_inode_uses_fs_layer_crypto(bh->b_page->mapping->host)) {
32531fb992cSEric Biggers 		struct decrypt_bh_ctx *ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
32631fb992cSEric Biggers 
32731fb992cSEric Biggers 		if (ctx) {
32831fb992cSEric Biggers 			INIT_WORK(&ctx->work, decrypt_bh);
32931fb992cSEric Biggers 			ctx->bh = bh;
33031fb992cSEric Biggers 			fscrypt_enqueue_decrypt_work(&ctx->work);
33131fb992cSEric Biggers 			return;
33231fb992cSEric Biggers 		}
33331fb992cSEric Biggers 		uptodate = 0;
33431fb992cSEric Biggers 	}
33531fb992cSEric Biggers 	end_buffer_async_read(bh, uptodate);
33631fb992cSEric Biggers }
33731fb992cSEric Biggers 
3381da177e4SLinus Torvalds /*
3391da177e4SLinus Torvalds  * Completion handler for block_write_full_page() - pages which are unlocked
3401da177e4SLinus Torvalds  * during I/O, and which have PageWriteback cleared upon I/O completion.
3411da177e4SLinus Torvalds  */
34235c80d5fSChris Mason void end_buffer_async_write(struct buffer_head *bh, int uptodate)
3431da177e4SLinus Torvalds {
3441da177e4SLinus Torvalds 	unsigned long flags;
345a3972203SNick Piggin 	struct buffer_head *first;
3461da177e4SLinus Torvalds 	struct buffer_head *tmp;
3471da177e4SLinus Torvalds 	struct page *page;
3481da177e4SLinus Torvalds 
3491da177e4SLinus Torvalds 	BUG_ON(!buffer_async_write(bh));
3501da177e4SLinus Torvalds 
3511da177e4SLinus Torvalds 	page = bh->b_page;
3521da177e4SLinus Torvalds 	if (uptodate) {
3531da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
3541da177e4SLinus Torvalds 	} else {
355b744c2acSRobert Elliott 		buffer_io_error(bh, ", lost async page write");
35687354e5dSJeff Layton 		mark_buffer_write_io_error(bh);
3571da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
3581da177e4SLinus Torvalds 		SetPageError(page);
3591da177e4SLinus Torvalds 	}
3601da177e4SLinus Torvalds 
361a3972203SNick Piggin 	first = page_buffers(page);
362f1e67e35SThomas Gleixner 	spin_lock_irqsave(&first->b_uptodate_lock, flags);
363a3972203SNick Piggin 
3641da177e4SLinus Torvalds 	clear_buffer_async_write(bh);
3651da177e4SLinus Torvalds 	unlock_buffer(bh);
3661da177e4SLinus Torvalds 	tmp = bh->b_this_page;
3671da177e4SLinus Torvalds 	while (tmp != bh) {
3681da177e4SLinus Torvalds 		if (buffer_async_write(tmp)) {
3691da177e4SLinus Torvalds 			BUG_ON(!buffer_locked(tmp));
3701da177e4SLinus Torvalds 			goto still_busy;
3711da177e4SLinus Torvalds 		}
3721da177e4SLinus Torvalds 		tmp = tmp->b_this_page;
3731da177e4SLinus Torvalds 	}
374f1e67e35SThomas Gleixner 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
3751da177e4SLinus Torvalds 	end_page_writeback(page);
3761da177e4SLinus Torvalds 	return;
3771da177e4SLinus Torvalds 
3781da177e4SLinus Torvalds still_busy:
379f1e67e35SThomas Gleixner 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
3801da177e4SLinus Torvalds 	return;
3811da177e4SLinus Torvalds }
3821fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(end_buffer_async_write);
3831da177e4SLinus Torvalds 
3841da177e4SLinus Torvalds /*
3851da177e4SLinus Torvalds  * If a page's buffers are under async read-in (end_buffer_async_read
3861da177e4SLinus Torvalds  * completion) then there is a possibility that another thread of
3871da177e4SLinus Torvalds  * control could lock one of the buffers after it has completed
3881da177e4SLinus Torvalds  * but while some of the other buffers have not completed.  This
3891da177e4SLinus Torvalds  * locked buffer would confuse end_buffer_async_read() into not unlocking
3901da177e4SLinus Torvalds  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
3911da177e4SLinus Torvalds  * that this buffer is not under async I/O.
3921da177e4SLinus Torvalds  *
3931da177e4SLinus Torvalds  * The page comes unlocked when it has no locked buffer_async buffers
3941da177e4SLinus Torvalds  * left.
3951da177e4SLinus Torvalds  *
3961da177e4SLinus Torvalds  * PageLocked prevents anyone from starting new async I/O against any
3971da177e4SLinus Torvalds  * of the buffers.
3981da177e4SLinus Torvalds  *
3991da177e4SLinus Torvalds  * PageWriteback is used to prevent simultaneous writeout of the same
4001da177e4SLinus Torvalds  * page.
4011da177e4SLinus Torvalds  *
4021da177e4SLinus Torvalds  * PageLocked prevents anyone from starting writeback of a page which is
4031da177e4SLinus Torvalds  * under read I/O (PageWriteback is only ever set against a locked page).
4041da177e4SLinus Torvalds  */
4051da177e4SLinus Torvalds static void mark_buffer_async_read(struct buffer_head *bh)
4061da177e4SLinus Torvalds {
40731fb992cSEric Biggers 	bh->b_end_io = end_buffer_async_read_io;
4081da177e4SLinus Torvalds 	set_buffer_async_read(bh);
4091da177e4SLinus Torvalds }
4101da177e4SLinus Torvalds 
4111fe72eaaSH Hartley Sweeten static void mark_buffer_async_write_endio(struct buffer_head *bh,
41235c80d5fSChris Mason 					  bh_end_io_t *handler)
41335c80d5fSChris Mason {
41435c80d5fSChris Mason 	bh->b_end_io = handler;
41535c80d5fSChris Mason 	set_buffer_async_write(bh);
41635c80d5fSChris Mason }
41735c80d5fSChris Mason 
4181da177e4SLinus Torvalds void mark_buffer_async_write(struct buffer_head *bh)
4191da177e4SLinus Torvalds {
42035c80d5fSChris Mason 	mark_buffer_async_write_endio(bh, end_buffer_async_write);
4211da177e4SLinus Torvalds }
4221da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_async_write);
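
/*
 * Hedged sketch of how a writepage implementation uses the async-write
 * marking above (loosely modelled on __block_write_full_page() later
 * in this file; locking subtleties and error paths are omitted):
 *
 *	bh = head;
 *	do {
 *		lock_buffer(bh);
 *		if (test_clear_buffer_dirty(bh))
 *			mark_buffer_async_write(bh);
 *		else
 *			unlock_buffer(bh);
 *	} while ((bh = bh->b_this_page) != head);
 *	set_page_writeback(page);
 *	unlock_page(page);
 *
 * followed by submit_bh(REQ_OP_WRITE, bh) on each marked buffer;
 * end_buffer_async_write() then ends page writeback when the last
 * buffer completes.
 */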
4231da177e4SLinus Torvalds 
4241da177e4SLinus Torvalds 
4251da177e4SLinus Torvalds /*
4261da177e4SLinus Torvalds  * fs/buffer.c contains helper functions for buffer-backed address space's
4271da177e4SLinus Torvalds  * fsync functions.  A common requirement for buffer-based filesystems is
4281da177e4SLinus Torvalds  * that certain data from the backing blockdev needs to be written out for
4291da177e4SLinus Torvalds  * a successful fsync().  For example, ext2 indirect blocks need to be
4301da177e4SLinus Torvalds  * written back and waited upon before fsync() returns.
4311da177e4SLinus Torvalds  *
4321da177e4SLinus Torvalds  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
4331da177e4SLinus Torvalds  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
4341da177e4SLinus Torvalds  * management of a list of dependent buffers at ->i_mapping->private_list.
4351da177e4SLinus Torvalds  *
4361da177e4SLinus Torvalds  * Locking is a little subtle: try_to_free_buffers() will remove buffers
4371da177e4SLinus Torvalds  * from their controlling inode's queue when they are being freed.  But
4381da177e4SLinus Torvalds  * try_to_free_buffers() will be operating against the *blockdev* mapping
4391da177e4SLinus Torvalds  * at the time, not against the S_ISREG file which depends on those buffers.
4401da177e4SLinus Torvalds  * So the locking for private_list is via the private_lock in the address_space
4411da177e4SLinus Torvalds  * which backs the buffers.  Which is different from the address_space
4421da177e4SLinus Torvalds  * against which the buffers are listed.  So for a particular address_space,
4431da177e4SLinus Torvalds  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
4441da177e4SLinus Torvalds  * mapping->private_list will always be protected by the backing blockdev's
4451da177e4SLinus Torvalds  * ->private_lock.
4461da177e4SLinus Torvalds  *
4471da177e4SLinus Torvalds  * Which introduces a requirement: all buffers on an address_space's
4481da177e4SLinus Torvalds  * ->private_list must be from the same address_space: the blockdev's.
4491da177e4SLinus Torvalds  *
4501da177e4SLinus Torvalds  * address_spaces which do not place buffers at ->private_list via these
4511da177e4SLinus Torvalds  * utility functions are free to use private_lock and private_list for
4521da177e4SLinus Torvalds  * whatever they want.  The only requirement is that list_empty(private_list)
4531da177e4SLinus Torvalds  * be true at clear_inode() time.
4541da177e4SLinus Torvalds  *
4551da177e4SLinus Torvalds  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
4561da177e4SLinus Torvalds  * filesystems should do that.  invalidate_inode_buffers() should just go
4571da177e4SLinus Torvalds  * BUG_ON(!list_empty).
4581da177e4SLinus Torvalds  *
4591da177e4SLinus Torvalds  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
4601da177e4SLinus Torvalds  * take an address_space, not an inode.  And it should be called
4611da177e4SLinus Torvalds  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
4621da177e4SLinus Torvalds  * queued up.
4631da177e4SLinus Torvalds  *
4641da177e4SLinus Torvalds  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
4651da177e4SLinus Torvalds  * list if it is already on a list.  Because if the buffer is on a list,
4661da177e4SLinus Torvalds  * it *must* already be on the right one.  If not, the filesystem is being
4671da177e4SLinus Torvalds  * silly.  This will save a ton of locking.  But first we have to ensure
4681da177e4SLinus Torvalds  * that buffers are taken *off* the old inode's list when they are freed
4691da177e4SLinus Torvalds  * (presumably in truncate).  That requires careful auditing of all
4701da177e4SLinus Torvalds  * filesystems (do it inside bforget()).  It could also be done by bringing
4711da177e4SLinus Torvalds  * b_inode back.
4721da177e4SLinus Torvalds  */
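
/*
 * Hedged usage sketch for the helpers discussed above (the function
 * names are real, the caller is invented for illustration): a
 * filesystem dirties a dependent blockdev buffer with
 *
 *	mark_buffer_dirty_inode(bh, inode);
 *
 * which queues it on inode->i_mapping->private_list, and its fsync
 * method later writes out and waits upon the whole list via
 *
 *	err = sync_mapping_buffers(inode->i_mapping);
 */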
4731da177e4SLinus Torvalds 
4741da177e4SLinus Torvalds /*
4751da177e4SLinus Torvalds  * The buffer's backing address_space's private_lock must be held
4761da177e4SLinus Torvalds  */
477dbacefc9SThomas Petazzoni static void __remove_assoc_queue(struct buffer_head *bh)
4781da177e4SLinus Torvalds {
4791da177e4SLinus Torvalds 	list_del_init(&bh->b_assoc_buffers);
48058ff407bSJan Kara 	WARN_ON(!bh->b_assoc_map);
48158ff407bSJan Kara 	bh->b_assoc_map = NULL;
4821da177e4SLinus Torvalds }
4831da177e4SLinus Torvalds 
4841da177e4SLinus Torvalds int inode_has_buffers(struct inode *inode)
4851da177e4SLinus Torvalds {
4861da177e4SLinus Torvalds 	return !list_empty(&inode->i_data.private_list);
4871da177e4SLinus Torvalds }
4881da177e4SLinus Torvalds 
4891da177e4SLinus Torvalds /*
4901da177e4SLinus Torvalds  * osync is designed to support O_SYNC io.  It waits synchronously for
4911da177e4SLinus Torvalds  * all already-submitted IO to complete, but does not queue any new
4921da177e4SLinus Torvalds  * writes to the disk.
4931da177e4SLinus Torvalds  *
4941da177e4SLinus Torvalds  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
4951da177e4SLinus Torvalds  * you dirty the buffers, and then use osync_inode_buffers to wait for
4961da177e4SLinus Torvalds  * completion.  Any other dirty buffers which are not yet queued for
4971da177e4SLinus Torvalds  * write will not be flushed to disk by the osync.
4981da177e4SLinus Torvalds  */
4991da177e4SLinus Torvalds static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
5001da177e4SLinus Torvalds {
5011da177e4SLinus Torvalds 	struct buffer_head *bh;
5021da177e4SLinus Torvalds 	struct list_head *p;
5031da177e4SLinus Torvalds 	int err = 0;
5041da177e4SLinus Torvalds 
5051da177e4SLinus Torvalds 	spin_lock(lock);
5061da177e4SLinus Torvalds repeat:
5071da177e4SLinus Torvalds 	list_for_each_prev(p, list) {
5081da177e4SLinus Torvalds 		bh = BH_ENTRY(p);
5091da177e4SLinus Torvalds 		if (buffer_locked(bh)) {
5101da177e4SLinus Torvalds 			get_bh(bh);
5111da177e4SLinus Torvalds 			spin_unlock(lock);
5121da177e4SLinus Torvalds 			wait_on_buffer(bh);
5131da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
5141da177e4SLinus Torvalds 				err = -EIO;
5151da177e4SLinus Torvalds 			brelse(bh);
5161da177e4SLinus Torvalds 			spin_lock(lock);
5171da177e4SLinus Torvalds 			goto repeat;
5181da177e4SLinus Torvalds 		}
5191da177e4SLinus Torvalds 	}
5201da177e4SLinus Torvalds 	spin_unlock(lock);
5211da177e4SLinus Torvalds 	return err;
5221da177e4SLinus Torvalds }
5231da177e4SLinus Torvalds 
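/*
 * Hedged sketch of the O_SYNC pattern described above (the name
 * osync_inode_buffers is historical; the waiting now happens through
 * fsync_buffers_list() calling osync_buffers_list()):
 *
 *	mark_buffer_dirty(bh);
 *	ll_rw_block(REQ_OP_WRITE, 1, &bh);
 *	...
 *	err = osync_buffers_list(lock, list);
 */
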
52408fdc8a0SMateusz Guzik void emergency_thaw_bdev(struct super_block *sb)
525c2d75438SEric Sandeen {
526040f04bdSChristoph Hellwig 	while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
527a1c6f057SDmitry Monakhov 		printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
528c2d75438SEric Sandeen }
52901a05b33SAl Viro 
5301da177e4SLinus Torvalds /**
53178a4a50aSRandy Dunlap  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
53267be2dd1SMartin Waitz  * @mapping: the mapping which wants those buffers written
5331da177e4SLinus Torvalds  *
5341da177e4SLinus Torvalds  * Starts I/O against the buffers at mapping->private_list, and waits upon
5351da177e4SLinus Torvalds  * that I/O.
5361da177e4SLinus Torvalds  *
53767be2dd1SMartin Waitz  * Basically, this is a convenience function for fsync().
53867be2dd1SMartin Waitz  * @mapping is a file or directory which needs those buffers to be written for
53967be2dd1SMartin Waitz  * a successful fsync().
5401da177e4SLinus Torvalds  */
5411da177e4SLinus Torvalds int sync_mapping_buffers(struct address_space *mapping)
5421da177e4SLinus Torvalds {
543252aa6f5SRafael Aquini 	struct address_space *buffer_mapping = mapping->private_data;
5441da177e4SLinus Torvalds 
5451da177e4SLinus Torvalds 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
5461da177e4SLinus Torvalds 		return 0;
5471da177e4SLinus Torvalds 
5481da177e4SLinus Torvalds 	return fsync_buffers_list(&buffer_mapping->private_lock,
5491da177e4SLinus Torvalds 					&mapping->private_list);
5501da177e4SLinus Torvalds }
5511da177e4SLinus Torvalds EXPORT_SYMBOL(sync_mapping_buffers);
5521da177e4SLinus Torvalds 
5531da177e4SLinus Torvalds /*
5541da177e4SLinus Torvalds  * Called when we've recently written block `bblock', and it is known that
5551da177e4SLinus Torvalds  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
5561da177e4SLinus Torvalds  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
5571da177e4SLinus Torvalds  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
5581da177e4SLinus Torvalds  */
5591da177e4SLinus Torvalds void write_boundary_block(struct block_device *bdev,
5601da177e4SLinus Torvalds 			sector_t bblock, unsigned blocksize)
5611da177e4SLinus Torvalds {
5621da177e4SLinus Torvalds 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
5631da177e4SLinus Torvalds 	if (bh) {
5641da177e4SLinus Torvalds 		if (buffer_dirty(bh))
5651420c4a5SBart Van Assche 			ll_rw_block(REQ_OP_WRITE, 1, &bh);
5661da177e4SLinus Torvalds 		put_bh(bh);
5671da177e4SLinus Torvalds 	}
5681da177e4SLinus Torvalds }
5691da177e4SLinus Torvalds 
5701da177e4SLinus Torvalds void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
5711da177e4SLinus Torvalds {
5721da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
5731da177e4SLinus Torvalds 	struct address_space *buffer_mapping = bh->b_page->mapping;
5741da177e4SLinus Torvalds 
5751da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
576252aa6f5SRafael Aquini 	if (!mapping->private_data) {
577252aa6f5SRafael Aquini 		mapping->private_data = buffer_mapping;
5781da177e4SLinus Torvalds 	} else {
579252aa6f5SRafael Aquini 		BUG_ON(mapping->private_data != buffer_mapping);
5801da177e4SLinus Torvalds 	}
581535ee2fbSJan Kara 	if (!bh->b_assoc_map) {
5821da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
5831da177e4SLinus Torvalds 		list_move_tail(&bh->b_assoc_buffers,
5841da177e4SLinus Torvalds 				&mapping->private_list);
58558ff407bSJan Kara 		bh->b_assoc_map = mapping;
5861da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
5871da177e4SLinus Torvalds 	}
5881da177e4SLinus Torvalds }
5891da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_dirty_inode);
5901da177e4SLinus Torvalds 
5911da177e4SLinus Torvalds /*
5921da177e4SLinus Torvalds  * Add a page to the dirty page list.
5931da177e4SLinus Torvalds  *
5941da177e4SLinus Torvalds  * It is a sad fact of life that this function is called from several places
5951da177e4SLinus Torvalds  * deeply under spinlocking.  It may not sleep.
5961da177e4SLinus Torvalds  *
5971da177e4SLinus Torvalds  * If the page has buffers, the uptodate buffers are set dirty, to preserve
5981da177e4SLinus Torvalds  * dirty-state coherency between the page and the buffers.  If the page does
5991da177e4SLinus Torvalds  * not have buffers then when they are later attached they will all be set
6001da177e4SLinus Torvalds  * dirty.
6011da177e4SLinus Torvalds  *
6021da177e4SLinus Torvalds  * The buffers are dirtied before the page is dirtied.  There's a small race
6031da177e4SLinus Torvalds  * window in which a writepage caller may see the page cleanness but not the
6041da177e4SLinus Torvalds  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
6051da177e4SLinus Torvalds  * before the buffers, a concurrent writepage caller could clear the page dirty
6061da177e4SLinus Torvalds  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
6071da177e4SLinus Torvalds  * page on the dirty page list.
6081da177e4SLinus Torvalds  *
6091da177e4SLinus Torvalds  * We use private_lock to lock against try_to_free_buffers while using the
6101da177e4SLinus Torvalds  * page's buffer list.  Also use this to protect against clean buffers being
6111da177e4SLinus Torvalds  * added to the page after it was set dirty.
6121da177e4SLinus Torvalds  *
6131da177e4SLinus Torvalds  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
6141da177e4SLinus Torvalds  * address_space though.
6151da177e4SLinus Torvalds  */
616e621900aSMatthew Wilcox (Oracle) bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
6171da177e4SLinus Torvalds {
618e621900aSMatthew Wilcox (Oracle) 	struct buffer_head *head;
619e621900aSMatthew Wilcox (Oracle) 	bool newly_dirty;
6201da177e4SLinus Torvalds 
6211da177e4SLinus Torvalds 	spin_lock(&mapping->private_lock);
622e621900aSMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
623e621900aSMatthew Wilcox (Oracle) 	if (head) {
6241da177e4SLinus Torvalds 		struct buffer_head *bh = head;
6251da177e4SLinus Torvalds 
6261da177e4SLinus Torvalds 		do {
6271da177e4SLinus Torvalds 			set_buffer_dirty(bh);
6281da177e4SLinus Torvalds 			bh = bh->b_this_page;
6291da177e4SLinus Torvalds 		} while (bh != head);
6301da177e4SLinus Torvalds 	}
631c4843a75SGreg Thelen 	/*
632bcfe06bfSRoman Gushchin 	 * Lock out page's memcg migration to keep PageDirty
63381f8c3a4SJohannes Weiner 	 * synchronized with per-memcg dirty page counters.
634c4843a75SGreg Thelen 	 */
635e621900aSMatthew Wilcox (Oracle) 	folio_memcg_lock(folio);
636e621900aSMatthew Wilcox (Oracle) 	newly_dirty = !folio_test_set_dirty(folio);
6371da177e4SLinus Torvalds 	spin_unlock(&mapping->private_lock);
6381da177e4SLinus Torvalds 
639a8e7d49aSLinus Torvalds 	if (newly_dirty)
640e621900aSMatthew Wilcox (Oracle) 		__folio_mark_dirty(folio, mapping, 1);
641c4843a75SGreg Thelen 
642e621900aSMatthew Wilcox (Oracle) 	folio_memcg_unlock(folio);
643c4843a75SGreg Thelen 
644c4843a75SGreg Thelen 	if (newly_dirty)
645c4843a75SGreg Thelen 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
646c4843a75SGreg Thelen 
647a8e7d49aSLinus Torvalds 	return newly_dirty;
6481da177e4SLinus Torvalds }
649e621900aSMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_dirty_folio);
6501da177e4SLinus Torvalds 
6511da177e4SLinus Torvalds /*
6521da177e4SLinus Torvalds  * Write out and wait upon a list of buffers.
6531da177e4SLinus Torvalds  *
6541da177e4SLinus Torvalds  * We have conflicting pressures: we want to make sure that all
6551da177e4SLinus Torvalds  * initially dirty buffers get waited on, but that any subsequently
6561da177e4SLinus Torvalds  * dirtied buffers don't.  After all, we don't want fsync to last
6571da177e4SLinus Torvalds  * forever if somebody is actively writing to the file.
6581da177e4SLinus Torvalds  *
6591da177e4SLinus Torvalds  * Do this in two main stages: first we copy dirty buffers to a
6601da177e4SLinus Torvalds  * temporary inode list, queueing the writes as we go.  Then we clean
6611da177e4SLinus Torvalds  * up, waiting for those writes to complete.
6621da177e4SLinus Torvalds  *
6631da177e4SLinus Torvalds  * During this second stage, any subsequent updates to the file may end
6641da177e4SLinus Torvalds  * up refiling the buffer on the original inode's dirty list again, so
6651da177e4SLinus Torvalds  * there is a chance we will end up with a buffer queued for write but
6661da177e4SLinus Torvalds  * not yet completed on that list.  So, as a final cleanup we go through
6671da177e4SLinus Torvalds  * the osync code to catch these locked, dirty buffers without requeuing
6681da177e4SLinus Torvalds  * any newly dirty buffers for write.
6691da177e4SLinus Torvalds  */
6701da177e4SLinus Torvalds static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
6711da177e4SLinus Torvalds {
6721da177e4SLinus Torvalds 	struct buffer_head *bh;
6731da177e4SLinus Torvalds 	struct list_head tmp;
6747eaceaccSJens Axboe 	struct address_space *mapping;
6751da177e4SLinus Torvalds 	int err = 0, err2;
6764ee2491eSJens Axboe 	struct blk_plug plug;
6771da177e4SLinus Torvalds 
6781da177e4SLinus Torvalds 	INIT_LIST_HEAD(&tmp);
6794ee2491eSJens Axboe 	blk_start_plug(&plug);
6801da177e4SLinus Torvalds 
6811da177e4SLinus Torvalds 	spin_lock(lock);
6821da177e4SLinus Torvalds 	while (!list_empty(list)) {
6831da177e4SLinus Torvalds 		bh = BH_ENTRY(list->next);
684535ee2fbSJan Kara 		mapping = bh->b_assoc_map;
68558ff407bSJan Kara 		__remove_assoc_queue(bh);
686535ee2fbSJan Kara 		/* Avoid race with mark_buffer_dirty_inode() which does
687535ee2fbSJan Kara 		 * a lockless check; we rely on seeing the dirty bit */
688535ee2fbSJan Kara 		smp_mb();
6891da177e4SLinus Torvalds 		if (buffer_dirty(bh) || buffer_locked(bh)) {
6901da177e4SLinus Torvalds 			list_add(&bh->b_assoc_buffers, &tmp);
691535ee2fbSJan Kara 			bh->b_assoc_map = mapping;
6921da177e4SLinus Torvalds 			if (buffer_dirty(bh)) {
6931da177e4SLinus Torvalds 				get_bh(bh);
6941da177e4SLinus Torvalds 				spin_unlock(lock);
6951da177e4SLinus Torvalds 				/*
6961da177e4SLinus Torvalds 				 * Ensure any pending I/O completes so that
6979cb569d6SChristoph Hellwig 				 * write_dirty_buffer() actually writes the
6989cb569d6SChristoph Hellwig 				 * current contents - it is a noop if I/O is
6999cb569d6SChristoph Hellwig 				 * still in flight on potentially older
7009cb569d6SChristoph Hellwig 				 * contents.
7011da177e4SLinus Torvalds 				 */
70270fd7614SChristoph Hellwig 				write_dirty_buffer(bh, REQ_SYNC);
7039cf6b720SJens Axboe 
7049cf6b720SJens Axboe 				/*
7059cf6b720SJens Axboe 				 * Kick off IO for the previous mapping. Note
7069cf6b720SJens Axboe 				 * that we will not run the very last mapping,
7079cf6b720SJens Axboe 				 * wait_on_buffer() will do that for us
7089cf6b720SJens Axboe 				 * through sync_buffer().
7099cf6b720SJens Axboe 				 */
7101da177e4SLinus Torvalds 				brelse(bh);
7111da177e4SLinus Torvalds 				spin_lock(lock);
7121da177e4SLinus Torvalds 			}
7131da177e4SLinus Torvalds 		}
7141da177e4SLinus Torvalds 	}
7151da177e4SLinus Torvalds 
7164ee2491eSJens Axboe 	spin_unlock(lock);
7174ee2491eSJens Axboe 	blk_finish_plug(&plug);
7184ee2491eSJens Axboe 	spin_lock(lock);
7194ee2491eSJens Axboe 
7201da177e4SLinus Torvalds 	while (!list_empty(&tmp)) {
7211da177e4SLinus Torvalds 		bh = BH_ENTRY(tmp.prev);
7221da177e4SLinus Torvalds 		get_bh(bh);
723535ee2fbSJan Kara 		mapping = bh->b_assoc_map;
724535ee2fbSJan Kara 		__remove_assoc_queue(bh);
725535ee2fbSJan Kara 		/* Avoid race with mark_buffer_dirty_inode() which does
726535ee2fbSJan Kara 		 * a lockless check; we rely on seeing the dirty bit */
727535ee2fbSJan Kara 		smp_mb();
728535ee2fbSJan Kara 		if (buffer_dirty(bh)) {
729535ee2fbSJan Kara 			list_add(&bh->b_assoc_buffers,
730e3892296SJan Kara 				 &mapping->private_list);
731535ee2fbSJan Kara 			bh->b_assoc_map = mapping;
732535ee2fbSJan Kara 		}
7331da177e4SLinus Torvalds 		spin_unlock(lock);
7341da177e4SLinus Torvalds 		wait_on_buffer(bh);
7351da177e4SLinus Torvalds 		if (!buffer_uptodate(bh))
7361da177e4SLinus Torvalds 			err = -EIO;
7371da177e4SLinus Torvalds 		brelse(bh);
7381da177e4SLinus Torvalds 		spin_lock(lock);
7391da177e4SLinus Torvalds 	}
7401da177e4SLinus Torvalds 
7411da177e4SLinus Torvalds 	spin_unlock(lock);
7421da177e4SLinus Torvalds 	err2 = osync_buffers_list(lock, list);
7431da177e4SLinus Torvalds 	if (err)
7441da177e4SLinus Torvalds 		return err;
7451da177e4SLinus Torvalds 	else
7461da177e4SLinus Torvalds 		return err2;
7471da177e4SLinus Torvalds }
7481da177e4SLinus Torvalds 
7491da177e4SLinus Torvalds /*
7501da177e4SLinus Torvalds  * Invalidate any and all dirty buffers on a given inode.  We are
7511da177e4SLinus Torvalds  * probably unmounting the fs, but that doesn't mean we have already
7521da177e4SLinus Torvalds  * done a sync().  Just drop the buffers from the inode list.
7531da177e4SLinus Torvalds  *
7541da177e4SLinus Torvalds  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
7551da177e4SLinus Torvalds  * assumes that all the buffers are against the blockdev.  Not true
7561da177e4SLinus Torvalds  * for reiserfs.
7571da177e4SLinus Torvalds  */
7581da177e4SLinus Torvalds void invalidate_inode_buffers(struct inode *inode)
7591da177e4SLinus Torvalds {
7601da177e4SLinus Torvalds 	if (inode_has_buffers(inode)) {
7611da177e4SLinus Torvalds 		struct address_space *mapping = &inode->i_data;
7621da177e4SLinus Torvalds 		struct list_head *list = &mapping->private_list;
763252aa6f5SRafael Aquini 		struct address_space *buffer_mapping = mapping->private_data;
7641da177e4SLinus Torvalds 
7651da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
7661da177e4SLinus Torvalds 		while (!list_empty(list))
7671da177e4SLinus Torvalds 			__remove_assoc_queue(BH_ENTRY(list->next));
7681da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
7691da177e4SLinus Torvalds 	}
7701da177e4SLinus Torvalds }
77152b19ac9SJan Kara EXPORT_SYMBOL(invalidate_inode_buffers);
7721da177e4SLinus Torvalds 
7731da177e4SLinus Torvalds /*
7741da177e4SLinus Torvalds  * Remove any clean buffers from the inode's buffer list.  This is called
7751da177e4SLinus Torvalds  * when we're trying to free the inode itself.  Those buffers can pin it.
7761da177e4SLinus Torvalds  *
7771da177e4SLinus Torvalds  * Returns true if all buffers were removed.
7781da177e4SLinus Torvalds  */
7791da177e4SLinus Torvalds int remove_inode_buffers(struct inode *inode)
7801da177e4SLinus Torvalds {
7811da177e4SLinus Torvalds 	int ret = 1;
7821da177e4SLinus Torvalds 
7831da177e4SLinus Torvalds 	if (inode_has_buffers(inode)) {
7841da177e4SLinus Torvalds 		struct address_space *mapping = &inode->i_data;
7851da177e4SLinus Torvalds 		struct list_head *list = &mapping->private_list;
786252aa6f5SRafael Aquini 		struct address_space *buffer_mapping = mapping->private_data;
7871da177e4SLinus Torvalds 
7881da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
7891da177e4SLinus Torvalds 		while (!list_empty(list)) {
7901da177e4SLinus Torvalds 			struct buffer_head *bh = BH_ENTRY(list->next);
7911da177e4SLinus Torvalds 			if (buffer_dirty(bh)) {
7921da177e4SLinus Torvalds 				ret = 0;
7931da177e4SLinus Torvalds 				break;
7941da177e4SLinus Torvalds 			}
7951da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
7961da177e4SLinus Torvalds 		}
7971da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
7981da177e4SLinus Torvalds 	}
7991da177e4SLinus Torvalds 	return ret;
8001da177e4SLinus Torvalds }
8011da177e4SLinus Torvalds 
8021da177e4SLinus Torvalds /*
8031da177e4SLinus Torvalds  * Create the appropriate buffers when given a page for data area and
8041da177e4SLinus Torvalds  * Create the appropriate buffers when given a page for the data area and
8051da177e4SLinus Torvalds  * the size of each buffer.  Use the bh->b_this_page linked list to
8061da177e4SLinus Torvalds  * buffers.
8071da177e4SLinus Torvalds  *
8081da177e4SLinus Torvalds  * The retry flag is used to differentiate async IO (paging, swapping),
8091da177e4SLinus Torvalds  * which may not fail, from ordinary buffer allocations.
8101da177e4SLinus Torvalds  */
8111da177e4SLinus Torvalds struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
812640ab98fSJens Axboe 		bool retry)
8131da177e4SLinus Torvalds {
8141da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
815f745c6f5SShakeel Butt 	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
8161da177e4SLinus Torvalds 	long offset;
817b87d8cefSRoman Gushchin 	struct mem_cgroup *memcg, *old_memcg;
8181da177e4SLinus Torvalds 
819640ab98fSJens Axboe 	if (retry)
820640ab98fSJens Axboe 		gfp |= __GFP_NOFAIL;
821640ab98fSJens Axboe 
8226eeb104eSJohannes Weiner 	/* The page lock pins the memcg */
8236eeb104eSJohannes Weiner 	memcg = page_memcg(page);
824b87d8cefSRoman Gushchin 	old_memcg = set_active_memcg(memcg);
825f745c6f5SShakeel Butt 
8261da177e4SLinus Torvalds 	head = NULL;
8271da177e4SLinus Torvalds 	offset = PAGE_SIZE;
8281da177e4SLinus Torvalds 	while ((offset -= size) >= 0) {
829640ab98fSJens Axboe 		bh = alloc_buffer_head(gfp);
8301da177e4SLinus Torvalds 		if (!bh)
8311da177e4SLinus Torvalds 			goto no_grow;
8321da177e4SLinus Torvalds 
8331da177e4SLinus Torvalds 		bh->b_this_page = head;
8341da177e4SLinus Torvalds 		bh->b_blocknr = -1;
8351da177e4SLinus Torvalds 		head = bh;
8361da177e4SLinus Torvalds 
8371da177e4SLinus Torvalds 		bh->b_size = size;
8381da177e4SLinus Torvalds 
8391da177e4SLinus Torvalds 		/* Link the buffer to its page */
8401da177e4SLinus Torvalds 		set_bh_page(bh, page, offset);
8411da177e4SLinus Torvalds 	}
842f745c6f5SShakeel Butt out:
843b87d8cefSRoman Gushchin 	set_active_memcg(old_memcg);
8441da177e4SLinus Torvalds 	return head;
8451da177e4SLinus Torvalds /*
8461da177e4SLinus Torvalds  * In case anything failed, we just free everything we got.
8471da177e4SLinus Torvalds  */
8481da177e4SLinus Torvalds no_grow:
8491da177e4SLinus Torvalds 	if (head) {
8501da177e4SLinus Torvalds 		do {
8511da177e4SLinus Torvalds 			bh = head;
8521da177e4SLinus Torvalds 			head = head->b_this_page;
8531da177e4SLinus Torvalds 			free_buffer_head(bh);
8541da177e4SLinus Torvalds 		} while (head);
8551da177e4SLinus Torvalds 	}
8561da177e4SLinus Torvalds 
857f745c6f5SShakeel Butt 	goto out;
8581da177e4SLinus Torvalds }
8591da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(alloc_page_buffers);
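
/*
 * Minimal usage sketch (hedged assumptions: the page is locked and has
 * no buffers yet; compare grow_dev_page() below):
 *
 *	struct buffer_head *head = alloc_page_buffers(page, size, true);
 *
 *	link_dev_buffers(page, head);
 *
 * With retry == true the allocation gets __GFP_NOFAIL and cannot
 * return NULL.
 */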
8601da177e4SLinus Torvalds 
8611da177e4SLinus Torvalds static inline void
8621da177e4SLinus Torvalds link_dev_buffers(struct page *page, struct buffer_head *head)
8631da177e4SLinus Torvalds {
8641da177e4SLinus Torvalds 	struct buffer_head *bh, *tail;
8651da177e4SLinus Torvalds 
8661da177e4SLinus Torvalds 	bh = head;
8671da177e4SLinus Torvalds 	do {
8681da177e4SLinus Torvalds 		tail = bh;
8691da177e4SLinus Torvalds 		bh = bh->b_this_page;
8701da177e4SLinus Torvalds 	} while (bh);
8711da177e4SLinus Torvalds 	tail->b_this_page = head;
87245dcfc27SGuoqing Jiang 	attach_page_private(page, head);
8731da177e4SLinus Torvalds }
8741da177e4SLinus Torvalds 
875bbec0270SLinus Torvalds static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
876bbec0270SLinus Torvalds {
877bbec0270SLinus Torvalds 	sector_t retval = ~((sector_t)0);
878b86058f9SChristoph Hellwig 	loff_t sz = bdev_nr_bytes(bdev);
879bbec0270SLinus Torvalds 
880bbec0270SLinus Torvalds 	if (sz) {
881bbec0270SLinus Torvalds 		unsigned int sizebits = blksize_bits(size);
882bbec0270SLinus Torvalds 		retval = (sz >> sizebits);
883bbec0270SLinus Torvalds 	}
884bbec0270SLinus Torvalds 	return retval;
885bbec0270SLinus Torvalds }
886bbec0270SLinus Torvalds 
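/*
 * Worked example (hedged): for a 4 GiB block device with 1 KiB blocks,
 * sz == 1ULL << 32 and blksize_bits(1024) == 10, so blkdev_max_block()
 * returns 1 << 22; init_page_buffers() below then treats blocks
 * 0 .. (1 << 22) - 1 as mappable.
 */
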
8871da177e4SLinus Torvalds /*
8881da177e4SLinus Torvalds  * Initialise the state of a blockdev page's buffers.
8891da177e4SLinus Torvalds  */
890676ce6d5SHugh Dickins static sector_t
8911da177e4SLinus Torvalds init_page_buffers(struct page *page, struct block_device *bdev,
8921da177e4SLinus Torvalds 			sector_t block, int size)
8931da177e4SLinus Torvalds {
8941da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);
8951da177e4SLinus Torvalds 	struct buffer_head *bh = head;
8961da177e4SLinus Torvalds 	int uptodate = PageUptodate(page);
897bcd1d063SChristoph Hellwig 	sector_t end_block = blkdev_max_block(bdev, size);
8981da177e4SLinus Torvalds 
8991da177e4SLinus Torvalds 	do {
9001da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
90101950a34SEric Biggers 			bh->b_end_io = NULL;
90201950a34SEric Biggers 			bh->b_private = NULL;
9031da177e4SLinus Torvalds 			bh->b_bdev = bdev;
9041da177e4SLinus Torvalds 			bh->b_blocknr = block;
9051da177e4SLinus Torvalds 			if (uptodate)
9061da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
907080399aaSJeff Moyer 			if (block < end_block)
9081da177e4SLinus Torvalds 				set_buffer_mapped(bh);
9091da177e4SLinus Torvalds 		}
9101da177e4SLinus Torvalds 		block++;
9111da177e4SLinus Torvalds 		bh = bh->b_this_page;
9121da177e4SLinus Torvalds 	} while (bh != head);
913676ce6d5SHugh Dickins 
914676ce6d5SHugh Dickins 	/*
915676ce6d5SHugh Dickins 	 * Caller needs to validate requested block against end of device.
916676ce6d5SHugh Dickins 	 */
917676ce6d5SHugh Dickins 	return end_block;
9181da177e4SLinus Torvalds }
9191da177e4SLinus Torvalds 
9201da177e4SLinus Torvalds /*
9211da177e4SLinus Torvalds  * Create the page-cache page that contains the requested block.
9221da177e4SLinus Torvalds  *
923676ce6d5SHugh Dickins  * This is used purely for blockdev mappings.
9241da177e4SLinus Torvalds  */
925676ce6d5SHugh Dickins static int
9261da177e4SLinus Torvalds grow_dev_page(struct block_device *bdev, sector_t block,
9273b5e6454SGioh Kim 	      pgoff_t index, int size, int sizebits, gfp_t gfp)
9281da177e4SLinus Torvalds {
9291da177e4SLinus Torvalds 	struct inode *inode = bdev->bd_inode;
9301da177e4SLinus Torvalds 	struct page *page;
9311da177e4SLinus Torvalds 	struct buffer_head *bh;
932676ce6d5SHugh Dickins 	sector_t end_block;
933c4b4c2a7SZhiqiang Liu 	int ret = 0;
93484235de3SJohannes Weiner 	gfp_t gfp_mask;
9351da177e4SLinus Torvalds 
936c62d2555SMichal Hocko 	gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;
9373b5e6454SGioh Kim 
93884235de3SJohannes Weiner 	/*
93984235de3SJohannes Weiner 	 * XXX: __getblk_slow() can not really deal with failure and
94084235de3SJohannes Weiner 	 * will endlessly loop on improvised global reclaim.  Prefer
94184235de3SJohannes Weiner 	 * looping in the allocator rather than here, at least that
94284235de3SJohannes Weiner 	 * code knows what it's doing.
94384235de3SJohannes Weiner 	 */
94484235de3SJohannes Weiner 	gfp_mask |= __GFP_NOFAIL;
94584235de3SJohannes Weiner 
94684235de3SJohannes Weiner 	page = find_or_create_page(inode->i_mapping, index, gfp_mask);
9471da177e4SLinus Torvalds 
948e827f923SEric Sesterhenn 	BUG_ON(!PageLocked(page));
9491da177e4SLinus Torvalds 
9501da177e4SLinus Torvalds 	if (page_has_buffers(page)) {
9511da177e4SLinus Torvalds 		bh = page_buffers(page);
9521da177e4SLinus Torvalds 		if (bh->b_size == size) {
953676ce6d5SHugh Dickins 			end_block = init_page_buffers(page, bdev,
954f2d5a944SAnton Altaparmakov 						(sector_t)index << sizebits,
955f2d5a944SAnton Altaparmakov 						size);
956676ce6d5SHugh Dickins 			goto done;
9571da177e4SLinus Torvalds 		}
95868189fefSMatthew Wilcox (Oracle) 		if (!try_to_free_buffers(page_folio(page)))
9591da177e4SLinus Torvalds 			goto failed;
9601da177e4SLinus Torvalds 	}
9611da177e4SLinus Torvalds 
9621da177e4SLinus Torvalds 	/*
9631da177e4SLinus Torvalds 	 * Allocate some buffers for this page
9641da177e4SLinus Torvalds 	 */
96594dc24c0SJens Axboe 	bh = alloc_page_buffers(page, size, true);
9661da177e4SLinus Torvalds 
9671da177e4SLinus Torvalds 	/*
9681da177e4SLinus Torvalds 	 * Link the page to the buffers and initialise them.  Take the
9691da177e4SLinus Torvalds 	 * lock to be atomic wrt __find_get_block(), which does not
9701da177e4SLinus Torvalds 	 * run under the page lock.
9711da177e4SLinus Torvalds 	 */
9721da177e4SLinus Torvalds 	spin_lock(&inode->i_mapping->private_lock);
9731da177e4SLinus Torvalds 	link_dev_buffers(page, bh);
974f2d5a944SAnton Altaparmakov 	end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
975f2d5a944SAnton Altaparmakov 			size);
9761da177e4SLinus Torvalds 	spin_unlock(&inode->i_mapping->private_lock);
977676ce6d5SHugh Dickins done:
978676ce6d5SHugh Dickins 	ret = (block < end_block) ? 1 : -ENXIO;
9791da177e4SLinus Torvalds failed:
9801da177e4SLinus Torvalds 	unlock_page(page);
98109cbfeafSKirill A. Shutemov 	put_page(page);
982676ce6d5SHugh Dickins 	return ret;
9831da177e4SLinus Torvalds }
9841da177e4SLinus Torvalds 
9851da177e4SLinus Torvalds /*
9861da177e4SLinus Torvalds  * Create buffers for the specified block device block's page.  If
9871da177e4SLinus Torvalds  * that page was dirty, the buffers are set dirty also.
9881da177e4SLinus Torvalds  */
989858119e1SArjan van de Ven static int
9903b5e6454SGioh Kim grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
9911da177e4SLinus Torvalds {
9921da177e4SLinus Torvalds 	pgoff_t index;
9931da177e4SLinus Torvalds 	int sizebits;
9941da177e4SLinus Torvalds 
99590432e60SMikulas Patocka 	sizebits = PAGE_SHIFT - __ffs(size);
9961da177e4SLinus Torvalds 	index = block >> sizebits;
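
	/*
	 * Worked example (hedged): with 4 KiB pages and size == 1024,
	 * __ffs(size) == 10 so sizebits == 2, i.e. four blocks fit in
	 * each page and index == block / 4.
	 */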
9971da177e4SLinus Torvalds 
998e5657933SAndrew Morton 	/*
999e5657933SAndrew Morton 	 * Check for a block which wants to lie outside our maximum possible
1000e5657933SAndrew Morton 	 * pagecache index.  (this comparison is done using sector_t types).
1001e5657933SAndrew Morton 	 */
1002e5657933SAndrew Morton 	if (unlikely(index != block >> sizebits)) {
1003e5657933SAndrew Morton 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1004a1c6f057SDmitry Monakhov 			"device %pg\n",
10058e24eea7SHarvey Harrison 			__func__, (unsigned long long)block,
1006a1c6f057SDmitry Monakhov 			bdev);
1007e5657933SAndrew Morton 		return -EIO;
1008e5657933SAndrew Morton 	}
1009676ce6d5SHugh Dickins 
10101da177e4SLinus Torvalds 	/* Create a page with the proper size buffers.. */
10113b5e6454SGioh Kim 	return grow_dev_page(bdev, block, index, size, sizebits, gfp);
10121da177e4SLinus Torvalds }
10131da177e4SLinus Torvalds 
10140026ba40SEric Biggers static struct buffer_head *
10153b5e6454SGioh Kim __getblk_slow(struct block_device *bdev, sector_t block,
10163b5e6454SGioh Kim 	     unsigned size, gfp_t gfp)
10171da177e4SLinus Torvalds {
10181da177e4SLinus Torvalds 	/* Size must be multiple of hard sectorsize */
1019e1defc4fSMartin K. Petersen 	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
10201da177e4SLinus Torvalds 			(size < 512 || size > PAGE_SIZE))) {
10211da177e4SLinus Torvalds 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
10221da177e4SLinus Torvalds 					size);
1023e1defc4fSMartin K. Petersen 		printk(KERN_ERR "logical block size: %d\n",
1024e1defc4fSMartin K. Petersen 					bdev_logical_block_size(bdev));
10251da177e4SLinus Torvalds 
10261da177e4SLinus Torvalds 		dump_stack();
10271da177e4SLinus Torvalds 		return NULL;
10281da177e4SLinus Torvalds 	}
10291da177e4SLinus Torvalds 
1030676ce6d5SHugh Dickins 	for (;;) {
1031676ce6d5SHugh Dickins 		struct buffer_head *bh;
1032676ce6d5SHugh Dickins 		int ret;
1033676ce6d5SHugh Dickins 
10341da177e4SLinus Torvalds 		bh = __find_get_block(bdev, block, size);
10351da177e4SLinus Torvalds 		if (bh)
10361da177e4SLinus Torvalds 			return bh;
10371da177e4SLinus Torvalds 
10383b5e6454SGioh Kim 		ret = grow_buffers(bdev, block, size, gfp);
1039676ce6d5SHugh Dickins 		if (ret < 0)
104091f68c89SJeff Moyer 			return NULL;
1041676ce6d5SHugh Dickins 	}
10421da177e4SLinus Torvalds }
10431da177e4SLinus Torvalds 
10441da177e4SLinus Torvalds /*
10451da177e4SLinus Torvalds  * The relationship between dirty buffers and dirty pages:
10461da177e4SLinus Torvalds  *
10471da177e4SLinus Torvalds  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1048ec82e1c1SMatthew Wilcox  * the page is tagged dirty in the page cache.
10491da177e4SLinus Torvalds  *
10501da177e4SLinus Torvalds  * At all times, the dirtiness of the buffers represents the dirtiness of
10511da177e4SLinus Torvalds  * subsections of the page.  If the page has buffers, the page dirty bit is
10521da177e4SLinus Torvalds  * merely a hint about the true dirty state.
10531da177e4SLinus Torvalds  *
10541da177e4SLinus Torvalds  * When a page is set dirty in its entirety, all its buffers are marked dirty
10551da177e4SLinus Torvalds  * (if the page has buffers).
10561da177e4SLinus Torvalds  *
10571da177e4SLinus Torvalds  * When a buffer is marked dirty, its page is dirtied, but the page's other
10581da177e4SLinus Torvalds  * buffers are not.
10591da177e4SLinus Torvalds  *
10601da177e4SLinus Torvalds  * Also, when blockdev buffers are explicitly read with bread(), they
10611da177e4SLinus Torvalds  * individually become uptodate.  But their backing page remains not
10621da177e4SLinus Torvalds  * uptodate - even if all of its buffers are uptodate.  A subsequent
10632c69e205SMatthew Wilcox (Oracle)  * block_read_full_folio() against that folio will discover all the uptodate
10642c69e205SMatthew Wilcox (Oracle)  * buffers, will set the folio uptodate and will perform no I/O.
10651da177e4SLinus Torvalds  */
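
/*
 * Editor's illustrative sketch (not part of the original source): because
 * the page dirty bit is only a hint once buffers are attached, code that
 * needs the true dirty state walks the buffer ring.  The function name is
 * hypothetical; the caller is assumed to hold the page lock so the
 * buffers stay attached.
 */
#if 0
static bool example_page_really_dirty(struct page *page)
{
	struct buffer_head *bh, *head;

	if (!page_has_buffers(page))
		return PageDirty(page);

	bh = head = page_buffers(page);
	do {
		if (buffer_dirty(bh))
			return true;
		bh = bh->b_this_page;
	} while (bh != head);
	return false;
}
#endif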
10661da177e4SLinus Torvalds 
10671da177e4SLinus Torvalds /**
10681da177e4SLinus Torvalds  * mark_buffer_dirty - mark a buffer_head as needing writeout
106967be2dd1SMartin Waitz  * @bh: the buffer_head to mark dirty
10701da177e4SLinus Torvalds  *
1071ec82e1c1SMatthew Wilcox  * mark_buffer_dirty() will set the dirty bit against the buffer, then set
1072ec82e1c1SMatthew Wilcox  * its backing page dirty, then tag the page as dirty in the page cache
1073ec82e1c1SMatthew Wilcox  * and then attach the address_space's inode to its superblock's dirty
10741da177e4SLinus Torvalds  * inode list.
10751da177e4SLinus Torvalds  *
10761da177e4SLinus Torvalds  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1077b93b0163SMatthew Wilcox  * i_pages lock and mapping->host->i_lock.
10781da177e4SLinus Torvalds  */
1079fc9b52cdSHarvey Harrison void mark_buffer_dirty(struct buffer_head *bh)
10801da177e4SLinus Torvalds {
1081787d2214SNick Piggin 	WARN_ON_ONCE(!buffer_uptodate(bh));
10821be62dc1SLinus Torvalds 
10835305cb83STejun Heo 	trace_block_dirty_buffer(bh);
10845305cb83STejun Heo 
10851be62dc1SLinus Torvalds 	/*
10861be62dc1SLinus Torvalds 	 * Very *carefully* optimize the it-is-already-dirty case.
10871be62dc1SLinus Torvalds 	 *
10881be62dc1SLinus Torvalds 	 * Don't let the final "is it dirty" escape to before we
10891be62dc1SLinus Torvalds 	 * perhaps modified the buffer.
10901be62dc1SLinus Torvalds 	 */
10911be62dc1SLinus Torvalds 	if (buffer_dirty(bh)) {
10921be62dc1SLinus Torvalds 		smp_mb();
10931be62dc1SLinus Torvalds 		if (buffer_dirty(bh))
10941be62dc1SLinus Torvalds 			return;
10951be62dc1SLinus Torvalds 	}
10961be62dc1SLinus Torvalds 
1097a8e7d49aSLinus Torvalds 	if (!test_set_buffer_dirty(bh)) {
1098a8e7d49aSLinus Torvalds 		struct page *page = bh->b_page;
1099c4843a75SGreg Thelen 		struct address_space *mapping = NULL;
1100c4843a75SGreg Thelen 
110162cccb8cSJohannes Weiner 		lock_page_memcg(page);
11028e9d78edSLinus Torvalds 		if (!TestSetPageDirty(page)) {
1103c4843a75SGreg Thelen 			mapping = page_mapping(page);
11048e9d78edSLinus Torvalds 			if (mapping)
110562cccb8cSJohannes Weiner 				__set_page_dirty(page, mapping, 0);
11068e9d78edSLinus Torvalds 		}
110762cccb8cSJohannes Weiner 		unlock_page_memcg(page);
1108c4843a75SGreg Thelen 		if (mapping)
1109c4843a75SGreg Thelen 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1110a8e7d49aSLinus Torvalds 	}
11111da177e4SLinus Torvalds }
11121fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(mark_buffer_dirty);
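
/*
 * Editor's illustrative sketch (not part of the original source): the
 * typical filesystem pattern around mark_buffer_dirty() - read a metadata
 * block, modify it, dirty it, and (when the caller must wait) force it out
 * with sync_dirty_buffer().  The function name and the offset/len
 * parameters are hypothetical; the caller is assumed to guarantee
 * offset + len <= sb->s_blocksize.
 */
#if 0
static int example_update_block(struct super_block *sb, sector_t block,
				unsigned offset, const void *data, size_t len)
{
	struct buffer_head *bh = sb_bread(sb, block);
	int err;

	if (!bh)
		return -EIO;
	memcpy(bh->b_data + offset, data, len);
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);	/* optional: wait for the write */
	brelse(bh);
	return err;
}
#endif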
11131da177e4SLinus Torvalds 
111487354e5dSJeff Layton void mark_buffer_write_io_error(struct buffer_head *bh)
111587354e5dSJeff Layton {
1116485e9605SJeff Layton 	struct super_block *sb;
1117485e9605SJeff Layton 
111887354e5dSJeff Layton 	set_buffer_write_io_error(bh);
111987354e5dSJeff Layton 	/* FIXME: do we need to set this in both places? */
112087354e5dSJeff Layton 	if (bh->b_page && bh->b_page->mapping)
112187354e5dSJeff Layton 		mapping_set_error(bh->b_page->mapping, -EIO);
112287354e5dSJeff Layton 	if (bh->b_assoc_map)
112387354e5dSJeff Layton 		mapping_set_error(bh->b_assoc_map, -EIO);
1124485e9605SJeff Layton 	rcu_read_lock();
1125485e9605SJeff Layton 	sb = READ_ONCE(bh->b_bdev->bd_super);
1126485e9605SJeff Layton 	if (sb)
1127485e9605SJeff Layton 		errseq_set(&sb->s_wb_err, -EIO);
1128485e9605SJeff Layton 	rcu_read_unlock();
112987354e5dSJeff Layton }
113087354e5dSJeff Layton EXPORT_SYMBOL(mark_buffer_write_io_error);
113187354e5dSJeff Layton 
11321da177e4SLinus Torvalds /*
11331da177e4SLinus Torvalds  * Decrement a buffer_head's reference count.  If all buffers against a page
11341da177e4SLinus Torvalds  * have zero reference count, are clean and unlocked, and if the page is clean
11351da177e4SLinus Torvalds  * and unlocked then try_to_free_buffers() may strip the buffers from the page
11361da177e4SLinus Torvalds  * in preparation for freeing it (sometimes, rarely, buffers are removed from
11371da177e4SLinus Torvalds  * a page but it ends up not being freed, and buffers may later be reattached).
11381da177e4SLinus Torvalds  */
11391da177e4SLinus Torvalds void __brelse(struct buffer_head * buf)
11401da177e4SLinus Torvalds {
11411da177e4SLinus Torvalds 	if (atomic_read(&buf->b_count)) {
11421da177e4SLinus Torvalds 		put_bh(buf);
11431da177e4SLinus Torvalds 		return;
11441da177e4SLinus Torvalds 	}
11455c752ad9SArjan van de Ven 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
11461da177e4SLinus Torvalds }
11471fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__brelse);
11481da177e4SLinus Torvalds 
11491da177e4SLinus Torvalds /*
11501da177e4SLinus Torvalds  * bforget() is like brelse(), except it discards any
11511da177e4SLinus Torvalds  * potentially dirty data.
11521da177e4SLinus Torvalds  */
11531da177e4SLinus Torvalds void __bforget(struct buffer_head *bh)
11541da177e4SLinus Torvalds {
11551da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
1156535ee2fbSJan Kara 	if (bh->b_assoc_map) {
11571da177e4SLinus Torvalds 		struct address_space *buffer_mapping = bh->b_page->mapping;
11581da177e4SLinus Torvalds 
11591da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
11601da177e4SLinus Torvalds 		list_del_init(&bh->b_assoc_buffers);
116158ff407bSJan Kara 		bh->b_assoc_map = NULL;
11621da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
11631da177e4SLinus Torvalds 	}
11641da177e4SLinus Torvalds 	__brelse(bh);
11651da177e4SLinus Torvalds }
11661fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__bforget);
11671da177e4SLinus Torvalds 
11681da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh)
11691da177e4SLinus Torvalds {
11701da177e4SLinus Torvalds 	lock_buffer(bh);
11711da177e4SLinus Torvalds 	if (buffer_uptodate(bh)) {
11721da177e4SLinus Torvalds 		unlock_buffer(bh);
11731da177e4SLinus Torvalds 		return bh;
11741da177e4SLinus Torvalds 	} else {
11751da177e4SLinus Torvalds 		get_bh(bh);
11761da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_read_sync;
11771420c4a5SBart Van Assche 		submit_bh(REQ_OP_READ, bh);
11781da177e4SLinus Torvalds 		wait_on_buffer(bh);
11791da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
11801da177e4SLinus Torvalds 			return bh;
11811da177e4SLinus Torvalds 	}
11821da177e4SLinus Torvalds 	brelse(bh);
11831da177e4SLinus Torvalds 	return NULL;
11841da177e4SLinus Torvalds }
11851da177e4SLinus Torvalds 
11861da177e4SLinus Torvalds /*
11871da177e4SLinus Torvalds  * Per-cpu buffer LRU implementation, used to reduce the cost of __find_get_block().
11881da177e4SLinus Torvalds  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
11891da177e4SLinus Torvalds  * refcount elevated by one when they're in an LRU.  A buffer can only appear
11901da177e4SLinus Torvalds  * once in a particular CPU's LRU.  A single buffer can be present in multiple
11911da177e4SLinus Torvalds  * CPU's LRUs at the same time.
11921da177e4SLinus Torvalds  *
11931da177e4SLinus Torvalds  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
11941da177e4SLinus Torvalds  * sb_find_get_block().
11951da177e4SLinus Torvalds  *
11961da177e4SLinus Torvalds  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
11971da177e4SLinus Torvalds  * a local interrupt disable for that.
11981da177e4SLinus Torvalds  */
11991da177e4SLinus Torvalds 
120086cf78d7SSebastien Buisson #define BH_LRU_SIZE	16
12011da177e4SLinus Torvalds 
12021da177e4SLinus Torvalds struct bh_lru {
12031da177e4SLinus Torvalds 	struct buffer_head *bhs[BH_LRU_SIZE];
12041da177e4SLinus Torvalds };
12051da177e4SLinus Torvalds 
12061da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
12071da177e4SLinus Torvalds 
12081da177e4SLinus Torvalds #ifdef CONFIG_SMP
12091da177e4SLinus Torvalds #define bh_lru_lock()	local_irq_disable()
12101da177e4SLinus Torvalds #define bh_lru_unlock()	local_irq_enable()
12111da177e4SLinus Torvalds #else
12121da177e4SLinus Torvalds #define bh_lru_lock()	preempt_disable()
12131da177e4SLinus Torvalds #define bh_lru_unlock()	preempt_enable()
12141da177e4SLinus Torvalds #endif
12151da177e4SLinus Torvalds 
12161da177e4SLinus Torvalds static inline void check_irqs_on(void)
12171da177e4SLinus Torvalds {
12181da177e4SLinus Torvalds #ifdef irqs_disabled
12191da177e4SLinus Torvalds 	BUG_ON(irqs_disabled());
12201da177e4SLinus Torvalds #endif
12211da177e4SLinus Torvalds }
12221da177e4SLinus Torvalds 
12231da177e4SLinus Torvalds /*
1224241f01fbSEric Biggers  * Install a buffer_head into this cpu's LRU.  If it is not already in the
1225241f01fbSEric Biggers  * LRU, it is inserted at the front and the buffer_head at the back, if any,
1226241f01fbSEric Biggers  * is evicted.  If it is already in the LRU, it is moved to the front.
12271da177e4SLinus Torvalds  */
12281da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh)
12291da177e4SLinus Torvalds {
1230241f01fbSEric Biggers 	struct buffer_head *evictee = bh;
1231241f01fbSEric Biggers 	struct bh_lru *b;
1232241f01fbSEric Biggers 	int i;
12331da177e4SLinus Torvalds 
12341da177e4SLinus Torvalds 	check_irqs_on();
1235c0226eb8SMinchan Kim 	bh_lru_lock();
1236c0226eb8SMinchan Kim 
12378cc621d2SMinchan Kim 	/*
12388cc621d2SMinchan Kim 	 * The refcount that a buffer_head holds while in a bh_lru prevents
12398cc621d2SMinchan Kim 	 * its attached page from being dropped (by try_to_free_buffers),
12408cc621d2SMinchan Kim 	 * which can make page migration fail.
12418cc621d2SMinchan Kim 	 * Skip putting the upcoming bh into the bh_lru until migration is done.
12428cc621d2SMinchan Kim 	 */
1243c0226eb8SMinchan Kim 	if (lru_cache_disabled()) {
1244c0226eb8SMinchan Kim 		bh_lru_unlock();
12458cc621d2SMinchan Kim 		return;
1246c0226eb8SMinchan Kim 	}
1247241f01fbSEric Biggers 
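	/*
	 * Editor's note: the swap() cascade below shifts every entry one
	 * slot towards the back while installing bh at the front.  If bh
	 * was already in the LRU it eventually pops out of the chain and
	 * we return early; otherwise whatever falls off the back is left
	 * in evictee and released once the lock is dropped.
	 */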
1248241f01fbSEric Biggers 	b = this_cpu_ptr(&bh_lrus);
1249241f01fbSEric Biggers 	for (i = 0; i < BH_LRU_SIZE; i++) {
1250241f01fbSEric Biggers 		swap(evictee, b->bhs[i]);
1251241f01fbSEric Biggers 		if (evictee == bh) {
1252241f01fbSEric Biggers 			bh_lru_unlock();
1253241f01fbSEric Biggers 			return;
1254241f01fbSEric Biggers 		}
1255241f01fbSEric Biggers 	}
12561da177e4SLinus Torvalds 
12571da177e4SLinus Torvalds 	get_bh(bh);
12581da177e4SLinus Torvalds 	bh_lru_unlock();
1259241f01fbSEric Biggers 	brelse(evictee);
12601da177e4SLinus Torvalds }
12611da177e4SLinus Torvalds 
12621da177e4SLinus Torvalds /*
12631da177e4SLinus Torvalds  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
12641da177e4SLinus Torvalds  */
1265858119e1SArjan van de Ven static struct buffer_head *
12663991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
12671da177e4SLinus Torvalds {
12681da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
12693991d3bdSTomasz Kvarsin 	unsigned int i;
12701da177e4SLinus Torvalds 
12711da177e4SLinus Torvalds 	check_irqs_on();
12721da177e4SLinus Torvalds 	bh_lru_lock();
12731da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
1274c7b92516SChristoph Lameter 		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
12751da177e4SLinus Torvalds 
12769470dd5dSZach Brown 		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
12779470dd5dSZach Brown 		    bh->b_size == size) {
12781da177e4SLinus Torvalds 			if (i) {
12791da177e4SLinus Torvalds 				while (i) {
1280c7b92516SChristoph Lameter 					__this_cpu_write(bh_lrus.bhs[i],
1281c7b92516SChristoph Lameter 						__this_cpu_read(bh_lrus.bhs[i - 1]));
12821da177e4SLinus Torvalds 					i--;
12831da177e4SLinus Torvalds 				}
1284c7b92516SChristoph Lameter 				__this_cpu_write(bh_lrus.bhs[0], bh);
12851da177e4SLinus Torvalds 			}
12861da177e4SLinus Torvalds 			get_bh(bh);
12871da177e4SLinus Torvalds 			ret = bh;
12881da177e4SLinus Torvalds 			break;
12891da177e4SLinus Torvalds 		}
12901da177e4SLinus Torvalds 	}
12911da177e4SLinus Torvalds 	bh_lru_unlock();
12921da177e4SLinus Torvalds 	return ret;
12931da177e4SLinus Torvalds }
12941da177e4SLinus Torvalds 
12951da177e4SLinus Torvalds /*
12961da177e4SLinus Torvalds  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
12971da177e4SLinus Torvalds  * it in the LRU and mark it as accessed.  If it is not present then return
12981da177e4SLinus Torvalds  * NULL.
12991da177e4SLinus Torvalds  */
13001da177e4SLinus Torvalds struct buffer_head *
13013991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
13021da177e4SLinus Torvalds {
13031da177e4SLinus Torvalds 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
13041da177e4SLinus Torvalds 
13051da177e4SLinus Torvalds 	if (bh == NULL) {
13062457aec6SMel Gorman 		/* __find_get_block_slow will mark the page accessed */
1307385fd4c5SCoywolf Qi Hunt 		bh = __find_get_block_slow(bdev, block);
13081da177e4SLinus Torvalds 		if (bh)
13091da177e4SLinus Torvalds 			bh_lru_install(bh);
13102457aec6SMel Gorman 	} else
13111da177e4SLinus Torvalds 		touch_buffer(bh);
13122457aec6SMel Gorman 
13131da177e4SLinus Torvalds 	return bh;
13141da177e4SLinus Torvalds }
13151da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block);
13161da177e4SLinus Torvalds 
13171da177e4SLinus Torvalds /*
13183b5e6454SGioh Kim  * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
13191da177e4SLinus Torvalds  * which corresponds to the passed block_device, block and size. The
13201da177e4SLinus Torvalds  * returned buffer has its reference count incremented.
13211da177e4SLinus Torvalds  *
13223b5e6454SGioh Kim  * __getblk_gfp() will lock up the machine if grow_dev_page's
13233b5e6454SGioh Kim  * try_to_free_buffers() attempt keeps failing.  FIXME, perhaps?
13241da177e4SLinus Torvalds  */
13251da177e4SLinus Torvalds struct buffer_head *
13263b5e6454SGioh Kim __getblk_gfp(struct block_device *bdev, sector_t block,
13273b5e6454SGioh Kim 	     unsigned size, gfp_t gfp)
13281da177e4SLinus Torvalds {
13291da177e4SLinus Torvalds 	struct buffer_head *bh = __find_get_block(bdev, block, size);
13301da177e4SLinus Torvalds 
13311da177e4SLinus Torvalds 	might_sleep();
13321da177e4SLinus Torvalds 	if (bh == NULL)
13333b5e6454SGioh Kim 		bh = __getblk_slow(bdev, block, size, gfp);
13341da177e4SLinus Torvalds 	return bh;
13351da177e4SLinus Torvalds }
13363b5e6454SGioh Kim EXPORT_SYMBOL(__getblk_gfp);
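
/*
 * Editor's illustrative sketch (not part of the original source): how a
 * filesystem typically reaches __getblk_gfp() - via the sb_getblk() /
 * __getblk() wrappers, which (at this revision) pass __GFP_MOVABLE.
 * example_get_meta_bh is hypothetical.
 */
#if 0
static struct buffer_head *example_get_meta_bh(struct super_block *sb,
					       sector_t block)
{
	/* Equivalent to sb_getblk(): movable page cache, may sleep. */
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize,
			    __GFP_MOVABLE);
}
#endif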
13371da177e4SLinus Torvalds 
13381da177e4SLinus Torvalds /*
13391da177e4SLinus Torvalds  * Do async read-ahead on a buffer.
13401da177e4SLinus Torvalds  */
13413991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
13421da177e4SLinus Torvalds {
13431da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
1344a3e713b5SAndrew Morton 	if (likely(bh)) {
13451420c4a5SBart Van Assche 		ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, &bh);
13461da177e4SLinus Torvalds 		brelse(bh);
13471da177e4SLinus Torvalds 	}
1348a3e713b5SAndrew Morton }
13491da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead);
13501da177e4SLinus Torvalds 
1351d87f6392SRoman Gushchin void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size,
1352d87f6392SRoman Gushchin 		      gfp_t gfp)
1353d87f6392SRoman Gushchin {
1354d87f6392SRoman Gushchin 	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
1355d87f6392SRoman Gushchin 	if (likely(bh)) {
13561420c4a5SBart Van Assche 		ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, &bh);
1357d87f6392SRoman Gushchin 		brelse(bh);
1358d87f6392SRoman Gushchin 	}
1359d87f6392SRoman Gushchin }
1360d87f6392SRoman Gushchin EXPORT_SYMBOL(__breadahead_gfp);
1361d87f6392SRoman Gushchin 
13621da177e4SLinus Torvalds /**
13633b5e6454SGioh Kim  *  __bread_gfp() - reads a specified block and returns the bh
136467be2dd1SMartin Waitz  *  @bdev: the block_device to read from
13651da177e4SLinus Torvalds  *  @block: number of block
13661da177e4SLinus Torvalds  *  @size: size (in bytes) to read
13673b5e6454SGioh Kim  *  @gfp: page allocation flag
13681da177e4SLinus Torvalds  *
13691da177e4SLinus Torvalds  *  Reads a specified block, and returns the buffer head that contains it.
13703b5e6454SGioh Kim  *  If @gfp is zero, the page cache is allocated from the non-movable area,
13713b5e6454SGioh Kim  *  so that the buffer does not get in the way of page migration.
13721da177e4SLinus Torvalds  *  It returns NULL if the block was unreadable.
13731da177e4SLinus Torvalds  */
13741da177e4SLinus Torvalds struct buffer_head *
13753b5e6454SGioh Kim __bread_gfp(struct block_device *bdev, sector_t block,
13763b5e6454SGioh Kim 		   unsigned size, gfp_t gfp)
13771da177e4SLinus Torvalds {
13783b5e6454SGioh Kim 	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
13791da177e4SLinus Torvalds 
1380a3e713b5SAndrew Morton 	if (likely(bh) && !buffer_uptodate(bh))
13811da177e4SLinus Torvalds 		bh = __bread_slow(bh);
13821da177e4SLinus Torvalds 	return bh;
13831da177e4SLinus Torvalds }
13843b5e6454SGioh Kim EXPORT_SYMBOL(__bread_gfp);
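
/*
 * Editor's illustrative sketch (not part of the original source): reading
 * one block through the __bread() wrapper (which passes __GFP_MOVABLE to
 * __bread_gfp()) and handling the NULL-on-unreadable convention.  The
 * function name is hypothetical.
 */
#if 0
static int example_read_and_check(struct block_device *bdev, sector_t block,
				  unsigned size)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (!bh)
		return -EIO;	/* the block was unreadable */
	/* ... inspect bh->b_data here ... */
	brelse(bh);
	return 0;
}
#endif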
13851da177e4SLinus Torvalds 
13868cc621d2SMinchan Kim static void __invalidate_bh_lrus(struct bh_lru *b)
13878cc621d2SMinchan Kim {
13888cc621d2SMinchan Kim 	int i;
13898cc621d2SMinchan Kim 
13908cc621d2SMinchan Kim 	for (i = 0; i < BH_LRU_SIZE; i++) {
13918cc621d2SMinchan Kim 		brelse(b->bhs[i]);
13928cc621d2SMinchan Kim 		b->bhs[i] = NULL;
13938cc621d2SMinchan Kim 	}
13948cc621d2SMinchan Kim }
13951da177e4SLinus Torvalds /*
13961da177e4SLinus Torvalds  * invalidate_bh_lrus() is called rarely - but not only at unmount.
13971da177e4SLinus Torvalds  * This doesn't race because it runs on each cpu either in irq
13981da177e4SLinus Torvalds  * or with preempt disabled.
13991da177e4SLinus Torvalds  */
14001da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg)
14011da177e4SLinus Torvalds {
14021da177e4SLinus Torvalds 	struct bh_lru *b = &get_cpu_var(bh_lrus);
14031da177e4SLinus Torvalds 
14048cc621d2SMinchan Kim 	__invalidate_bh_lrus(b);
14051da177e4SLinus Torvalds 	put_cpu_var(bh_lrus);
14061da177e4SLinus Torvalds }
14071da177e4SLinus Torvalds 
14088cc621d2SMinchan Kim bool has_bh_in_lru(int cpu, void *dummy)
140942be35d0SGilad Ben-Yossef {
141042be35d0SGilad Ben-Yossef 	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
141142be35d0SGilad Ben-Yossef 	int i;
141242be35d0SGilad Ben-Yossef 
141342be35d0SGilad Ben-Yossef 	for (i = 0; i < BH_LRU_SIZE; i++) {
141442be35d0SGilad Ben-Yossef 		if (b->bhs[i])
14151d706679SSaurav Girepunje 			return true;
141642be35d0SGilad Ben-Yossef 	}
141742be35d0SGilad Ben-Yossef 
14181d706679SSaurav Girepunje 	return false;
141942be35d0SGilad Ben-Yossef }
142042be35d0SGilad Ben-Yossef 
1421f9a14399SPeter Zijlstra void invalidate_bh_lrus(void)
14221da177e4SLinus Torvalds {
1423cb923159SSebastian Andrzej Siewior 	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
14241da177e4SLinus Torvalds }
14259db5579bSNick Piggin EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
14261da177e4SLinus Torvalds 
1427243418e3SMinchan Kim /*
1428243418e3SMinchan Kim  * It's called from workqueue context so we need a bh_lru_lock to close
1429243418e3SMinchan Kim  * the race with preemption/irq.
1430243418e3SMinchan Kim  */
1431243418e3SMinchan Kim void invalidate_bh_lrus_cpu(void)
14328cc621d2SMinchan Kim {
14338cc621d2SMinchan Kim 	struct bh_lru *b;
14348cc621d2SMinchan Kim 
14358cc621d2SMinchan Kim 	bh_lru_lock();
1436243418e3SMinchan Kim 	b = this_cpu_ptr(&bh_lrus);
14378cc621d2SMinchan Kim 	__invalidate_bh_lrus(b);
14388cc621d2SMinchan Kim 	bh_lru_unlock();
14398cc621d2SMinchan Kim }
14408cc621d2SMinchan Kim 
14411da177e4SLinus Torvalds void set_bh_page(struct buffer_head *bh,
14421da177e4SLinus Torvalds 		struct page *page, unsigned long offset)
14431da177e4SLinus Torvalds {
14441da177e4SLinus Torvalds 	bh->b_page = page;
1445e827f923SEric Sesterhenn 	BUG_ON(offset >= PAGE_SIZE);
14461da177e4SLinus Torvalds 	if (PageHighMem(page))
14471da177e4SLinus Torvalds 		/*
14481da177e4SLinus Torvalds 		 * This catches illegal uses and preserves the offset:
14491da177e4SLinus Torvalds 		 */
14501da177e4SLinus Torvalds 		bh->b_data = (char *)(0 + offset);
14511da177e4SLinus Torvalds 	else
14521da177e4SLinus Torvalds 		bh->b_data = page_address(page) + offset;
14531da177e4SLinus Torvalds }
14541da177e4SLinus Torvalds EXPORT_SYMBOL(set_bh_page);
14551da177e4SLinus Torvalds 
14561da177e4SLinus Torvalds /*
14571da177e4SLinus Torvalds  * Called when truncating a buffer on a page completely.
14581da177e4SLinus Torvalds  */
1459e7470ee8SMel Gorman 
1460e7470ee8SMel Gorman /* Bits that are cleared during an invalidate */
1461e7470ee8SMel Gorman #define BUFFER_FLAGS_DISCARD \
1462e7470ee8SMel Gorman 	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
1463e7470ee8SMel Gorman 	 1 << BH_Delay | 1 << BH_Unwritten)
1464e7470ee8SMel Gorman 
1465858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh)
14661da177e4SLinus Torvalds {
1467e7470ee8SMel Gorman 	unsigned long b_state, b_state_old;
1468e7470ee8SMel Gorman 
14691da177e4SLinus Torvalds 	lock_buffer(bh);
14701da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
14711da177e4SLinus Torvalds 	bh->b_bdev = NULL;
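	/*
	 * Editor's note: clear the BUFFER_FLAGS_DISCARD bits atomically.
	 * Other CPUs may set unrelated b_state bits concurrently, so the
	 * cmpxchg() is retried until it observes the value we started
	 * from, i.e. until no update raced with ours.
	 */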
1472e7470ee8SMel Gorman 	b_state = bh->b_state;
1473e7470ee8SMel Gorman 	for (;;) {
1474e7470ee8SMel Gorman 		b_state_old = cmpxchg(&bh->b_state, b_state,
1475e7470ee8SMel Gorman 				      (b_state & ~BUFFER_FLAGS_DISCARD));
1476e7470ee8SMel Gorman 		if (b_state_old == b_state)
1477e7470ee8SMel Gorman 			break;
1478e7470ee8SMel Gorman 		b_state = b_state_old;
1479e7470ee8SMel Gorman 	}
14801da177e4SLinus Torvalds 	unlock_buffer(bh);
14811da177e4SLinus Torvalds }
14821da177e4SLinus Torvalds 
14831da177e4SLinus Torvalds /**
14847ba13abbSMatthew Wilcox (Oracle)  * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
14857ba13abbSMatthew Wilcox (Oracle)  * @folio: The folio which is affected.
1486d47992f8SLukas Czerner  * @offset: start of the range to invalidate
1487d47992f8SLukas Czerner  * @length: length of the range to invalidate
14881da177e4SLinus Torvalds  *
14897ba13abbSMatthew Wilcox (Oracle)  * block_invalidate_folio() is called when all or part of the folio has been
14901da177e4SLinus Torvalds  * invalidated by a truncate operation.
14911da177e4SLinus Torvalds  *
14927ba13abbSMatthew Wilcox (Oracle)  * block_invalidate_folio() does not have to release all buffers, but it must
14931da177e4SLinus Torvalds  * ensure that no dirty buffer is left outside @offset and that no I/O
14941da177e4SLinus Torvalds  * is underway against any of the blocks which are outside the truncation
14951da177e4SLinus Torvalds  * point.  Because the caller is about to free (and possibly reuse) those
14961da177e4SLinus Torvalds  * point, because the caller is about to free (and possibly reuse) those
14971da177e4SLinus Torvalds  */
14987ba13abbSMatthew Wilcox (Oracle) void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
14991da177e4SLinus Torvalds {
15001da177e4SLinus Torvalds 	struct buffer_head *head, *bh, *next;
15017ba13abbSMatthew Wilcox (Oracle) 	size_t curr_off = 0;
15027ba13abbSMatthew Wilcox (Oracle) 	size_t stop = length + offset;
15031da177e4SLinus Torvalds 
15047ba13abbSMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
15051da177e4SLinus Torvalds 
1506d47992f8SLukas Czerner 	/*
1507d47992f8SLukas Czerner 	 * Check for overflow
1508d47992f8SLukas Czerner 	 */
15097ba13abbSMatthew Wilcox (Oracle) 	BUG_ON(stop > folio_size(folio) || stop < length);
1510d47992f8SLukas Czerner 
15117ba13abbSMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
15127ba13abbSMatthew Wilcox (Oracle) 	if (!head)
15137ba13abbSMatthew Wilcox (Oracle) 		return;
15147ba13abbSMatthew Wilcox (Oracle) 
15151da177e4SLinus Torvalds 	bh = head;
15161da177e4SLinus Torvalds 	do {
15177ba13abbSMatthew Wilcox (Oracle) 		size_t next_off = curr_off + bh->b_size;
15181da177e4SLinus Torvalds 		next = bh->b_this_page;
15191da177e4SLinus Torvalds 
15201da177e4SLinus Torvalds 		/*
1521d47992f8SLukas Czerner 		 * Are we still fully in range?
1522d47992f8SLukas Czerner 		 */
1523d47992f8SLukas Czerner 		if (next_off > stop)
1524d47992f8SLukas Czerner 			goto out;
1525d47992f8SLukas Czerner 
1526d47992f8SLukas Czerner 		/*
15271da177e4SLinus Torvalds 		 * is this block fully invalidated?
15281da177e4SLinus Torvalds 		 */
15291da177e4SLinus Torvalds 		if (offset <= curr_off)
15301da177e4SLinus Torvalds 			discard_buffer(bh);
15311da177e4SLinus Torvalds 		curr_off = next_off;
15321da177e4SLinus Torvalds 		bh = next;
15331da177e4SLinus Torvalds 	} while (bh != head);
15341da177e4SLinus Torvalds 
15351da177e4SLinus Torvalds 	/*
15367ba13abbSMatthew Wilcox (Oracle) 	 * We release buffers only if the entire folio is being invalidated.
15371da177e4SLinus Torvalds 	 * The get_block cached value has been unconditionally invalidated,
15381da177e4SLinus Torvalds 	 * so real IO is not possible anymore.
15391da177e4SLinus Torvalds 	 */
15407ba13abbSMatthew Wilcox (Oracle) 	if (length == folio_size(folio))
15417ba13abbSMatthew Wilcox (Oracle) 		filemap_release_folio(folio, 0);
15421da177e4SLinus Torvalds out:
15432ff28e22SNeilBrown 	return;
15441da177e4SLinus Torvalds }
15457ba13abbSMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_invalidate_folio);
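
/*
 * Editor's illustrative sketch (not part of the original source): how a
 * buffer_head-based filesystem typically wires block_invalidate_folio()
 * into its address_space_operations alongside the other helpers in this
 * file.  example_aops, example_read_folio and example_get_block are
 * hypothetical; the field names assume the folio-based aops of this
 * kernel revision.
 */
#if 0
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create);

static int example_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, example_get_block);
}

static const struct address_space_operations example_aops = {
	.dirty_folio		= block_dirty_folio,
	.invalidate_folio	= block_invalidate_folio,
	.read_folio		= example_read_folio,
};
#endif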
15461da177e4SLinus Torvalds 
1547d47992f8SLukas Czerner 
15481da177e4SLinus Torvalds /*
15491da177e4SLinus Torvalds  * We attach and possibly dirty the buffers atomically wrt
1550e621900aSMatthew Wilcox (Oracle)  * block_dirty_folio() via private_lock.  try_to_free_buffers
15511da177e4SLinus Torvalds  * is already excluded via the page lock.
15521da177e4SLinus Torvalds  */
15531da177e4SLinus Torvalds void create_empty_buffers(struct page *page,
15541da177e4SLinus Torvalds 			unsigned long blocksize, unsigned long b_state)
15551da177e4SLinus Torvalds {
15561da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *tail;
15571da177e4SLinus Torvalds 
1558640ab98fSJens Axboe 	head = alloc_page_buffers(page, blocksize, true);
15591da177e4SLinus Torvalds 	bh = head;
15601da177e4SLinus Torvalds 	do {
15611da177e4SLinus Torvalds 		bh->b_state |= b_state;
15621da177e4SLinus Torvalds 		tail = bh;
15631da177e4SLinus Torvalds 		bh = bh->b_this_page;
15641da177e4SLinus Torvalds 	} while (bh);
15651da177e4SLinus Torvalds 	tail->b_this_page = head;
15661da177e4SLinus Torvalds 
15671da177e4SLinus Torvalds 	spin_lock(&page->mapping->private_lock);
15681da177e4SLinus Torvalds 	if (PageUptodate(page) || PageDirty(page)) {
15691da177e4SLinus Torvalds 		bh = head;
15701da177e4SLinus Torvalds 		do {
15711da177e4SLinus Torvalds 			if (PageDirty(page))
15721da177e4SLinus Torvalds 				set_buffer_dirty(bh);
15731da177e4SLinus Torvalds 			if (PageUptodate(page))
15741da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
15751da177e4SLinus Torvalds 			bh = bh->b_this_page;
15761da177e4SLinus Torvalds 		} while (bh != head);
15771da177e4SLinus Torvalds 	}
157845dcfc27SGuoqing Jiang 	attach_page_private(page, head);
15791da177e4SLinus Torvalds 	spin_unlock(&page->mapping->private_lock);
15801da177e4SLinus Torvalds }
15811da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers);
15821da177e4SLinus Torvalds 
158329f3ad7dSJan Kara /**
158429f3ad7dSJan Kara  * clean_bdev_aliases: clean a range of buffers in block device
158529f3ad7dSJan Kara  * @bdev: Block device to clean buffers in
158629f3ad7dSJan Kara  * @block: Start of a range of blocks to clean
158729f3ad7dSJan Kara  * @len: Number of blocks to clean
15881da177e4SLinus Torvalds  *
158929f3ad7dSJan Kara  * We are taking a range of blocks for data and we don't want writeback of any
159029f3ad7dSJan Kara  * buffer-cache aliases from the moment this function returns until the
159129f3ad7dSJan Kara  * moment something explicitly marks the buffer dirty (hopefully that
159229f3ad7dSJan Kara  * will not happen until we free that block ;-) We don't even need to mark
159329f3ad7dSJan Kara  * it not-uptodate - nobody can expect anything from a newly allocated buffer
159429f3ad7dSJan Kara  * anyway. We used to use unmap_buffer() for such invalidation, but that was
159529f3ad7dSJan Kara  * wrong. We definitely don't want to mark the alias unmapped, for example - it
159629f3ad7dSJan Kara  * would confuse anyone who might pick it with bread() afterwards...
159729f3ad7dSJan Kara  *
159829f3ad7dSJan Kara  * Also..  Note that bforget() doesn't lock the buffer.  So there can be
159929f3ad7dSJan Kara  * writeout I/O going on against recently-freed buffers.  We don't wait on that
160029f3ad7dSJan Kara  * I/O in bforget() - it's more efficient to wait on the I/O only if we really
160129f3ad7dSJan Kara  * need to.  That happens here.
16021da177e4SLinus Torvalds  */
160329f3ad7dSJan Kara void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
16041da177e4SLinus Torvalds {
160529f3ad7dSJan Kara 	struct inode *bd_inode = bdev->bd_inode;
160629f3ad7dSJan Kara 	struct address_space *bd_mapping = bd_inode->i_mapping;
16079e0b6f31SMatthew Wilcox (Oracle) 	struct folio_batch fbatch;
160829f3ad7dSJan Kara 	pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
160929f3ad7dSJan Kara 	pgoff_t end;
1610c10f778dSJan Kara 	int i, count;
161129f3ad7dSJan Kara 	struct buffer_head *bh;
161229f3ad7dSJan Kara 	struct buffer_head *head;
16131da177e4SLinus Torvalds 
161429f3ad7dSJan Kara 	end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
16159e0b6f31SMatthew Wilcox (Oracle) 	folio_batch_init(&fbatch);
16169e0b6f31SMatthew Wilcox (Oracle) 	while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
16179e0b6f31SMatthew Wilcox (Oracle) 		count = folio_batch_count(&fbatch);
1618c10f778dSJan Kara 		for (i = 0; i < count; i++) {
16199e0b6f31SMatthew Wilcox (Oracle) 			struct folio *folio = fbatch.folios[i];
16201da177e4SLinus Torvalds 
16219e0b6f31SMatthew Wilcox (Oracle) 			if (!folio_buffers(folio))
162229f3ad7dSJan Kara 				continue;
162329f3ad7dSJan Kara 			/*
16249e0b6f31SMatthew Wilcox (Oracle) 			 * We use folio lock instead of bd_mapping->private_lock
162529f3ad7dSJan Kara 			 * to pin buffers here since we can afford to sleep and
162629f3ad7dSJan Kara 			 * it scales better than a global spinlock.
162729f3ad7dSJan Kara 			 */
16289e0b6f31SMatthew Wilcox (Oracle) 			folio_lock(folio);
16299e0b6f31SMatthew Wilcox (Oracle) 			/* Recheck when the folio is locked which pins bhs */
16309e0b6f31SMatthew Wilcox (Oracle) 			/* Recheck now that the folio is locked, which pins the bhs */
16319e0b6f31SMatthew Wilcox (Oracle) 			if (!head)
163229f3ad7dSJan Kara 				goto unlock_page;
163329f3ad7dSJan Kara 			bh = head;
163429f3ad7dSJan Kara 			do {
16356c006a9dSChandan Rajendra 				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
163629f3ad7dSJan Kara 					goto next;
163729f3ad7dSJan Kara 				if (bh->b_blocknr >= block + len)
163829f3ad7dSJan Kara 					break;
163929f3ad7dSJan Kara 				clear_buffer_dirty(bh);
164029f3ad7dSJan Kara 				wait_on_buffer(bh);
164129f3ad7dSJan Kara 				clear_buffer_req(bh);
164229f3ad7dSJan Kara next:
164329f3ad7dSJan Kara 				bh = bh->b_this_page;
164429f3ad7dSJan Kara 			} while (bh != head);
164529f3ad7dSJan Kara unlock_page:
16469e0b6f31SMatthew Wilcox (Oracle) 			folio_unlock(folio);
164729f3ad7dSJan Kara 		}
16489e0b6f31SMatthew Wilcox (Oracle) 		folio_batch_release(&fbatch);
164929f3ad7dSJan Kara 		cond_resched();
1650c10f778dSJan Kara 		/* End of range already reached? */
1651c10f778dSJan Kara 		if (index > end || !index)
1652c10f778dSJan Kara 			break;
16531da177e4SLinus Torvalds 	}
16541da177e4SLinus Torvalds }
165529f3ad7dSJan Kara EXPORT_SYMBOL(clean_bdev_aliases);
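
/*
 * Editor's illustrative sketch (not part of the original source): a
 * filesystem calls clean_bdev_aliases() right after allocating fresh
 * on-disk blocks for file data, so no stale block-device alias gets
 * written back over them.  For a single block already attached to a bh,
 * the clean_bdev_bh_alias() wrapper does the same.  The function name is
 * hypothetical.
 */
#if 0
static void example_after_allocation(struct block_device *bdev,
				     sector_t first_new_block,
				     sector_t nr_blocks)
{
	clean_bdev_aliases(bdev, first_new_block, nr_blocks);
}
#endif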
16561da177e4SLinus Torvalds 
16571da177e4SLinus Torvalds /*
165845bce8f3SLinus Torvalds  * Size is a power-of-two in the range 512..PAGE_SIZE,
165945bce8f3SLinus Torvalds  * and the case we care about most is PAGE_SIZE.
166045bce8f3SLinus Torvalds  *
166145bce8f3SLinus Torvalds  * So this *could* possibly be written with those
166245bce8f3SLinus Torvalds  * constraints in mind (relevant mostly if some
166345bce8f3SLinus Torvalds  * architecture has a slow bit-scan instruction)
166445bce8f3SLinus Torvalds  */
166545bce8f3SLinus Torvalds static inline int block_size_bits(unsigned int blocksize)
166645bce8f3SLinus Torvalds {
166745bce8f3SLinus Torvalds 	return ilog2(blocksize);
166845bce8f3SLinus Torvalds }
166945bce8f3SLinus Torvalds 
167045bce8f3SLinus Torvalds static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
167145bce8f3SLinus Torvalds {
167245bce8f3SLinus Torvalds 	BUG_ON(!PageLocked(page));
167345bce8f3SLinus Torvalds 
167445bce8f3SLinus Torvalds 	if (!page_has_buffers(page))
16756aa7de05SMark Rutland 		create_empty_buffers(page, 1 << READ_ONCE(inode->i_blkbits),
16766aa7de05SMark Rutland 				     b_state);
167745bce8f3SLinus Torvalds 	return page_buffers(page);
167845bce8f3SLinus Torvalds }
167945bce8f3SLinus Torvalds 
168045bce8f3SLinus Torvalds /*
16811da177e4SLinus Torvalds  * NOTE! All mapped/uptodate combinations are valid:
16821da177e4SLinus Torvalds  *
16831da177e4SLinus Torvalds  *	Mapped	Uptodate	Meaning
16841da177e4SLinus Torvalds  *
16851da177e4SLinus Torvalds  *	No	No		"unknown" - must do get_block()
16861da177e4SLinus Torvalds  *	No	Yes		"hole" - zero-filled
16871da177e4SLinus Torvalds  *	Yes	No		"allocated" - allocated on disk, not read in
16881da177e4SLinus Torvalds  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
16891da177e4SLinus Torvalds  *
16901da177e4SLinus Torvalds  * "Dirty" is valid only with the last case (mapped+uptodate).
16911da177e4SLinus Torvalds  */
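
/*
 * Editor's illustrative sketch (not part of the original source): decoding
 * the four mapped/uptodate combinations from the table above.  The
 * function name is hypothetical.
 */
#if 0
static const char *example_bh_state_name(const struct buffer_head *bh)
{
	if (!buffer_mapped(bh))
		return buffer_uptodate(bh) ? "hole" : "unknown";
	return buffer_uptodate(bh) ? "valid" : "allocated";
}
#endif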
16921da177e4SLinus Torvalds 
16931da177e4SLinus Torvalds /*
16941da177e4SLinus Torvalds  * While block_write_full_page is writing back the dirty buffers under
16951da177e4SLinus Torvalds  * the page lock, whoever dirtied the buffers may decide to clean them
16961da177e4SLinus Torvalds  * again at any time.  We handle that by only looking at the buffer
16971da177e4SLinus Torvalds  * state inside lock_buffer().
16981da177e4SLinus Torvalds  *
16991da177e4SLinus Torvalds  * If block_write_full_page() is called for regular writeback
17001da177e4SLinus Torvalds  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
17011da177e4SLinus Torvalds  * locked buffer.   This only can happen if someone has written the buffer
17021da177e4SLinus Torvalds  * directly, with submit_bh().  At the address_space level PageWriteback
17031da177e4SLinus Torvalds  * prevents this contention from occurring.
17046e34eeddSTheodore Ts'o  *
17056e34eeddSTheodore Ts'o  * If block_write_full_page() is called with wbc->sync_mode ==
170670fd7614SChristoph Hellwig  * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1707721a9602SJens Axboe  * causes the writes to be flagged as synchronous writes.
17081da177e4SLinus Torvalds  */
1709b4bba389SBenjamin Marzinski int __block_write_full_page(struct inode *inode, struct page *page,
171035c80d5fSChris Mason 			get_block_t *get_block, struct writeback_control *wbc,
171135c80d5fSChris Mason 			bh_end_io_t *handler)
17121da177e4SLinus Torvalds {
17131da177e4SLinus Torvalds 	int err;
17141da177e4SLinus Torvalds 	sector_t block;
17151da177e4SLinus Torvalds 	sector_t last_block;
1716f0fbd5fcSAndrew Morton 	struct buffer_head *bh, *head;
171745bce8f3SLinus Torvalds 	unsigned int blocksize, bbits;
17181da177e4SLinus Torvalds 	int nr_underway = 0;
17193ae72869SBart Van Assche 	blk_opf_t write_flags = wbc_to_write_flags(wbc);
17201da177e4SLinus Torvalds 
172145bce8f3SLinus Torvalds 	head = create_page_buffers(page, inode,
17221da177e4SLinus Torvalds 					(1 << BH_Dirty)|(1 << BH_Uptodate));
17231da177e4SLinus Torvalds 
17241da177e4SLinus Torvalds 	/*
1725e621900aSMatthew Wilcox (Oracle) 	 * Be very careful.  We have no exclusion from block_dirty_folio
17261da177e4SLinus Torvalds 	 * here, and the (potentially unmapped) buffers may become dirty at
17271da177e4SLinus Torvalds 	 * any time.  If a buffer becomes dirty here after we've inspected it
17281da177e4SLinus Torvalds 	 * then we just miss that fact, and the page stays dirty.
17291da177e4SLinus Torvalds 	 *
1730e621900aSMatthew Wilcox (Oracle) 	 * Buffers outside i_size may be dirtied by block_dirty_folio;
17311da177e4SLinus Torvalds 	 * handle that here by just cleaning them.
17321da177e4SLinus Torvalds 	 */
17331da177e4SLinus Torvalds 
17341da177e4SLinus Torvalds 	bh = head;
173545bce8f3SLinus Torvalds 	blocksize = bh->b_size;
173645bce8f3SLinus Torvalds 	bbits = block_size_bits(blocksize);
173745bce8f3SLinus Torvalds 
173809cbfeafSKirill A. Shutemov 	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
173945bce8f3SLinus Torvalds 	last_block = (i_size_read(inode) - 1) >> bbits;
17401da177e4SLinus Torvalds 
17411da177e4SLinus Torvalds 	/*
17421da177e4SLinus Torvalds 	 * Get all the dirty buffers mapped to disk addresses and
17431da177e4SLinus Torvalds 	 * handle any aliases from the underlying blockdev's mapping.
17441da177e4SLinus Torvalds 	 */
17451da177e4SLinus Torvalds 	do {
17461da177e4SLinus Torvalds 		if (block > last_block) {
17471da177e4SLinus Torvalds 			/*
17481da177e4SLinus Torvalds 			 * mapped buffers outside i_size will occur, because
17491da177e4SLinus Torvalds 			 * this page can be outside i_size when there is a
17501da177e4SLinus Torvalds 			 * truncate in progress.
17511da177e4SLinus Torvalds 			 */
17521da177e4SLinus Torvalds 			/*
17531da177e4SLinus Torvalds 			 * The buffer was zeroed by block_write_full_page()
17541da177e4SLinus Torvalds 			 */
17551da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
17561da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
175729a814d2SAlex Tomas 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
175829a814d2SAlex Tomas 			   buffer_dirty(bh)) {
1759b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
17601da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
17611da177e4SLinus Torvalds 			if (err)
17621da177e4SLinus Torvalds 				goto recover;
176329a814d2SAlex Tomas 			clear_buffer_delay(bh);
17641da177e4SLinus Torvalds 			if (buffer_new(bh)) {
17651da177e4SLinus Torvalds 				/* blockdev mappings never come here */
17661da177e4SLinus Torvalds 				clear_buffer_new(bh);
1767e64855c6SJan Kara 				clean_bdev_bh_alias(bh);
17681da177e4SLinus Torvalds 			}
17691da177e4SLinus Torvalds 		}
17701da177e4SLinus Torvalds 		bh = bh->b_this_page;
17711da177e4SLinus Torvalds 		block++;
17721da177e4SLinus Torvalds 	} while (bh != head);
17731da177e4SLinus Torvalds 
17741da177e4SLinus Torvalds 	do {
17751da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
17761da177e4SLinus Torvalds 			continue;
17771da177e4SLinus Torvalds 		/*
17781da177e4SLinus Torvalds 		 * If it's a fully non-blocking write attempt and we cannot
17791da177e4SLinus Torvalds 		 * lock the buffer then redirty the page.  Note that this can
17805b0830cbSJens Axboe 		 * potentially cause a busy-wait loop from writeback threads
17815b0830cbSJens Axboe 		 * and kswapd activity, but those code paths have their own
17825b0830cbSJens Axboe 		 * higher-level throttling.
17831da177e4SLinus Torvalds 		 */
17841b430beeSWu Fengguang 		if (wbc->sync_mode != WB_SYNC_NONE) {
17851da177e4SLinus Torvalds 			lock_buffer(bh);
1786ca5de404SNick Piggin 		} else if (!trylock_buffer(bh)) {
17871da177e4SLinus Torvalds 			redirty_page_for_writepage(wbc, page);
17881da177e4SLinus Torvalds 			continue;
17891da177e4SLinus Torvalds 		}
17901da177e4SLinus Torvalds 		if (test_clear_buffer_dirty(bh)) {
179135c80d5fSChris Mason 			mark_buffer_async_write_endio(bh, handler);
17921da177e4SLinus Torvalds 		} else {
17931da177e4SLinus Torvalds 			unlock_buffer(bh);
17941da177e4SLinus Torvalds 		}
17951da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
17961da177e4SLinus Torvalds 
17971da177e4SLinus Torvalds 	/*
17981da177e4SLinus Torvalds 	 * The page and its buffers are protected by PageWriteback(), so we can
17991da177e4SLinus Torvalds 	 * drop the bh refcounts early.
18001da177e4SLinus Torvalds 	 */
18011da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
18021da177e4SLinus Torvalds 	set_page_writeback(page);
18031da177e4SLinus Torvalds 
18041da177e4SLinus Torvalds 	do {
18051da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
18061da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
18071420c4a5SBart Van Assche 			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
18081da177e4SLinus Torvalds 			nr_underway++;
1809ad576e63SNick Piggin 		}
18101da177e4SLinus Torvalds 		bh = next;
18111da177e4SLinus Torvalds 	} while (bh != head);
181205937baaSAndrew Morton 	unlock_page(page);
18131da177e4SLinus Torvalds 
18141da177e4SLinus Torvalds 	err = 0;
18151da177e4SLinus Torvalds done:
18161da177e4SLinus Torvalds 	if (nr_underway == 0) {
18171da177e4SLinus Torvalds 		/*
18181da177e4SLinus Torvalds 		 * The page was marked dirty, but the buffers were
18191da177e4SLinus Torvalds 		 * clean.  Someone wrote them back by hand with
18201da177e4SLinus Torvalds 		 * ll_rw_block/submit_bh.  A rare case.
18211da177e4SLinus Torvalds 		 */
18221da177e4SLinus Torvalds 		end_page_writeback(page);
18233d67f2d7SNick Piggin 
18241da177e4SLinus Torvalds 		/*
18251da177e4SLinus Torvalds 		 * The page and buffer_heads can be released at any time from
18261da177e4SLinus Torvalds 		 * here on.
18271da177e4SLinus Torvalds 		 */
18281da177e4SLinus Torvalds 	}
18291da177e4SLinus Torvalds 	return err;
18301da177e4SLinus Torvalds 
18311da177e4SLinus Torvalds recover:
18321da177e4SLinus Torvalds 	/*
18331da177e4SLinus Torvalds 	 * ENOSPC, or some other error.  We may already have added some
18341da177e4SLinus Torvalds 	 * blocks to the file, so we need to write these out to avoid
18351da177e4SLinus Torvalds 	 * exposing stale data.
18361da177e4SLinus Torvalds 	 * The page is currently locked and not marked for writeback
18371da177e4SLinus Torvalds 	 */
18381da177e4SLinus Torvalds 	bh = head;
18391da177e4SLinus Torvalds 	/* Recovery: lock and submit the mapped buffers */
18401da177e4SLinus Torvalds 	do {
184129a814d2SAlex Tomas 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
184229a814d2SAlex Tomas 		    !buffer_delay(bh)) {
18431da177e4SLinus Torvalds 			lock_buffer(bh);
184435c80d5fSChris Mason 			mark_buffer_async_write_endio(bh, handler);
18451da177e4SLinus Torvalds 		} else {
18461da177e4SLinus Torvalds 			/*
18471da177e4SLinus Torvalds 			 * The buffer may have been set dirty during
18481da177e4SLinus Torvalds 			 * attachment to a dirty page.
18491da177e4SLinus Torvalds 			 */
18501da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
18511da177e4SLinus Torvalds 		}
18521da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
18531da177e4SLinus Torvalds 	SetPageError(page);
18541da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
18557e4c3690SAndrew Morton 	mapping_set_error(page->mapping, err);
18561da177e4SLinus Torvalds 	set_page_writeback(page);
18571da177e4SLinus Torvalds 	do {
18581da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
18591da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
18601da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
18611420c4a5SBart Van Assche 			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
18621da177e4SLinus Torvalds 			nr_underway++;
1863ad576e63SNick Piggin 		}
18641da177e4SLinus Torvalds 		bh = next;
18651da177e4SLinus Torvalds 	} while (bh != head);
1866ffda9d30SNick Piggin 	unlock_page(page);
18671da177e4SLinus Torvalds 	goto done;
18681da177e4SLinus Torvalds }
1869b4bba389SBenjamin Marzinski EXPORT_SYMBOL(__block_write_full_page);
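
/*
 * Editor's illustrative sketch (not part of the original source): the usual
 * way a filesystem reaches __block_write_full_page() is through
 * block_write_full_page() from its ->writepage method.  example_writepage
 * and example_get_block are hypothetical.
 */
#if 0
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create);

static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, example_get_block, wbc);
}
#endif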
18701da177e4SLinus Torvalds 
1871afddba49SNick Piggin /*
1872afddba49SNick Piggin  * If a page has any new buffers, zero them out here, and mark them uptodate
1873afddba49SNick Piggin  * and dirty so they'll be written out (in order to prevent uninitialised
1874afddba49SNick Piggin  * block data from leaking), and clear the new bit.
1875afddba49SNick Piggin  */
1876afddba49SNick Piggin void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1877afddba49SNick Piggin {
1878afddba49SNick Piggin 	unsigned int block_start, block_end;
1879afddba49SNick Piggin 	struct buffer_head *head, *bh;
1880afddba49SNick Piggin 
1881afddba49SNick Piggin 	BUG_ON(!PageLocked(page));
1882afddba49SNick Piggin 	if (!page_has_buffers(page))
1883afddba49SNick Piggin 		return;
1884afddba49SNick Piggin 
1885afddba49SNick Piggin 	bh = head = page_buffers(page);
1886afddba49SNick Piggin 	block_start = 0;
1887afddba49SNick Piggin 	do {
1888afddba49SNick Piggin 		block_end = block_start + bh->b_size;
1889afddba49SNick Piggin 
1890afddba49SNick Piggin 		if (buffer_new(bh)) {
1891afddba49SNick Piggin 			if (block_end > from && block_start < to) {
1892afddba49SNick Piggin 				if (!PageUptodate(page)) {
1893afddba49SNick Piggin 					unsigned start, size;
1894afddba49SNick Piggin 
1895afddba49SNick Piggin 					start = max(from, block_start);
1896afddba49SNick Piggin 					size = min(to, block_end) - start;
1897afddba49SNick Piggin 
1898eebd2aa3SChristoph Lameter 					zero_user(page, start, size);
1899afddba49SNick Piggin 					set_buffer_uptodate(bh);
1900afddba49SNick Piggin 				}
1901afddba49SNick Piggin 
1902afddba49SNick Piggin 				clear_buffer_new(bh);
1903afddba49SNick Piggin 				mark_buffer_dirty(bh);
1904afddba49SNick Piggin 			}
1905afddba49SNick Piggin 		}
1906afddba49SNick Piggin 
1907afddba49SNick Piggin 		block_start = block_end;
1908afddba49SNick Piggin 		bh = bh->b_this_page;
1909afddba49SNick Piggin 	} while (bh != head);
1910afddba49SNick Piggin }
1911afddba49SNick Piggin EXPORT_SYMBOL(page_zero_new_buffers);
1912afddba49SNick Piggin 
1913ae259a9cSChristoph Hellwig static void
1914ae259a9cSChristoph Hellwig iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
19156d49cc85SChristoph Hellwig 		const struct iomap *iomap)
1916ae259a9cSChristoph Hellwig {
1917ae259a9cSChristoph Hellwig 	loff_t offset = block << inode->i_blkbits;
1918ae259a9cSChristoph Hellwig 
1919ae259a9cSChristoph Hellwig 	bh->b_bdev = iomap->bdev;
1920ae259a9cSChristoph Hellwig 
1921ae259a9cSChristoph Hellwig 	/*
1922ae259a9cSChristoph Hellwig 	 * Block points to offset in file we need to map, iomap contains
1923ae259a9cSChristoph Hellwig 	 * the offset at which the map starts. If the map ends before the
1924ae259a9cSChristoph Hellwig 	 * current block, then do not map the buffer and let the caller
1925ae259a9cSChristoph Hellwig 	 * handle it.
1926ae259a9cSChristoph Hellwig 	 */
1927ae259a9cSChristoph Hellwig 	BUG_ON(offset >= iomap->offset + iomap->length);
1928ae259a9cSChristoph Hellwig 
1929ae259a9cSChristoph Hellwig 	switch (iomap->type) {
1930ae259a9cSChristoph Hellwig 	case IOMAP_HOLE:
1931ae259a9cSChristoph Hellwig 		/*
1932ae259a9cSChristoph Hellwig 		 * If the buffer is not up to date or beyond the current EOF,
1933ae259a9cSChristoph Hellwig 		 * we need to mark it as new to ensure sub-block zeroing is
1934ae259a9cSChristoph Hellwig 		 * executed if necessary.
1935ae259a9cSChristoph Hellwig 		 */
1936ae259a9cSChristoph Hellwig 		if (!buffer_uptodate(bh) ||
1937ae259a9cSChristoph Hellwig 		    (offset >= i_size_read(inode)))
1938ae259a9cSChristoph Hellwig 			set_buffer_new(bh);
1939ae259a9cSChristoph Hellwig 		break;
1940ae259a9cSChristoph Hellwig 	case IOMAP_DELALLOC:
1941ae259a9cSChristoph Hellwig 		if (!buffer_uptodate(bh) ||
1942ae259a9cSChristoph Hellwig 		    (offset >= i_size_read(inode)))
1943ae259a9cSChristoph Hellwig 			set_buffer_new(bh);
1944ae259a9cSChristoph Hellwig 		set_buffer_uptodate(bh);
1945ae259a9cSChristoph Hellwig 		set_buffer_mapped(bh);
1946ae259a9cSChristoph Hellwig 		set_buffer_delay(bh);
1947ae259a9cSChristoph Hellwig 		break;
1948ae259a9cSChristoph Hellwig 	case IOMAP_UNWRITTEN:
1949ae259a9cSChristoph Hellwig 		/*
19503d7b6b21SAndreas Gruenbacher 		 * For unwritten regions, we always need to ensure that regions
19513d7b6b21SAndreas Gruenbacher 		 * in the block we are not writing to are zeroed. Mark the
19523d7b6b21SAndreas Gruenbacher 		 * buffer as new to ensure this.
1953ae259a9cSChristoph Hellwig 		 */
1954ae259a9cSChristoph Hellwig 		set_buffer_new(bh);
1955ae259a9cSChristoph Hellwig 		set_buffer_unwritten(bh);
1956df561f66SGustavo A. R. Silva 		fallthrough;
1957ae259a9cSChristoph Hellwig 	case IOMAP_MAPPED:
19583d7b6b21SAndreas Gruenbacher 		if ((iomap->flags & IOMAP_F_NEW) ||
19593d7b6b21SAndreas Gruenbacher 		    offset >= i_size_read(inode))
1960ae259a9cSChristoph Hellwig 			set_buffer_new(bh);
196119fe5f64SAndreas Gruenbacher 		bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
196219fe5f64SAndreas Gruenbacher 				inode->i_blkbits;
1963ae259a9cSChristoph Hellwig 		set_buffer_mapped(bh);
1964ae259a9cSChristoph Hellwig 		break;
1965ae259a9cSChristoph Hellwig 	}
1966ae259a9cSChristoph Hellwig }
1967ae259a9cSChristoph Hellwig 
1968d1bd0b4eSMatthew Wilcox (Oracle) int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
19696d49cc85SChristoph Hellwig 		get_block_t *get_block, const struct iomap *iomap)
19701da177e4SLinus Torvalds {
197109cbfeafSKirill A. Shutemov 	unsigned from = pos & (PAGE_SIZE - 1);
1972ebdec241SChristoph Hellwig 	unsigned to = from + len;
1973d1bd0b4eSMatthew Wilcox (Oracle) 	struct inode *inode = folio->mapping->host;
19741da177e4SLinus Torvalds 	unsigned block_start, block_end;
19751da177e4SLinus Torvalds 	sector_t block;
19761da177e4SLinus Torvalds 	int err = 0;
19771da177e4SLinus Torvalds 	unsigned blocksize, bbits;
19781da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
19791da177e4SLinus Torvalds 
1980d1bd0b4eSMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
198109cbfeafSKirill A. Shutemov 	BUG_ON(from > PAGE_SIZE);
198209cbfeafSKirill A. Shutemov 	BUG_ON(to > PAGE_SIZE);
19831da177e4SLinus Torvalds 	BUG_ON(from > to);
19841da177e4SLinus Torvalds 
1985d1bd0b4eSMatthew Wilcox (Oracle) 	head = create_page_buffers(&folio->page, inode, 0);
198645bce8f3SLinus Torvalds 	blocksize = head->b_size;
198745bce8f3SLinus Torvalds 	bbits = block_size_bits(blocksize);
19881da177e4SLinus Torvalds 
1989d1bd0b4eSMatthew Wilcox (Oracle) 	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
19901da177e4SLinus Torvalds 
19911da177e4SLinus Torvalds 	for(bh = head, block_start = 0; bh != head || !block_start;
19921da177e4SLinus Torvalds 	    block++, block_start=block_end, bh = bh->b_this_page) {
19931da177e4SLinus Torvalds 		block_end = block_start + blocksize;
19941da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
1995d1bd0b4eSMatthew Wilcox (Oracle) 			if (folio_test_uptodate(folio)) {
19961da177e4SLinus Torvalds 				if (!buffer_uptodate(bh))
19971da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
19981da177e4SLinus Torvalds 			}
19991da177e4SLinus Torvalds 			continue;
20001da177e4SLinus Torvalds 		}
20011da177e4SLinus Torvalds 		if (buffer_new(bh))
20021da177e4SLinus Torvalds 			clear_buffer_new(bh);
20031da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
2004b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
2005ae259a9cSChristoph Hellwig 			if (get_block) {
20061da177e4SLinus Torvalds 				err = get_block(inode, block, bh, 1);
20071da177e4SLinus Torvalds 				if (err)
2008f3ddbdc6SNick Piggin 					break;
2009ae259a9cSChristoph Hellwig 			} else {
2010ae259a9cSChristoph Hellwig 				iomap_to_bh(inode, block, bh, iomap);
2011ae259a9cSChristoph Hellwig 			}
2012ae259a9cSChristoph Hellwig 
20131da177e4SLinus Torvalds 			if (buffer_new(bh)) {
2014e64855c6SJan Kara 				clean_bdev_bh_alias(bh);
2015d1bd0b4eSMatthew Wilcox (Oracle) 				if (folio_test_uptodate(folio)) {
2016637aff46SNick Piggin 					clear_buffer_new(bh);
20171da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
2018637aff46SNick Piggin 					mark_buffer_dirty(bh);
20191da177e4SLinus Torvalds 					continue;
20201da177e4SLinus Torvalds 				}
2021eebd2aa3SChristoph Lameter 				if (block_end > to || block_start < from)
2022d1bd0b4eSMatthew Wilcox (Oracle) 					folio_zero_segments(folio,
2023eebd2aa3SChristoph Lameter 						to, block_end,
2024eebd2aa3SChristoph Lameter 						block_start, from);
20251da177e4SLinus Torvalds 				continue;
20261da177e4SLinus Torvalds 			}
20271da177e4SLinus Torvalds 		}
2028d1bd0b4eSMatthew Wilcox (Oracle) 		if (folio_test_uptodate(folio)) {
20291da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
20301da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
20311da177e4SLinus Torvalds 			continue;
20321da177e4SLinus Torvalds 		}
20331da177e4SLinus Torvalds 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
203433a266ddSDavid Chinner 		    !buffer_unwritten(bh) &&
20351da177e4SLinus Torvalds 		     (block_start < from || block_end > to)) {
20361420c4a5SBart Van Assche 			ll_rw_block(REQ_OP_READ, 1, &bh);
20371da177e4SLinus Torvalds 			*wait_bh++=bh;
20381da177e4SLinus Torvalds 		}
20391da177e4SLinus Torvalds 	}
20401da177e4SLinus Torvalds 	/*
20411da177e4SLinus Torvalds 	 * If we issued read requests - let them complete.
20421da177e4SLinus Torvalds 	 */
20431da177e4SLinus Torvalds 	while(wait_bh > wait) {
20441da177e4SLinus Torvalds 		wait_on_buffer(*--wait_bh);
20451da177e4SLinus Torvalds 		if (!buffer_uptodate(*wait_bh))
2046f3ddbdc6SNick Piggin 			err = -EIO;
20471da177e4SLinus Torvalds 	}
2048f9f07b6cSJan Kara 	if (unlikely(err))
2049d1bd0b4eSMatthew Wilcox (Oracle) 		page_zero_new_buffers(&folio->page, from, to);
20501da177e4SLinus Torvalds 	return err;
20511da177e4SLinus Torvalds }
2052ae259a9cSChristoph Hellwig 
2053ae259a9cSChristoph Hellwig int __block_write_begin(struct page *page, loff_t pos, unsigned len,
2054ae259a9cSChristoph Hellwig 		get_block_t *get_block)
2055ae259a9cSChristoph Hellwig {
2056d1bd0b4eSMatthew Wilcox (Oracle) 	return __block_write_begin_int(page_folio(page), pos, len, get_block,
2057d1bd0b4eSMatthew Wilcox (Oracle) 				       NULL);
2058ae259a9cSChristoph Hellwig }
2059ebdec241SChristoph Hellwig EXPORT_SYMBOL(__block_write_begin);
20601da177e4SLinus Torvalds 
20611da177e4SLinus Torvalds static int __block_commit_write(struct inode *inode, struct page *page,
20621da177e4SLinus Torvalds 		unsigned from, unsigned to)
20631da177e4SLinus Torvalds {
20641da177e4SLinus Torvalds 	unsigned block_start, block_end;
20651da177e4SLinus Torvalds 	int partial = 0;
20661da177e4SLinus Torvalds 	unsigned blocksize;
20671da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
20681da177e4SLinus Torvalds 
206945bce8f3SLinus Torvalds 	bh = head = page_buffers(page);
207045bce8f3SLinus Torvalds 	blocksize = bh->b_size;
20711da177e4SLinus Torvalds 
207245bce8f3SLinus Torvalds 	block_start = 0;
207345bce8f3SLinus Torvalds 	do {
20741da177e4SLinus Torvalds 		block_end = block_start + blocksize;
20751da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
20761da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
20771da177e4SLinus Torvalds 				partial = 1;
20781da177e4SLinus Torvalds 		} else {
20791da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
20801da177e4SLinus Torvalds 			mark_buffer_dirty(bh);
20811da177e4SLinus Torvalds 		}
20824ebd3aecSYang Guo 		if (buffer_new(bh))
2083afddba49SNick Piggin 			clear_buffer_new(bh);
208445bce8f3SLinus Torvalds 
208545bce8f3SLinus Torvalds 		block_start = block_end;
208645bce8f3SLinus Torvalds 		bh = bh->b_this_page;
208745bce8f3SLinus Torvalds 	} while (bh != head);
20881da177e4SLinus Torvalds 
20891da177e4SLinus Torvalds 	/*
20901da177e4SLinus Torvalds 	 * If this is a partial write which happened to make all buffers
20912c69e205SMatthew Wilcox (Oracle) 	 * uptodate then we can optimize away a bogus read_folio() for
20921da177e4SLinus Torvalds 	 * the next read(). Here we 'discover' whether the page went
20931da177e4SLinus Torvalds 	 * uptodate as a result of this (potentially partial) write.
20941da177e4SLinus Torvalds 	 */
20951da177e4SLinus Torvalds 	if (!partial)
20961da177e4SLinus Torvalds 		SetPageUptodate(page);
20971da177e4SLinus Torvalds 	return 0;
20981da177e4SLinus Torvalds }
20991da177e4SLinus Torvalds 
21001da177e4SLinus Torvalds /*
2101155130a4SChristoph Hellwig  * block_write_begin takes care of the basic task of block allocation and
2102155130a4SChristoph Hellwig  * bringing partial write blocks uptodate first.
2103155130a4SChristoph Hellwig  *
21047bb46a67Snpiggin@suse.de  * The filesystem needs to handle block truncation upon failure.
2105afddba49SNick Piggin  */
2106155130a4SChristoph Hellwig int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2107b3992d1eSMatthew Wilcox (Oracle) 		struct page **pagep, get_block_t *get_block)
2108afddba49SNick Piggin {
210909cbfeafSKirill A. Shutemov 	pgoff_t index = pos >> PAGE_SHIFT;
2110afddba49SNick Piggin 	struct page *page;
21116e1db88dSChristoph Hellwig 	int status;
2112afddba49SNick Piggin 
2113b7446e7cSMatthew Wilcox (Oracle) 	page = grab_cache_page_write_begin(mapping, index);
21146e1db88dSChristoph Hellwig 	if (!page)
21156e1db88dSChristoph Hellwig 		return -ENOMEM;
2116afddba49SNick Piggin 
21176e1db88dSChristoph Hellwig 	status = __block_write_begin(page, pos, len, get_block);
2118afddba49SNick Piggin 	if (unlikely(status)) {
2119afddba49SNick Piggin 		unlock_page(page);
212009cbfeafSKirill A. Shutemov 		put_page(page);
21216e1db88dSChristoph Hellwig 		page = NULL;
2122afddba49SNick Piggin 	}
2123afddba49SNick Piggin 
21246e1db88dSChristoph Hellwig 	*pagep = page;
2125afddba49SNick Piggin 	return status;
2126afddba49SNick Piggin }
2127afddba49SNick Piggin EXPORT_SYMBOL(block_write_begin);
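/*
 * Illustrative sketch (editorial, not part of the original file): a
 * filesystem with a hypothetical myfs_get_block() callback can
 * implement ->write_begin() by delegating here.  Per the comment
 * above, the error path must still trim any blocks instantiated
 * beyond i_size (myfs_write_failed() is an assumed helper doing that):
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct page **pagep, void **fsdata)
 *	{
 *		int ret;
 *
 *		ret = block_write_begin(mapping, pos, len, pagep,
 *					myfs_get_block);
 *		if (ret < 0)
 *			myfs_write_failed(mapping, pos + len);
 *		return ret;
 *	}
 */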
2128afddba49SNick Piggin 
2129afddba49SNick Piggin int block_write_end(struct file *file, struct address_space *mapping,
2130afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2131afddba49SNick Piggin 			struct page *page, void *fsdata)
2132afddba49SNick Piggin {
2133afddba49SNick Piggin 	struct inode *inode = mapping->host;
2134afddba49SNick Piggin 	unsigned start;
2135afddba49SNick Piggin 
213609cbfeafSKirill A. Shutemov 	start = pos & (PAGE_SIZE - 1);
2137afddba49SNick Piggin 
2138afddba49SNick Piggin 	if (unlikely(copied < len)) {
2139afddba49SNick Piggin 		/*
21402c69e205SMatthew Wilcox (Oracle) 		 * The buffers that were written will now be uptodate, so
21412c69e205SMatthew Wilcox (Oracle) 		 * we don't have to worry about a read_folio reading them
21422c69e205SMatthew Wilcox (Oracle) 		 * and overwriting a partial write. However if we have
21432c69e205SMatthew Wilcox (Oracle) 		 * encountered a short write and only partially written
21442c69e205SMatthew Wilcox (Oracle) 		 * into a buffer, it will not be marked uptodate, so a
21452c69e205SMatthew Wilcox (Oracle) 		 * read_folio might come in and destroy our partial write.
2146afddba49SNick Piggin 		 *
2147afddba49SNick Piggin 		 * Do the simplest thing, and just treat any short write to a
2148afddba49SNick Piggin 		 * non uptodate page as a zero-length write, and force the
2149afddba49SNick Piggin 		 * caller to redo the whole thing.
2150afddba49SNick Piggin 		 */
2151afddba49SNick Piggin 		if (!PageUptodate(page))
2152afddba49SNick Piggin 			copied = 0;
2153afddba49SNick Piggin 
2154afddba49SNick Piggin 		page_zero_new_buffers(page, start+copied, start+len);
2155afddba49SNick Piggin 	}
2156afddba49SNick Piggin 	flush_dcache_page(page);
2157afddba49SNick Piggin 
2158afddba49SNick Piggin 	/* This could be a short (even 0-length) commit */
2159afddba49SNick Piggin 	__block_commit_write(inode, page, start, start+copied);
2160afddba49SNick Piggin 
2161afddba49SNick Piggin 	return copied;
2162afddba49SNick Piggin }
2163afddba49SNick Piggin EXPORT_SYMBOL(block_write_end);
2164afddba49SNick Piggin 
2165afddba49SNick Piggin int generic_write_end(struct file *file, struct address_space *mapping,
2166afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2167afddba49SNick Piggin 			struct page *page, void *fsdata)
2168afddba49SNick Piggin {
21698af54f29SChristoph Hellwig 	struct inode *inode = mapping->host;
21708af54f29SChristoph Hellwig 	loff_t old_size = inode->i_size;
21718af54f29SChristoph Hellwig 	bool i_size_changed = false;
21728af54f29SChristoph Hellwig 
2173afddba49SNick Piggin 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
21748af54f29SChristoph Hellwig 
21758af54f29SChristoph Hellwig 	/*
21768af54f29SChristoph Hellwig 	 * No need to use i_size_read() here, the i_size cannot change under us
21778af54f29SChristoph Hellwig 	 * because we hold i_rwsem.
21788af54f29SChristoph Hellwig 	 *
21798af54f29SChristoph Hellwig 	 * But it's important to update i_size while still holding page lock:
21808af54f29SChristoph Hellwig 	 * page writeout could otherwise come in and zero beyond i_size.
21818af54f29SChristoph Hellwig 	 */
21828af54f29SChristoph Hellwig 	if (pos + copied > inode->i_size) {
21838af54f29SChristoph Hellwig 		i_size_write(inode, pos + copied);
21848af54f29SChristoph Hellwig 		i_size_changed = true;
21858af54f29SChristoph Hellwig 	}
21868af54f29SChristoph Hellwig 
21878af54f29SChristoph Hellwig 	unlock_page(page);
21887a77dad7SAndreas Gruenbacher 	put_page(page);
21898af54f29SChristoph Hellwig 
21908af54f29SChristoph Hellwig 	if (old_size < pos)
21918af54f29SChristoph Hellwig 		pagecache_isize_extended(inode, old_size, pos);
21928af54f29SChristoph Hellwig 	/*
21938af54f29SChristoph Hellwig 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
21948af54f29SChristoph Hellwig 	 * makes the holding time of page lock longer. Second, it forces lock
21958af54f29SChristoph Hellwig 	 * ordering of page lock and transaction start for journaling
21968af54f29SChristoph Hellwig 	 * filesystems.
21978af54f29SChristoph Hellwig 	 */
21988af54f29SChristoph Hellwig 	if (i_size_changed)
21998af54f29SChristoph Hellwig 		mark_inode_dirty(inode);
220026ddb1f4SAndreas Gruenbacher 	return copied;
2201afddba49SNick Piggin }
2202afddba49SNick Piggin EXPORT_SYMBOL(generic_write_end);
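/*
 * Illustrative sketch (editorial): block_write_begin() and
 * generic_write_end() are designed to be wired directly into a
 * filesystem's address_space_operations, alongside the generic
 * buffer-head folio helpers declared in <linux/buffer_head.h>.
 * All myfs_* names are hypothetical:
 *
 *	const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		.read_folio		= myfs_read_folio,
 *		.writepage		= myfs_writepage,
 *		.write_begin		= myfs_write_begin,
 *		.write_end		= generic_write_end,
 *		.is_partially_uptodate	= block_is_partially_uptodate,
 *		.bmap			= myfs_bmap,
 *	};
 */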
2203afddba49SNick Piggin 
2204afddba49SNick Piggin /*
22052e7e80f7SMatthew Wilcox (Oracle)  * block_is_partially_uptodate checks whether buffers within a folio are
22068ab22b9aSHisashi Hifumi  * uptodate or not.
22078ab22b9aSHisashi Hifumi  *
22082e7e80f7SMatthew Wilcox (Oracle)  * Returns true if all buffers which correspond to the specified part
22092e7e80f7SMatthew Wilcox (Oracle)  * of the folio are uptodate.
22108ab22b9aSHisashi Hifumi  */
22112e7e80f7SMatthew Wilcox (Oracle) bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
22128ab22b9aSHisashi Hifumi {
22138ab22b9aSHisashi Hifumi 	unsigned block_start, block_end, blocksize;
22148ab22b9aSHisashi Hifumi 	unsigned to;
22158ab22b9aSHisashi Hifumi 	struct buffer_head *bh, *head;
22162e7e80f7SMatthew Wilcox (Oracle) 	bool ret = true;
22178ab22b9aSHisashi Hifumi 
22182e7e80f7SMatthew Wilcox (Oracle) 	head = folio_buffers(folio);
22192e7e80f7SMatthew Wilcox (Oracle) 	if (!head)
22202e7e80f7SMatthew Wilcox (Oracle) 		return false;
222145bce8f3SLinus Torvalds 	blocksize = head->b_size;
22222e7e80f7SMatthew Wilcox (Oracle) 	to = min_t(unsigned, folio_size(folio) - from, count);
22238ab22b9aSHisashi Hifumi 	to = from + to;
22242e7e80f7SMatthew Wilcox (Oracle) 	if (from < blocksize && to > folio_size(folio) - blocksize)
22252e7e80f7SMatthew Wilcox (Oracle) 		return false;
22268ab22b9aSHisashi Hifumi 
22278ab22b9aSHisashi Hifumi 	bh = head;
22288ab22b9aSHisashi Hifumi 	block_start = 0;
22298ab22b9aSHisashi Hifumi 	do {
22308ab22b9aSHisashi Hifumi 		block_end = block_start + blocksize;
22318ab22b9aSHisashi Hifumi 		if (block_end > from && block_start < to) {
22328ab22b9aSHisashi Hifumi 			if (!buffer_uptodate(bh)) {
22332e7e80f7SMatthew Wilcox (Oracle) 				ret = false;
22348ab22b9aSHisashi Hifumi 				break;
22358ab22b9aSHisashi Hifumi 			}
22368ab22b9aSHisashi Hifumi 			if (block_end >= to)
22378ab22b9aSHisashi Hifumi 				break;
22388ab22b9aSHisashi Hifumi 		}
22398ab22b9aSHisashi Hifumi 		block_start = block_end;
22408ab22b9aSHisashi Hifumi 		bh = bh->b_this_page;
22418ab22b9aSHisashi Hifumi 	} while (bh != head);
22428ab22b9aSHisashi Hifumi 
22438ab22b9aSHisashi Hifumi 	return ret;
22448ab22b9aSHisashi Hifumi }
22458ab22b9aSHisashi Hifumi EXPORT_SYMBOL(block_is_partially_uptodate);
22468ab22b9aSHisashi Hifumi 
22478ab22b9aSHisashi Hifumi /*
22482c69e205SMatthew Wilcox (Oracle)  * Generic "read_folio" function for block devices that have the normal
22491da177e4SLinus Torvalds  * get_block functionality. This is most of the block device filesystems.
22502c69e205SMatthew Wilcox (Oracle)  * Reads the folio asynchronously --- the unlock_buffer() and
22511da177e4SLinus Torvalds  * set/clear_buffer_uptodate() functions propagate buffer state into the
22522c69e205SMatthew Wilcox (Oracle)  * folio once IO has completed.
22531da177e4SLinus Torvalds  */
22542c69e205SMatthew Wilcox (Oracle) int block_read_full_folio(struct folio *folio, get_block_t *get_block)
22551da177e4SLinus Torvalds {
22562c69e205SMatthew Wilcox (Oracle) 	struct inode *inode = folio->mapping->host;
22571da177e4SLinus Torvalds 	sector_t iblock, lblock;
22581da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
225945bce8f3SLinus Torvalds 	unsigned int blocksize, bbits;
22601da177e4SLinus Torvalds 	int nr, i;
22611da177e4SLinus Torvalds 	int fully_mapped = 1;
2262b7a6eb22SMatthew Wilcox (Oracle) 	bool page_error = false;
22631da177e4SLinus Torvalds 
22642c69e205SMatthew Wilcox (Oracle) 	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
22652c69e205SMatthew Wilcox (Oracle) 
22662c69e205SMatthew Wilcox (Oracle) 	head = create_page_buffers(&folio->page, inode, 0);
226745bce8f3SLinus Torvalds 	blocksize = head->b_size;
226845bce8f3SLinus Torvalds 	bbits = block_size_bits(blocksize);
22691da177e4SLinus Torvalds 
22702c69e205SMatthew Wilcox (Oracle) 	iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits);
227145bce8f3SLinus Torvalds 	lblock = (i_size_read(inode)+blocksize-1) >> bbits;
22721da177e4SLinus Torvalds 	bh = head;
22731da177e4SLinus Torvalds 	nr = 0;
22741da177e4SLinus Torvalds 	i = 0;
22751da177e4SLinus Torvalds 
22761da177e4SLinus Torvalds 	do {
22771da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
22781da177e4SLinus Torvalds 			continue;
22791da177e4SLinus Torvalds 
22801da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
2281c64610baSAndrew Morton 			int err = 0;
2282c64610baSAndrew Morton 
22831da177e4SLinus Torvalds 			fully_mapped = 0;
22841da177e4SLinus Torvalds 			if (iblock < lblock) {
2285b0cf2321SBadari Pulavarty 				WARN_ON(bh->b_size != blocksize);
2286c64610baSAndrew Morton 				err = get_block(inode, iblock, bh, 0);
2287b7a6eb22SMatthew Wilcox (Oracle) 				if (err) {
22882c69e205SMatthew Wilcox (Oracle) 					folio_set_error(folio);
2289b7a6eb22SMatthew Wilcox (Oracle) 					page_error = true;
2290b7a6eb22SMatthew Wilcox (Oracle) 				}
22911da177e4SLinus Torvalds 			}
22921da177e4SLinus Torvalds 			if (!buffer_mapped(bh)) {
22932c69e205SMatthew Wilcox (Oracle) 				folio_zero_range(folio, i * blocksize,
22942c69e205SMatthew Wilcox (Oracle) 						blocksize);
2295c64610baSAndrew Morton 				if (!err)
22961da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
22971da177e4SLinus Torvalds 				continue;
22981da177e4SLinus Torvalds 			}
22991da177e4SLinus Torvalds 			/*
23001da177e4SLinus Torvalds 			 * get_block() might have updated the buffer
23011da177e4SLinus Torvalds 			 * synchronously
23021da177e4SLinus Torvalds 			 */
23031da177e4SLinus Torvalds 			if (buffer_uptodate(bh))
23041da177e4SLinus Torvalds 				continue;
23051da177e4SLinus Torvalds 		}
23061da177e4SLinus Torvalds 		arr[nr++] = bh;
23071da177e4SLinus Torvalds 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
23081da177e4SLinus Torvalds 
23091da177e4SLinus Torvalds 	if (fully_mapped)
23102c69e205SMatthew Wilcox (Oracle) 		folio_set_mappedtodisk(folio);
23111da177e4SLinus Torvalds 
23121da177e4SLinus Torvalds 	if (!nr) {
23131da177e4SLinus Torvalds 		/*
23142c69e205SMatthew Wilcox (Oracle) 		 * All buffers are uptodate - we can set the folio uptodate
23151da177e4SLinus Torvalds 		 * as well. But not if get_block() returned an error.
23161da177e4SLinus Torvalds 		 */
2317b7a6eb22SMatthew Wilcox (Oracle) 		if (!page_error)
23182c69e205SMatthew Wilcox (Oracle) 			folio_mark_uptodate(folio);
23192c69e205SMatthew Wilcox (Oracle) 		folio_unlock(folio);
23201da177e4SLinus Torvalds 		return 0;
23211da177e4SLinus Torvalds 	}
23221da177e4SLinus Torvalds 
23231da177e4SLinus Torvalds 	/* Stage 2: lock the buffers */
23241da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
23251da177e4SLinus Torvalds 		bh = arr[i];
23261da177e4SLinus Torvalds 		lock_buffer(bh);
23271da177e4SLinus Torvalds 		mark_buffer_async_read(bh);
23281da177e4SLinus Torvalds 	}
23291da177e4SLinus Torvalds 
23301da177e4SLinus Torvalds 	/*
23311da177e4SLinus Torvalds 	 * Stage 3: start the IO.  Check for uptodateness
23321da177e4SLinus Torvalds 	 * inside the buffer lock in case another process reading
23331da177e4SLinus Torvalds 	 * the underlying blockdev brought it uptodate (the sct fix).
23341da177e4SLinus Torvalds 	 */
23351da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
23361da177e4SLinus Torvalds 		bh = arr[i];
23371da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
23381da177e4SLinus Torvalds 			end_buffer_async_read(bh, 1);
23391da177e4SLinus Torvalds 		else
23401420c4a5SBart Van Assche 			submit_bh(REQ_OP_READ, bh);
23411da177e4SLinus Torvalds 	}
23421da177e4SLinus Torvalds 	return 0;
23431da177e4SLinus Torvalds }
23442c69e205SMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_read_full_folio);
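/*
 * Illustrative sketch (editorial): for most block-based filesystems
 * ->read_folio() is a one-liner around this helper; myfs_get_block()
 * is a hypothetical get_block_t:
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return block_read_full_folio(folio, myfs_get_block);
 *	}
 */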
23451da177e4SLinus Torvalds 
23461da177e4SLinus Torvalds /* Utility function for filesystems that need to do work on expanding
234789e10787SNick Piggin  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
23481da177e4SLinus Torvalds  * deal with the hole.
23491da177e4SLinus Torvalds  */
235089e10787SNick Piggin int generic_cont_expand_simple(struct inode *inode, loff_t size)
23511da177e4SLinus Torvalds {
23521da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
235353b524b8SMatthew Wilcox (Oracle) 	const struct address_space_operations *aops = mapping->a_ops;
23541da177e4SLinus Torvalds 	struct page *page;
235589e10787SNick Piggin 	void *fsdata;
23561da177e4SLinus Torvalds 	int err;
23571da177e4SLinus Torvalds 
2358c08d3b0eSnpiggin@suse.de 	err = inode_newsize_ok(inode, size);
2359c08d3b0eSnpiggin@suse.de 	if (err)
23601da177e4SLinus Torvalds 		goto out;
23611da177e4SLinus Torvalds 
236253b524b8SMatthew Wilcox (Oracle) 	err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
236389e10787SNick Piggin 	if (err)
236405eb0b51SOGAWA Hirofumi 		goto out;
236505eb0b51SOGAWA Hirofumi 
236653b524b8SMatthew Wilcox (Oracle) 	err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
236789e10787SNick Piggin 	BUG_ON(err > 0);
236805eb0b51SOGAWA Hirofumi 
236905eb0b51SOGAWA Hirofumi out:
237005eb0b51SOGAWA Hirofumi 	return err;
237105eb0b51SOGAWA Hirofumi }
23721fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_cont_expand_simple);
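/*
 * Illustrative sketch (editorial): a filesystem that cannot represent
 * holes might call this from its ->setattr() when a truncate grows
 * the file:
 *
 *	if ((iattr->ia_valid & ATTR_SIZE) &&
 *	    iattr->ia_size > i_size_read(inode)) {
 *		error = generic_cont_expand_simple(inode, iattr->ia_size);
 *		if (error)
 *			return error;
 *	}
 */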
237305eb0b51SOGAWA Hirofumi 
2374f1e3af72SAdrian Bunk static int cont_expand_zero(struct file *file, struct address_space *mapping,
237589e10787SNick Piggin 			    loff_t pos, loff_t *bytes)
237605eb0b51SOGAWA Hirofumi {
237789e10787SNick Piggin 	struct inode *inode = mapping->host;
237853b524b8SMatthew Wilcox (Oracle) 	const struct address_space_operations *aops = mapping->a_ops;
237993407472SFabian Frederick 	unsigned int blocksize = i_blocksize(inode);
238089e10787SNick Piggin 	struct page *page;
238189e10787SNick Piggin 	void *fsdata;
238289e10787SNick Piggin 	pgoff_t index, curidx;
238389e10787SNick Piggin 	loff_t curpos;
238489e10787SNick Piggin 	unsigned zerofrom, offset, len;
238589e10787SNick Piggin 	int err = 0;
238605eb0b51SOGAWA Hirofumi 
238709cbfeafSKirill A. Shutemov 	index = pos >> PAGE_SHIFT;
238809cbfeafSKirill A. Shutemov 	offset = pos & ~PAGE_MASK;
238989e10787SNick Piggin 
239009cbfeafSKirill A. Shutemov 	while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
239109cbfeafSKirill A. Shutemov 		zerofrom = curpos & ~PAGE_MASK;
239289e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
239389e10787SNick Piggin 			*bytes |= (blocksize-1);
239489e10787SNick Piggin 			(*bytes)++;
239589e10787SNick Piggin 		}
239609cbfeafSKirill A. Shutemov 		len = PAGE_SIZE - zerofrom;
239789e10787SNick Piggin 
239853b524b8SMatthew Wilcox (Oracle) 		err = aops->write_begin(file, mapping, curpos, len,
239989e10787SNick Piggin 					    &page, &fsdata);
240089e10787SNick Piggin 		if (err)
240189e10787SNick Piggin 			goto out;
2402eebd2aa3SChristoph Lameter 		zero_user(page, zerofrom, len);
240353b524b8SMatthew Wilcox (Oracle) 		err = aops->write_end(file, mapping, curpos, len, len,
240489e10787SNick Piggin 						page, fsdata);
240589e10787SNick Piggin 		if (err < 0)
240689e10787SNick Piggin 			goto out;
240789e10787SNick Piggin 		BUG_ON(err != len);
240889e10787SNick Piggin 		err = 0;
2409061e9746SOGAWA Hirofumi 
2410061e9746SOGAWA Hirofumi 		balance_dirty_pages_ratelimited(mapping);
2411c2ca0fcdSMikulas Patocka 
241208d405c8SDavidlohr Bueso 		if (fatal_signal_pending(current)) {
2413c2ca0fcdSMikulas Patocka 			err = -EINTR;
2414c2ca0fcdSMikulas Patocka 			goto out;
2415c2ca0fcdSMikulas Patocka 		}
241689e10787SNick Piggin 	}
241789e10787SNick Piggin 
241889e10787SNick Piggin 	/* page covers the boundary, find the boundary offset */
241989e10787SNick Piggin 	if (index == curidx) {
242009cbfeafSKirill A. Shutemov 		zerofrom = curpos & ~PAGE_MASK;
242189e10787SNick Piggin 		/* if we are expanding the file, the last block will be filled */
242289e10787SNick Piggin 		if (offset <= zerofrom) {
242389e10787SNick Piggin 			goto out;
242489e10787SNick Piggin 		}
242589e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
242689e10787SNick Piggin 			*bytes |= (blocksize-1);
242789e10787SNick Piggin 			(*bytes)++;
242889e10787SNick Piggin 		}
242989e10787SNick Piggin 		len = offset - zerofrom;
243089e10787SNick Piggin 
243153b524b8SMatthew Wilcox (Oracle) 		err = aops->write_begin(file, mapping, curpos, len,
243289e10787SNick Piggin 					    &page, &fsdata);
243389e10787SNick Piggin 		if (err)
243489e10787SNick Piggin 			goto out;
2435eebd2aa3SChristoph Lameter 		zero_user(page, zerofrom, len);
243653b524b8SMatthew Wilcox (Oracle) 		err = aops->write_end(file, mapping, curpos, len, len,
243789e10787SNick Piggin 						page, fsdata);
243889e10787SNick Piggin 		if (err < 0)
243989e10787SNick Piggin 			goto out;
244089e10787SNick Piggin 		BUG_ON(err != len);
244189e10787SNick Piggin 		err = 0;
244289e10787SNick Piggin 	}
244389e10787SNick Piggin out:
244489e10787SNick Piggin 	return err;
24451da177e4SLinus Torvalds }
24461da177e4SLinus Torvalds 
24471da177e4SLinus Torvalds /*
24481da177e4SLinus Torvalds  * For moronic filesystems that do not allow holes in files.
24491da177e4SLinus Torvalds  * We may have to extend the file.
24501da177e4SLinus Torvalds  */
2451282dc178SChristoph Hellwig int cont_write_begin(struct file *file, struct address_space *mapping,
2452be3bbbc5SMatthew Wilcox (Oracle) 			loff_t pos, unsigned len,
245389e10787SNick Piggin 			struct page **pagep, void **fsdata,
245489e10787SNick Piggin 			get_block_t *get_block, loff_t *bytes)
24551da177e4SLinus Torvalds {
24561da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
245793407472SFabian Frederick 	unsigned int blocksize = i_blocksize(inode);
245893407472SFabian Frederick 	unsigned int zerofrom;
245989e10787SNick Piggin 	int err;
24601da177e4SLinus Torvalds 
246189e10787SNick Piggin 	err = cont_expand_zero(file, mapping, pos, bytes);
246289e10787SNick Piggin 	if (err)
2463155130a4SChristoph Hellwig 		return err;
24641da177e4SLinus Torvalds 
246509cbfeafSKirill A. Shutemov 	zerofrom = *bytes & ~PAGE_MASK;
246689e10787SNick Piggin 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
24671da177e4SLinus Torvalds 		*bytes |= (blocksize-1);
24681da177e4SLinus Torvalds 		(*bytes)++;
24691da177e4SLinus Torvalds 	}
24701da177e4SLinus Torvalds 
2471b3992d1eSMatthew Wilcox (Oracle) 	return block_write_begin(mapping, pos, len, pagep, get_block);
24721da177e4SLinus Torvalds }
24731fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(cont_write_begin);
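/*
 * Illustrative sketch (editorial, modeled loosely on fs/fat): the
 * filesystem keeps the high-water mark of initialised data in a
 * private field and passes its address as @bytes; all myfs_* names
 * and the mmu_private field are assumptions:
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct page **pagep, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len,
 *					pagep, fsdata, myfs_get_block,
 *					&MYFS_I(mapping->host)->mmu_private);
 *	}
 */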
24741da177e4SLinus Torvalds 
24751da177e4SLinus Torvalds int block_commit_write(struct page *page, unsigned from, unsigned to)
24761da177e4SLinus Torvalds {
24771da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
24781da177e4SLinus Torvalds 	__block_commit_write(inode, page, from, to);
24791da177e4SLinus Torvalds 	return 0;
24801da177e4SLinus Torvalds }
24811fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_commit_write);
24821da177e4SLinus Torvalds 
248354171690SDavid Chinner /*
248454171690SDavid Chinner  * block_page_mkwrite() is not allowed to change the file size as it gets
248554171690SDavid Chinner  * called from a page fault handler when a page is first dirtied. Hence we must
248654171690SDavid Chinner  * be careful to check for EOF conditions here. We set the page up correctly
248754171690SDavid Chinner  * for a written page which means we get ENOSPC checking when writing into
248854171690SDavid Chinner  * holes and correct delalloc and unwritten extent mapping on filesystems that
248954171690SDavid Chinner  * support these features.
249054171690SDavid Chinner  *
249154171690SDavid Chinner  * We are not allowed to take i_rwsem here so we have to play games to
249254171690SDavid Chinner  * protect against truncate races as the page could now be beyond EOF.  Because
24937bb46a67Snpiggin@suse.de  * truncate writes the inode size before removing pages, once we have the
249454171690SDavid Chinner  * page lock we can determine safely if the page is beyond EOF. If it is not
249554171690SDavid Chinner  * beyond EOF, then the page is guaranteed safe against truncation until we
249654171690SDavid Chinner  * unlock the page.
2497ea13a864SJan Kara  *
249814da9200SJan Kara  * Direct callers of this function should protect against filesystem freezing
24995c500029SRoss Zwisler  * using sb_start_pagefault() - sb_end_pagefault() functions.
250054171690SDavid Chinner  */
25015c500029SRoss Zwisler int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
250254171690SDavid Chinner 			 get_block_t get_block)
250354171690SDavid Chinner {
2504c2ec175cSNick Piggin 	struct page *page = vmf->page;
2505496ad9aaSAl Viro 	struct inode *inode = file_inode(vma->vm_file);
250654171690SDavid Chinner 	unsigned long end;
250754171690SDavid Chinner 	loff_t size;
250824da4fabSJan Kara 	int ret;
250954171690SDavid Chinner 
251054171690SDavid Chinner 	lock_page(page);
251154171690SDavid Chinner 	size = i_size_read(inode);
251254171690SDavid Chinner 	if ((page->mapping != inode->i_mapping) ||
251318336338SNick Piggin 	    (page_offset(page) > size)) {
251424da4fabSJan Kara 		/* We overload EFAULT to mean page got truncated */
251524da4fabSJan Kara 		ret = -EFAULT;
251624da4fabSJan Kara 		goto out_unlock;
251754171690SDavid Chinner 	}
251854171690SDavid Chinner 
251954171690SDavid Chinner 	/* page is wholly or partially inside EOF */
252009cbfeafSKirill A. Shutemov 	if (((page->index + 1) << PAGE_SHIFT) > size)
252109cbfeafSKirill A. Shutemov 		end = size & ~PAGE_MASK;
252254171690SDavid Chinner 	else
252309cbfeafSKirill A. Shutemov 		end = PAGE_SIZE;
252454171690SDavid Chinner 
2525ebdec241SChristoph Hellwig 	ret = __block_write_begin(page, 0, end, get_block);
252654171690SDavid Chinner 	if (!ret)
252754171690SDavid Chinner 		ret = block_commit_write(page, 0, end);
252854171690SDavid Chinner 
252924da4fabSJan Kara 	if (unlikely(ret < 0))
253024da4fabSJan Kara 		goto out_unlock;
2531ea13a864SJan Kara 	set_page_dirty(page);
25321d1d1a76SDarrick J. Wong 	wait_for_stable_page(page);
253324da4fabSJan Kara 	return 0;
253424da4fabSJan Kara out_unlock:
2535b827e496SNick Piggin 	unlock_page(page);
253654171690SDavid Chinner 	return ret;
253754171690SDavid Chinner }
25381fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_page_mkwrite);
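/*
 * Illustrative sketch (editorial): a direct caller wraps the helper in
 * the freeze protection described above and converts the error code
 * with block_page_mkwrite_return() from <linux/buffer_head.h>:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		int err;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
 *		sb_end_pagefault(inode->i_sb);
 *		return block_page_mkwrite_return(err);
 *	}
 */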
25391da177e4SLinus Torvalds 
25401da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping,
25411da177e4SLinus Torvalds 			loff_t from, get_block_t *get_block)
25421da177e4SLinus Torvalds {
254309cbfeafSKirill A. Shutemov 	pgoff_t index = from >> PAGE_SHIFT;
254409cbfeafSKirill A. Shutemov 	unsigned offset = from & (PAGE_SIZE-1);
25451da177e4SLinus Torvalds 	unsigned blocksize;
254654b21a79SAndrew Morton 	sector_t iblock;
25471da177e4SLinus Torvalds 	unsigned length, pos;
25481da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
25491da177e4SLinus Torvalds 	struct page *page;
25501da177e4SLinus Torvalds 	struct buffer_head *bh;
25511da177e4SLinus Torvalds 	int err;
25521da177e4SLinus Torvalds 
255393407472SFabian Frederick 	blocksize = i_blocksize(inode);
25541da177e4SLinus Torvalds 	length = offset & (blocksize - 1);
25551da177e4SLinus Torvalds 
25561da177e4SLinus Torvalds 	/* Block boundary? Nothing to do */
25571da177e4SLinus Torvalds 	if (!length)
25581da177e4SLinus Torvalds 		return 0;
25591da177e4SLinus Torvalds 
25601da177e4SLinus Torvalds 	length = blocksize - length;
256109cbfeafSKirill A. Shutemov 	iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
25621da177e4SLinus Torvalds 
25631da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
25641da177e4SLinus Torvalds 	err = -ENOMEM;
25651da177e4SLinus Torvalds 	if (!page)
25661da177e4SLinus Torvalds 		goto out;
25671da177e4SLinus Torvalds 
25681da177e4SLinus Torvalds 	if (!page_has_buffers(page))
25691da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
25701da177e4SLinus Torvalds 
25711da177e4SLinus Torvalds 	/* Find the buffer that contains "offset" */
25721da177e4SLinus Torvalds 	bh = page_buffers(page);
25731da177e4SLinus Torvalds 	pos = blocksize;
25741da177e4SLinus Torvalds 	while (offset >= pos) {
25751da177e4SLinus Torvalds 		bh = bh->b_this_page;
25761da177e4SLinus Torvalds 		iblock++;
25771da177e4SLinus Torvalds 		pos += blocksize;
25781da177e4SLinus Torvalds 	}
25791da177e4SLinus Torvalds 
25801da177e4SLinus Torvalds 	err = 0;
25811da177e4SLinus Torvalds 	if (!buffer_mapped(bh)) {
2582b0cf2321SBadari Pulavarty 		WARN_ON(bh->b_size != blocksize);
25831da177e4SLinus Torvalds 		err = get_block(inode, iblock, bh, 0);
25841da177e4SLinus Torvalds 		if (err)
25851da177e4SLinus Torvalds 			goto unlock;
25861da177e4SLinus Torvalds 		/* unmapped? It's a hole - nothing to do */
25871da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
25881da177e4SLinus Torvalds 			goto unlock;
25891da177e4SLinus Torvalds 	}
25901da177e4SLinus Torvalds 
25911da177e4SLinus Torvalds 	/* Ok, it's mapped. Make sure it's up-to-date */
25921da177e4SLinus Torvalds 	if (PageUptodate(page))
25931da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
25941da177e4SLinus Torvalds 
259533a266ddSDavid Chinner 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
25961da177e4SLinus Torvalds 		err = -EIO;
25971420c4a5SBart Van Assche 		ll_rw_block(REQ_OP_READ, 1, &bh);
25981da177e4SLinus Torvalds 		wait_on_buffer(bh);
25991da177e4SLinus Torvalds 		/* Uhhuh. Read error. Complain and punt. */
26001da177e4SLinus Torvalds 		if (!buffer_uptodate(bh))
26011da177e4SLinus Torvalds 			goto unlock;
26021da177e4SLinus Torvalds 	}
26031da177e4SLinus Torvalds 
2604eebd2aa3SChristoph Lameter 	zero_user(page, offset, length);
26051da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
26061da177e4SLinus Torvalds 	err = 0;
26071da177e4SLinus Torvalds 
26081da177e4SLinus Torvalds unlock:
26091da177e4SLinus Torvalds 	unlock_page(page);
261009cbfeafSKirill A. Shutemov 	put_page(page);
26111da177e4SLinus Torvalds out:
26121da177e4SLinus Torvalds 	return err;
26131da177e4SLinus Torvalds }
26141fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_truncate_page);
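/*
 * Illustrative sketch (editorial): on a shrinking truncate the partial
 * final block must be zeroed so that mmap reads beyond the new EOF
 * return zeroes; a truncate path might do:
 *
 *	err = block_truncate_page(inode->i_mapping, newsize,
 *				  myfs_get_block);
 *	if (err)
 *		return err;
 *	truncate_setsize(inode, newsize);
 */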
26151da177e4SLinus Torvalds 
26161da177e4SLinus Torvalds /*
26171da177e4SLinus Torvalds  * The generic ->writepage function for buffer-backed address_spaces
26181da177e4SLinus Torvalds  */
26191b938c08SMatthew Wilcox int block_write_full_page(struct page *page, get_block_t *get_block,
26201b938c08SMatthew Wilcox 			struct writeback_control *wbc)
26211da177e4SLinus Torvalds {
26221da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
26231da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
262409cbfeafSKirill A. Shutemov 	const pgoff_t end_index = i_size >> PAGE_SHIFT;
26251da177e4SLinus Torvalds 	unsigned offset;
26261da177e4SLinus Torvalds 
26271da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
26281da177e4SLinus Torvalds 	if (page->index < end_index)
262935c80d5fSChris Mason 		return __block_write_full_page(inode, page, get_block, wbc,
26301b938c08SMatthew Wilcox 					       end_buffer_async_write);
26311da177e4SLinus Torvalds 
26321da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
263309cbfeafSKirill A. Shutemov 	offset = i_size & (PAGE_SIZE-1);
26341da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
26351da177e4SLinus Torvalds 		unlock_page(page);
26361da177e4SLinus Torvalds 		return 0; /* don't care */
26371da177e4SLinus Torvalds 	}
26381da177e4SLinus Torvalds 
26391da177e4SLinus Torvalds 	/*
26401da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
26412a61aa40SAdam Buchbinder 	 * writepage invocation because it may be mmapped.  "A file is mapped
26421da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
26431da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
26441da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
26451da177e4SLinus Torvalds 	 */
264609cbfeafSKirill A. Shutemov 	zero_user_segment(page, offset, PAGE_SIZE);
26471b938c08SMatthew Wilcox 	return __block_write_full_page(inode, page, get_block, wbc,
264835c80d5fSChris Mason 							end_buffer_async_write);
264935c80d5fSChris Mason }
26501fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_write_full_page);
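/*
 * Illustrative sketch (editorial): a typical ->writepage() simply
 * forwards to this helper:
 *
 *	static int myfs_writepage(struct page *page,
 *				  struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, myfs_get_block, wbc);
 *	}
 */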
265135c80d5fSChris Mason 
26521da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
26531da177e4SLinus Torvalds 			    get_block_t *get_block)
26541da177e4SLinus Torvalds {
26551da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
26562a527d68SAlexander Potapenko 	struct buffer_head tmp = {
26572a527d68SAlexander Potapenko 		.b_size = i_blocksize(inode),
26582a527d68SAlexander Potapenko 	};
26592a527d68SAlexander Potapenko 
26601da177e4SLinus Torvalds 	get_block(inode, block, &tmp, 0);
26611da177e4SLinus Torvalds 	return tmp.b_blocknr;
26621da177e4SLinus Torvalds }
26631fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_block_bmap);
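/*
 * Illustrative sketch (editorial): the FIBMAP ioctl reaches the
 * filesystem through ->bmap(), which for get_block-based filesystems
 * is usually just:
 *
 *	static sector_t myfs_bmap(struct address_space *mapping,
 *				  sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */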
26641da177e4SLinus Torvalds 
26654246a0b6SChristoph Hellwig static void end_bio_bh_io_sync(struct bio *bio)
26661da177e4SLinus Torvalds {
26671da177e4SLinus Torvalds 	struct buffer_head *bh = bio->bi_private;
26681da177e4SLinus Torvalds 
2669b7c44ed9SJens Axboe 	if (unlikely(bio_flagged(bio, BIO_QUIET)))
267008bafc03SKeith Mannthey 		set_bit(BH_Quiet, &bh->b_state);
267108bafc03SKeith Mannthey 
26724e4cbee9SChristoph Hellwig 	bh->b_end_io(bh, !bio->bi_status);
26731da177e4SLinus Torvalds 	bio_put(bio);
26741da177e4SLinus Torvalds }
26751da177e4SLinus Torvalds 
26761420c4a5SBart Van Assche static int submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
26771420c4a5SBart Van Assche 			 struct writeback_control *wbc)
26781da177e4SLinus Torvalds {
26791420c4a5SBart Van Assche 	const enum req_op op = opf & REQ_OP_MASK;
26801da177e4SLinus Torvalds 	struct bio *bio;
26811da177e4SLinus Torvalds 
26821da177e4SLinus Torvalds 	BUG_ON(!buffer_locked(bh));
26831da177e4SLinus Torvalds 	BUG_ON(!buffer_mapped(bh));
26841da177e4SLinus Torvalds 	BUG_ON(!bh->b_end_io);
26858fb0e342SAneesh Kumar K.V 	BUG_ON(buffer_delay(bh));
26868fb0e342SAneesh Kumar K.V 	BUG_ON(buffer_unwritten(bh));
26871da177e4SLinus Torvalds 
268848fd4f93SJens Axboe 	/*
268948fd4f93SJens Axboe 	 * Only clear out a write error when rewriting
26901da177e4SLinus Torvalds 	 */
26912a222ca9SMike Christie 	if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
26921da177e4SLinus Torvalds 		clear_buffer_write_io_error(bh);
26931da177e4SLinus Torvalds 
269407888c66SChristoph Hellwig 	if (buffer_meta(bh))
26951420c4a5SBart Van Assche 		opf |= REQ_META;
269607888c66SChristoph Hellwig 	if (buffer_prio(bh))
26971420c4a5SBart Van Assche 		opf |= REQ_PRIO;
269807888c66SChristoph Hellwig 
26991420c4a5SBart Van Assche 	bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
27001da177e4SLinus Torvalds 
27014f74d15fSEric Biggers 	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
27024f74d15fSEric Biggers 
27034f024f37SKent Overstreet 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
27041da177e4SLinus Torvalds 
27056cf66b4cSKent Overstreet 	bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
27066cf66b4cSKent Overstreet 	BUG_ON(bio->bi_iter.bi_size != bh->b_size);
27071da177e4SLinus Torvalds 
27081da177e4SLinus Torvalds 	bio->bi_end_io = end_bio_bh_io_sync;
27091da177e4SLinus Torvalds 	bio->bi_private = bh;
27101da177e4SLinus Torvalds 
271183c9c547SMing Lei 	/* Take care of bh's that straddle the end of the device */
271283c9c547SMing Lei 	guard_bio_eod(bio);
271383c9c547SMing Lei 
2714fd42df30SDennis Zhou 	if (wbc) {
2715fd42df30SDennis Zhou 		wbc_init_bio(wbc, bio);
271634e51a5eSTejun Heo 		wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
2717fd42df30SDennis Zhou 	}
2718fd42df30SDennis Zhou 
27194e49ea4aSMike Christie 	submit_bio(bio);
2720f6454b04SJulia Lawall 	return 0;
27211da177e4SLinus Torvalds }
2722bafc0dbaSTejun Heo 
27231420c4a5SBart Van Assche int submit_bh(blk_opf_t opf, struct buffer_head *bh)
272471368511SDarrick J. Wong {
27251420c4a5SBart Van Assche 	return submit_bh_wbc(opf, bh, NULL);
272671368511SDarrick J. Wong }
27271fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(submit_bh);
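/*
 * Illustrative sketch (editorial): the canonical synchronous read
 * built from these primitives; bh_submit_read() below implements
 * roughly this sequence:
 *
 *	lock_buffer(bh);
 *	get_bh(bh);
 *	bh->b_end_io = end_buffer_read_sync;
 *	submit_bh(REQ_OP_READ, bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		return -EIO;
 */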
27281da177e4SLinus Torvalds 
27291da177e4SLinus Torvalds /**
27301da177e4SLinus Torvalds  * ll_rw_block: low-level access to block devices (DEPRECATED)
2731f5454140SBart Van Assche  * @opf: block layer request operation and flags.
27321da177e4SLinus Torvalds  * @nr: number of &struct buffer_heads in the array
27331da177e4SLinus Torvalds  * @bhs: array of pointers to &struct buffer_head
27341da177e4SLinus Torvalds  *
2735a7662236SJan Kara  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
273670246286SChristoph Hellwig  * requests an I/O operation on them, either a %REQ_OP_READ or a %REQ_OP_WRITE.
2737f5454140SBart Van Assche  * @opf contains flags modifying the detailed I/O behavior, most notably
273870246286SChristoph Hellwig  * %REQ_RAHEAD.
27391da177e4SLinus Torvalds  *
27401da177e4SLinus Torvalds  * This function drops any buffer that it cannot get a lock on (with the
27419cb569d6SChristoph Hellwig  * BH_Lock state bit), any buffer that appears to be clean when doing a write
27429cb569d6SChristoph Hellwig  * request, and any buffer that appears to be up-to-date when doing a read
27439cb569d6SChristoph Hellwig  * request.  Further it marks as clean buffers that are processed for
27449cb569d6SChristoph Hellwig  * writing (the buffer cache won't assume that they are actually clean
27459cb569d6SChristoph Hellwig  * until the buffer gets unlocked).
27461da177e4SLinus Torvalds  *
27471da177e4SLinus Torvalds  * ll_rw_block sets b_end_io to a simple completion handler that marks
2748e227867fSMasanari Iida  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
27491da177e4SLinus Torvalds  * any waiters.
27501da177e4SLinus Torvalds  *
27511da177e4SLinus Torvalds  * All of the buffers must be for the same device, and must also be a
27521da177e4SLinus Torvalds  * multiple of the current approved size for the device.
27531da177e4SLinus Torvalds  */
27541420c4a5SBart Van Assche void ll_rw_block(const blk_opf_t opf, int nr, struct buffer_head *bhs[])
27551da177e4SLinus Torvalds {
27561420c4a5SBart Van Assche 	const enum req_op op = opf & REQ_OP_MASK;
27571da177e4SLinus Torvalds 	int i;
27581da177e4SLinus Torvalds 
27591da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
27601da177e4SLinus Torvalds 		struct buffer_head *bh = bhs[i];
27611da177e4SLinus Torvalds 
27629cb569d6SChristoph Hellwig 		if (!trylock_buffer(bh))
27631da177e4SLinus Torvalds 			continue;
27643ae72869SBart Van Assche 		if (op == REQ_OP_WRITE) {
27651da177e4SLinus Torvalds 			if (test_clear_buffer_dirty(bh)) {
276676c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_write_sync;
2767e60e5c50SOGAWA Hirofumi 				get_bh(bh);
27681420c4a5SBart Van Assche 				submit_bh(opf, bh);
27691da177e4SLinus Torvalds 				continue;
27701da177e4SLinus Torvalds 			}
27711da177e4SLinus Torvalds 		} else {
27721da177e4SLinus Torvalds 			if (!buffer_uptodate(bh)) {
277376c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_read_sync;
2774e60e5c50SOGAWA Hirofumi 				get_bh(bh);
27751420c4a5SBart Van Assche 				submit_bh(opf, bh);
27761da177e4SLinus Torvalds 				continue;
27771da177e4SLinus Torvalds 			}
27781da177e4SLinus Torvalds 		}
27791da177e4SLinus Torvalds 		unlock_buffer(bh);
27801da177e4SLinus Torvalds 	}
27811da177e4SLinus Torvalds }
27821fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(ll_rw_block);
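/*
 * Illustrative sketch (editorial): the surviving legitimate use is
 * opportunistic readahead, where silently losing the I/O on a busy
 * buffer is harmless:
 *
 *	ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, &bh);
 *
 * Callers that must know the read actually happened should lock the
 * buffer and use submit_bh() themselves instead.
 */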
27831da177e4SLinus Torvalds 
27843ae72869SBart Van Assche void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
27859cb569d6SChristoph Hellwig {
27869cb569d6SChristoph Hellwig 	lock_buffer(bh);
27879cb569d6SChristoph Hellwig 	if (!test_clear_buffer_dirty(bh)) {
27889cb569d6SChristoph Hellwig 		unlock_buffer(bh);
27899cb569d6SChristoph Hellwig 		return;
27909cb569d6SChristoph Hellwig 	}
27919cb569d6SChristoph Hellwig 	bh->b_end_io = end_buffer_write_sync;
27929cb569d6SChristoph Hellwig 	get_bh(bh);
27931420c4a5SBart Van Assche 	submit_bh(REQ_OP_WRITE | op_flags, bh);
27949cb569d6SChristoph Hellwig }
27959cb569d6SChristoph Hellwig EXPORT_SYMBOL(write_dirty_buffer);
27969cb569d6SChristoph Hellwig 
27971da177e4SLinus Torvalds /*
27981da177e4SLinus Torvalds  * For a data-integrity writeout, we need to wait upon any in-progress I/O
27991da177e4SLinus Torvalds  * and then start new I/O and wait upon it.  The caller must have a ref on
28001da177e4SLinus Torvalds  * the buffer_head.
28011da177e4SLinus Torvalds  */
28023ae72869SBart Van Assche int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
28031da177e4SLinus Torvalds {
28041da177e4SLinus Torvalds 	WARN_ON(atomic_read(&bh->b_count) < 1);
28051da177e4SLinus Torvalds 	lock_buffer(bh);
28061da177e4SLinus Torvalds 	if (test_clear_buffer_dirty(bh)) {
2807377254b2SXianting Tian 		/*
2808377254b2SXianting Tian 		 * The bh should be mapped, but it might not be if the
2809377254b2SXianting Tian 		 * device was hot-removed. Not much we can do but fail the I/O.
2810377254b2SXianting Tian 		 */
2811377254b2SXianting Tian 		if (!buffer_mapped(bh)) {
2812377254b2SXianting Tian 			unlock_buffer(bh);
2813377254b2SXianting Tian 			return -EIO;
2814377254b2SXianting Tian 		}
2815377254b2SXianting Tian 
28161da177e4SLinus Torvalds 		get_bh(bh);
28171da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_write_sync;
2818*ab620620SRitesh Harjani (IBM) 		submit_bh(REQ_OP_WRITE | op_flags, bh);
28191da177e4SLinus Torvalds 		wait_on_buffer(bh);
2820*ab620620SRitesh Harjani (IBM) 		if (!buffer_uptodate(bh))
2821*ab620620SRitesh Harjani (IBM) 			return -EIO;
28221da177e4SLinus Torvalds 	} else {
28231da177e4SLinus Torvalds 		unlock_buffer(bh);
28241da177e4SLinus Torvalds 	}
2825*ab620620SRitesh Harjani (IBM) 	return 0;
28261da177e4SLinus Torvalds }
282787e99511SChristoph Hellwig EXPORT_SYMBOL(__sync_dirty_buffer);
282887e99511SChristoph Hellwig 
282987e99511SChristoph Hellwig int sync_dirty_buffer(struct buffer_head *bh)
283087e99511SChristoph Hellwig {
283170fd7614SChristoph Hellwig 	return __sync_dirty_buffer(bh, REQ_SYNC);
283287e99511SChristoph Hellwig }
28331fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(sync_dirty_buffer);
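/*
 * Illustrative sketch (editorial): updating one metadata block and
 * flushing it synchronously; the block number and the myfs_sb
 * structure are illustrative:
 *
 *	struct buffer_head *bh = sb_bread(sb, MYFS_SB_BLOCK);
 *
 *	if (!bh)
 *		return -EIO;
 *	memcpy(bh->b_data, &myfs_sb, sizeof(myfs_sb));
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	brelse(bh);
 */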
28341da177e4SLinus Torvalds 
28351da177e4SLinus Torvalds /*
283668189fefSMatthew Wilcox (Oracle)  * try_to_free_buffers() checks if all the buffers on this particular folio
28371da177e4SLinus Torvalds  * are unused, and releases them if so.
28381da177e4SLinus Torvalds  *
28391da177e4SLinus Torvalds  * Exclusion against try_to_free_buffers may be obtained by either
284068189fefSMatthew Wilcox (Oracle)  * locking the folio or by holding its mapping's private_lock.
28411da177e4SLinus Torvalds  *
284268189fefSMatthew Wilcox (Oracle)  * If the folio is dirty but all the buffers are clean then we need to
284368189fefSMatthew Wilcox (Oracle)  * be sure to mark the folio clean as well.  This is because the folio
28441da177e4SLinus Torvalds  * may be against a block device, and a later reattachment of buffers
284568189fefSMatthew Wilcox (Oracle)  * to a dirty folio will set *all* buffers dirty.  Which would corrupt
28461da177e4SLinus Torvalds  * filesystem data on the same device.
28471da177e4SLinus Torvalds  *
284868189fefSMatthew Wilcox (Oracle)  * The same applies to regular filesystem folios: if all the buffers are
284968189fefSMatthew Wilcox (Oracle)  * clean then we set the folio clean and proceed.  To do that, we require
2850e621900aSMatthew Wilcox (Oracle)  * total exclusion from block_dirty_folio().  That is obtained with
28511da177e4SLinus Torvalds  * private_lock.
28521da177e4SLinus Torvalds  *
28531da177e4SLinus Torvalds  * try_to_free_buffers() is non-blocking.
28541da177e4SLinus Torvalds  */
28551da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh)
28561da177e4SLinus Torvalds {
28571da177e4SLinus Torvalds 	return atomic_read(&bh->b_count) |
28581da177e4SLinus Torvalds 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
28591da177e4SLinus Torvalds }
28601da177e4SLinus Torvalds 
286164394763SMatthew Wilcox (Oracle) static bool
286264394763SMatthew Wilcox (Oracle) drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
28631da177e4SLinus Torvalds {
286464394763SMatthew Wilcox (Oracle) 	struct buffer_head *head = folio_buffers(folio);
28651da177e4SLinus Torvalds 	struct buffer_head *bh;
28661da177e4SLinus Torvalds 
28671da177e4SLinus Torvalds 	bh = head;
28681da177e4SLinus Torvalds 	do {
28691da177e4SLinus Torvalds 		if (buffer_busy(bh))
28701da177e4SLinus Torvalds 			goto failed;
28711da177e4SLinus Torvalds 		bh = bh->b_this_page;
28721da177e4SLinus Torvalds 	} while (bh != head);
28731da177e4SLinus Torvalds 
28741da177e4SLinus Torvalds 	do {
28751da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
28761da177e4SLinus Torvalds 
2877535ee2fbSJan Kara 		if (bh->b_assoc_map)
28781da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
28791da177e4SLinus Torvalds 		bh = next;
28801da177e4SLinus Torvalds 	} while (bh != head);
28811da177e4SLinus Torvalds 	*buffers_to_free = head;
288264394763SMatthew Wilcox (Oracle) 	folio_detach_private(folio);
288364394763SMatthew Wilcox (Oracle) 	return true;
28841da177e4SLinus Torvalds failed:
288564394763SMatthew Wilcox (Oracle) 	return false;
28861da177e4SLinus Torvalds }
28871da177e4SLinus Torvalds 
288868189fefSMatthew Wilcox (Oracle) bool try_to_free_buffers(struct folio *folio)
28891da177e4SLinus Torvalds {
289068189fefSMatthew Wilcox (Oracle) 	struct address_space * const mapping = folio->mapping;
28911da177e4SLinus Torvalds 	struct buffer_head *buffers_to_free = NULL;
289268189fefSMatthew Wilcox (Oracle) 	bool ret = false;
28931da177e4SLinus Torvalds 
289468189fefSMatthew Wilcox (Oracle) 	BUG_ON(!folio_test_locked(folio));
289568189fefSMatthew Wilcox (Oracle) 	if (folio_test_writeback(folio))
289668189fefSMatthew Wilcox (Oracle) 		return false;
28971da177e4SLinus Torvalds 
28981da177e4SLinus Torvalds 	if (mapping == NULL) {		/* can this still happen? */
289964394763SMatthew Wilcox (Oracle) 		ret = drop_buffers(folio, &buffers_to_free);
29001da177e4SLinus Torvalds 		goto out;
29011da177e4SLinus Torvalds 	}
29021da177e4SLinus Torvalds 
29031da177e4SLinus Torvalds 	spin_lock(&mapping->private_lock);
290464394763SMatthew Wilcox (Oracle) 	ret = drop_buffers(folio, &buffers_to_free);
2905ecdfc978SLinus Torvalds 
2906ecdfc978SLinus Torvalds 	/*
2907ecdfc978SLinus Torvalds 	 * If the filesystem writes its buffers by hand (eg ext3)
290868189fefSMatthew Wilcox (Oracle) 	 * then we can have clean buffers against a dirty folio.  We
290968189fefSMatthew Wilcox (Oracle) 	 * clean the folio here; otherwise the VM will never notice
2910ecdfc978SLinus Torvalds 	 * that the filesystem did any IO at all.
2911ecdfc978SLinus Torvalds 	 *
2912ecdfc978SLinus Torvalds 	 * Also, during truncate, discard_buffer will have marked all
291368189fefSMatthew Wilcox (Oracle) 	 * the folio's buffers clean.  We discover that here and clean
291468189fefSMatthew Wilcox (Oracle) 	 * the folio also.
291587df7241SNick Piggin 	 *
291687df7241SNick Piggin 	 * private_lock must be held over this entire operation in order
2917e621900aSMatthew Wilcox (Oracle) 	 * to synchronise against block_dirty_folio and prevent the
291887df7241SNick Piggin 	 * dirty bit from being lost.
2919ecdfc978SLinus Torvalds 	 */
292011f81becSTejun Heo 	if (ret)
292168189fefSMatthew Wilcox (Oracle) 		folio_cancel_dirty(folio);
292287df7241SNick Piggin 	spin_unlock(&mapping->private_lock);
29231da177e4SLinus Torvalds out:
29241da177e4SLinus Torvalds 	if (buffers_to_free) {
29251da177e4SLinus Torvalds 		struct buffer_head *bh = buffers_to_free;
29261da177e4SLinus Torvalds 
29271da177e4SLinus Torvalds 		do {
29281da177e4SLinus Torvalds 			struct buffer_head *next = bh->b_this_page;
29291da177e4SLinus Torvalds 			free_buffer_head(bh);
29301da177e4SLinus Torvalds 			bh = next;
29311da177e4SLinus Torvalds 		} while (bh != buffers_to_free);
29321da177e4SLinus Torvalds 	}
29331da177e4SLinus Torvalds 	return ret;
29341da177e4SLinus Torvalds }
29351da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers);
29361da177e4SLinus Torvalds 
29371da177e4SLinus Torvalds /*
29381da177e4SLinus Torvalds  * Buffer-head allocation
29391da177e4SLinus Torvalds  */
2940a0a9b043SShai Fultheim static struct kmem_cache *bh_cachep __read_mostly;
29411da177e4SLinus Torvalds 
29421da177e4SLinus Torvalds /*
29431da177e4SLinus Torvalds  * Once the number of bh's in the machine exceeds this level, we start
29441da177e4SLinus Torvalds  * stripping them in writeback.
29451da177e4SLinus Torvalds  */
294643be594aSZhang Yanfei static unsigned long max_buffer_heads;
29471da177e4SLinus Torvalds 
29481da177e4SLinus Torvalds int buffer_heads_over_limit;
29491da177e4SLinus Torvalds 
29501da177e4SLinus Torvalds struct bh_accounting {
29511da177e4SLinus Torvalds 	int nr;			/* Number of live bh's */
29521da177e4SLinus Torvalds 	int ratelimit;		/* Limit cacheline bouncing */
29531da177e4SLinus Torvalds };
29541da177e4SLinus Torvalds 
29551da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
29561da177e4SLinus Torvalds 
29571da177e4SLinus Torvalds static void recalc_bh_state(void)
29581da177e4SLinus Torvalds {
29591da177e4SLinus Torvalds 	int i;
29601da177e4SLinus Torvalds 	int tot = 0;
29611da177e4SLinus Torvalds 
2962ee1be862SChristoph Lameter 	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
29631da177e4SLinus Torvalds 		return;
2964c7b92516SChristoph Lameter 	__this_cpu_write(bh_accounting.ratelimit, 0);
29658a143426SEric Dumazet 	for_each_online_cpu(i)
29661da177e4SLinus Torvalds 		tot += per_cpu(bh_accounting, i).nr;
29671da177e4SLinus Torvalds 	buffer_heads_over_limit = (tot > max_buffer_heads);
29681da177e4SLinus Torvalds }
29691da177e4SLinus Torvalds 
2970dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
29711da177e4SLinus Torvalds {
2972019b4d12SRichard Kennedy 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
29731da177e4SLinus Torvalds 	if (ret) {
2974a35afb83SChristoph Lameter 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
2975f1e67e35SThomas Gleixner 		spin_lock_init(&ret->b_uptodate_lock);
2976c7b92516SChristoph Lameter 		preempt_disable();
2977c7b92516SChristoph Lameter 		__this_cpu_inc(bh_accounting.nr);
29781da177e4SLinus Torvalds 		recalc_bh_state();
2979c7b92516SChristoph Lameter 		preempt_enable();
29801da177e4SLinus Torvalds 	}
29811da177e4SLinus Torvalds 	return ret;
29821da177e4SLinus Torvalds }
29831da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head);
29841da177e4SLinus Torvalds 
29851da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh)
29861da177e4SLinus Torvalds {
29871da177e4SLinus Torvalds 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
29881da177e4SLinus Torvalds 	kmem_cache_free(bh_cachep, bh);
2989c7b92516SChristoph Lameter 	preempt_disable();
2990c7b92516SChristoph Lameter 	__this_cpu_dec(bh_accounting.nr);
29911da177e4SLinus Torvalds 	recalc_bh_state();
2992c7b92516SChristoph Lameter 	preempt_enable();
29931da177e4SLinus Torvalds }
29941da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head);
29951da177e4SLinus Torvalds 
2996fc4d24c9SSebastian Andrzej Siewior static int buffer_exit_cpu_dead(unsigned int cpu)
29971da177e4SLinus Torvalds {
29981da177e4SLinus Torvalds 	int i;
29991da177e4SLinus Torvalds 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
30001da177e4SLinus Torvalds 
30011da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
30021da177e4SLinus Torvalds 		brelse(b->bhs[i]);
30031da177e4SLinus Torvalds 		b->bhs[i] = NULL;
30041da177e4SLinus Torvalds 	}
3005c7b92516SChristoph Lameter 	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
30068a143426SEric Dumazet 	per_cpu(bh_accounting, cpu).nr = 0;
3007fc4d24c9SSebastian Andrzej Siewior 	return 0;
30081da177e4SLinus Torvalds }
30091da177e4SLinus Torvalds 
3010389d1b08SAneesh Kumar K.V /**
3011a6b91919SRandy Dunlap  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3012389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head
3013389d1b08SAneesh Kumar K.V  *
3014389d1b08SAneesh Kumar K.V  * Return 1 if the buffer is up-to-date, or 0 with the
3015389d1b08SAneesh Kumar K.V  * buffer locked if it is not.
3016389d1b08SAneesh Kumar K.V  */
3017389d1b08SAneesh Kumar K.V int bh_uptodate_or_lock(struct buffer_head *bh)
3018389d1b08SAneesh Kumar K.V {
3019389d1b08SAneesh Kumar K.V 	if (!buffer_uptodate(bh)) {
3020389d1b08SAneesh Kumar K.V 		lock_buffer(bh);
3021389d1b08SAneesh Kumar K.V 		if (!buffer_uptodate(bh))
3022389d1b08SAneesh Kumar K.V 			return 0;
3023389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3024389d1b08SAneesh Kumar K.V 	}
3025389d1b08SAneesh Kumar K.V 	return 1;
3026389d1b08SAneesh Kumar K.V }
3027389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_uptodate_or_lock);
3028389d1b08SAneesh Kumar K.V 
3029389d1b08SAneesh Kumar K.V /**
3030a6b91919SRandy Dunlap  * bh_submit_read - Submit a locked buffer for reading
3031389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head
3032389d1b08SAneesh Kumar K.V  *
3033389d1b08SAneesh Kumar K.V  * Returns zero on success and -EIO on error.
3034389d1b08SAneesh Kumar K.V  */
3035389d1b08SAneesh Kumar K.V int bh_submit_read(struct buffer_head *bh)
3036389d1b08SAneesh Kumar K.V {
3037389d1b08SAneesh Kumar K.V 	BUG_ON(!buffer_locked(bh));
3038389d1b08SAneesh Kumar K.V 
3039389d1b08SAneesh Kumar K.V 	if (buffer_uptodate(bh)) {
3040389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3041389d1b08SAneesh Kumar K.V 		return 0;
3042389d1b08SAneesh Kumar K.V 	}
3043389d1b08SAneesh Kumar K.V 
3044389d1b08SAneesh Kumar K.V 	get_bh(bh);
3045389d1b08SAneesh Kumar K.V 	bh->b_end_io = end_buffer_read_sync;
30461420c4a5SBart Van Assche 	submit_bh(REQ_OP_READ, bh);
3047389d1b08SAneesh Kumar K.V 	wait_on_buffer(bh);
3048389d1b08SAneesh Kumar K.V 	if (buffer_uptodate(bh))
3049389d1b08SAneesh Kumar K.V 		return 0;
3050389d1b08SAneesh Kumar K.V 	return -EIO;
3051389d1b08SAneesh Kumar K.V }
3052389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_submit_read);
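/*
 * Illustrative sketch (editorial): bh_uptodate_or_lock() and
 * bh_submit_read() pair up so the device is only touched when the
 * buffer is stale:
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		if (bh_submit_read(bh) < 0) {
 *			brelse(bh);
 *			return -EIO;
 *		}
 *	}
 */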
3053389d1b08SAneesh Kumar K.V 
30541da177e4SLinus Torvalds void __init buffer_init(void)
30551da177e4SLinus Torvalds {
305643be594aSZhang Yanfei 	unsigned long nrpages;
3057fc4d24c9SSebastian Andrzej Siewior 	int ret;
30581da177e4SLinus Torvalds 
3059b98938c3SChristoph Lameter 	bh_cachep = kmem_cache_create("buffer_head",
3060b98938c3SChristoph Lameter 			sizeof(struct buffer_head), 0,
3061b98938c3SChristoph Lameter 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3062b98938c3SChristoph Lameter 				SLAB_MEM_SPREAD),
3063019b4d12SRichard Kennedy 				NULL);
30641da177e4SLinus Torvalds 
30651da177e4SLinus Torvalds 	/*
30661da177e4SLinus Torvalds 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
30671da177e4SLinus Torvalds 	 */
30681da177e4SLinus Torvalds 	nrpages = (nr_free_buffer_pages() * 10) / 100;
30691da177e4SLinus Torvalds 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3070fc4d24c9SSebastian Andrzej Siewior 	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3071fc4d24c9SSebastian Andrzej Siewior 					NULL, buffer_exit_cpu_dead);
3072fc4d24c9SSebastian Andrzej Siewior 	WARN_ON(ret < 0);
30731da177e4SLinus Torvalds }
3074