/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}

static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head *bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}

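/*
 * Illustrative sketch (editor's addition, not built): because
 * __wait_on_buffer() gives no guarantee that the buffer stays unlocked,
 * a caller that needs stable buffer state must take the lock itself with
 * lock_buffer() and drop it with unlock_buffer().  "example_read_locked"
 * is a hypothetical helper showing the pattern.
 */
#if 0
static int example_read_locked(struct buffer_head *bh)
{
	int uptodate;

	lock_buffer(bh);		/* wait AND take BH_Lock */
	uptodate = buffer_uptodate(bh);	/* state is stable while locked */
	unlock_buffer(bh);		/* wakes waiters on BH_Lock */
	return uptodate;
}
#endif
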
static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}


static int quiet_error(struct buffer_head *bh)
{
	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
		return 0;
	return 1;
}


static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens due to failed read-ahead (READA) attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	int ret = 0;

	if (bdev)
		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
	return ret;
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device: filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = fsync_super(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}
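
/*
 * Illustrative sketch (editor's addition, not built): the difference
 * between the two helpers above.  sync_blockdev() only flushes the block
 * device's own pagecache; fsync_bdev() additionally syncs a filesystem
 * mounted on the device, if there is one.  "example_flush" is a
 * hypothetical name.
 */
#if 0
static int example_flush(struct block_device *bdev, int fs_too)
{
	if (fs_too)
		return fsync_bdev(bdev);	/* superblock + blockdev */
	return sync_blockdev(bdev);		/* blockdev pagecache only */
}
#endif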

/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when
 * multiple freeze requests arrive simultaneously.  It is incremented in
 * freeze_bdev() and decremented in thaw_bdev().  When it reaches 0,
 * thaw_bdev() actually unfreezes the filesystem.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		bdev->bd_fsfreeze_count++;
		sb = get_super(bdev);
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return sb;
	}
	bdev->bd_fsfreeze_count++;

	down(&bdev->bd_mount_sem);
	sb = get_super(bdev);
	if (sb && !(sb->s_flags & MS_RDONLY)) {
		sb->s_frozen = SB_FREEZE_WRITE;
		smp_wmb();

		__fsync_super(sb);

		sb->s_frozen = SB_FREEZE_TRANS;
		smp_wmb();

		sync_blockdev(sb->s_bdev);

		if (sb->s_op->freeze_fs) {
			error = sb->s_op->freeze_fs(sb);
			if (error) {
				printk(KERN_ERR
					"VFS: Filesystem freeze failed\n");
				sb->s_frozen = SB_UNFROZEN;
				drop_super(sb);
				up(&bdev->bd_mount_sem);
				bdev->bd_fsfreeze_count--;
				mutex_unlock(&bdev->bd_fsfreeze_mutex);
				return ERR_PTR(error);
			}
		}
	}

	sync_blockdev(bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);

	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return -EINVAL;
	}

	bdev->bd_fsfreeze_count--;
	if (bdev->bd_fsfreeze_count > 0) {
		if (sb)
			drop_super(sb);
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return 0;
	}

	if (sb) {
		BUG_ON(sb->s_bdev != bdev);
		if (!(sb->s_flags & MS_RDONLY)) {
			if (sb->s_op->unfreeze_fs) {
				error = sb->s_op->unfreeze_fs(sb);
				if (error) {
					printk(KERN_ERR
						"VFS: Filesystem thaw failed\n");
					sb->s_frozen = SB_FREEZE_TRANS;
					bdev->bd_fsfreeze_count++;
					mutex_unlock(&bdev->bd_fsfreeze_mutex);
					return error;
				}
			}
			sb->s_frozen = SB_UNFROZEN;
			smp_wmb();
			wake_up(&sb->s_wait_unfrozen);
		}
		drop_super(sb);
	}

	up(&bdev->bd_mount_sem);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return 0;
}
EXPORT_SYMBOL(thaw_bdev);
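
/*
 * Illustrative sketch (editor's addition, not built): the typical
 * snapshot sequence a caller such as a volume manager would use.  While
 * the device is frozen no new writes reach it, so a block-level copy is
 * consistent.  "example_snapshot" and "take_block_snapshot" are
 * hypothetical names.
 */
#if 0
static int example_snapshot(struct block_device *bdev)
{
	struct super_block *sb;
	int err;

	sb = freeze_bdev(bdev);		/* flush + block new writes */
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	err = take_block_snapshot(bdev);	/* hypothetical copy step */

	thaw_bdev(bdev, sb);		/* drops the refs freeze took */
	return err;
}
#endif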

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock.  (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		if (!buffer_mapped(bh))
			all_mapped = 0;
		bh = bh->b_this_page;
	} while (bh != head);

	/* We might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between file I/O on
	 * the block device and getblk.  It gets dealt with elsewhere;
	 * don't report an error if we had some unmapped buffers.
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on.  Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers.  For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media disk even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or to an error of the user), by not destroying the dirty buffers
   we could generate corruption also on the next media inserted.  Thus a
   parameter is necessary to handle this case in the safest way possible
   (trying not to corrupt the new disk inserted with the data belonging to
   the old, now corrupted, disk).  Also for the ramdisk the natural thing
   to do in order to release the ramdisk memory is to destroy dirty buffers.

   These are two special cases.  Normal usage implies that the device
   driver issues a sync on the device (without waiting for I/O completion)
   and then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced.  It is needed to re-read from disk any pinned
   buffer.  NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	invalidate_mapping_pages(mapping, 0, -1);
}

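/*
 * Illustrative sketch (editor's addition, not built): the "normal usage"
 * described above - a driver syncs the device and then invalidates the
 * cached pages, e.g. after a media change.  "example_media_change" is a
 * hypothetical name.
 */
#if 0
static void example_media_change(struct block_device *bdev)
{
	sync_blockdev(bdev);	/* push dirty pages out first */
	invalidate_bdev(bdev);	/* then drop clean cached pages */
}
#endif
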
/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone *zone;
	int nid;

	wakeup_pdflush(1024);
	yield();

	for_each_online_node(nid) {
		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
						gfp_zone(GFP_NOFS), NULL,
						&zone);
		if (zone)
			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
						GFP_NOFS);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (!quiet_error(bh))
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_write;
	set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

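/*
 * Illustrative sketch (editor's addition, not built): how a simple
 * filesystem's ->fsync() might use the helper above to flush metadata
 * buffers queued on ->private_list before syncing the inode proper.
 * "example_fsync" is a hypothetical name; error handling is simplified.
 */
#if 0
static int example_fsync(struct file *file, struct dentry *dentry,
			 int datasync)
{
	struct inode *inode = dentry->d_inode;
	int err;

	/* write out + wait on the "associated" metadata buffers */
	err = sync_mapping_buffers(inode->i_mapping);
	if (!err && !datasync && (inode->i_state & I_DIRTY_SYNC))
		err = write_inode_now(inode, 1);
	return err;
}
#endif
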
/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
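
/*
 * Illustrative sketch (editor's addition, not built): a filesystem that
 * updates an indirect (metadata) block on behalf of a file inode dirties
 * it with mark_buffer_dirty_inode(), so that a later
 * sync_mapping_buffers() on the file's mapping will flush it.
 * "example_update_indirect" is a hypothetical name.
 */
#if 0
static void example_update_indirect(struct inode *inode,
				    struct buffer_head *indirect_bh,
				    int slot, __le32 new_block)
{
	((__le32 *)indirect_bh->b_data)[slot] = new_block;
	/* dirties the bh and queues it on inode->i_mapping->private_list */
	mark_buffer_dirty_inode(indirect_bh, inode);
}
#endif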

/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static int __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (TestSetPageDirty(page))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));

		if (mapping_cap_account_dirty(mapping)) {
			__inc_zone_page_state(page, NR_FILE_DIRTY);
			__inc_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			task_io_account_write(PAGE_CACHE_SIZE);
		}
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	spin_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return 1;
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	spin_unlock(&mapping->private_lock);

	return __set_page_dirty(page, mapping, 1);
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				ll_rw_block(SWRITE_SYNC, 1, &bh);
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping),
 * which may not fail, from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_private = NULL;
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);
10461da177e4SLinus Torvalds 
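/*
 * Editorial sketch (not part of the original file): a hypothetical
 * caller of alloc_page_buffers().  Note that the chain built above is
 * NULL-terminated; it only becomes circular once link_dev_buffers()
 * ties tail->b_this_page back to the head.
 */
static int example_count_page_buffers(struct page *page)
{
	struct buffer_head *head, *bh;
	int nr = 0;

	/* retry == 1: cannot return NULL, loops via free_more_memory() */
	head = alloc_page_buffers(page, 512, 1);

	for (bh = head; bh != NULL; bh = bh->b_this_page)
		nr++;			/* expect PAGE_SIZE / 512 buffers */

	/* A real caller would attach these to the page; we just free them. */
	while (head) {
		bh = head;
		head = head->b_this_page;
		free_buffer_head(bh);
	}
	return nr;
}
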
10471da177e4SLinus Torvalds static inline void
10481da177e4SLinus Torvalds link_dev_buffers(struct page *page, struct buffer_head *head)
10491da177e4SLinus Torvalds {
10501da177e4SLinus Torvalds 	struct buffer_head *bh, *tail;
10511da177e4SLinus Torvalds 
10521da177e4SLinus Torvalds 	bh = head;
10531da177e4SLinus Torvalds 	do {
10541da177e4SLinus Torvalds 		tail = bh;
10551da177e4SLinus Torvalds 		bh = bh->b_this_page;
10561da177e4SLinus Torvalds 	} while (bh);
10571da177e4SLinus Torvalds 	tail->b_this_page = head;
10581da177e4SLinus Torvalds 	attach_page_buffers(page, head);
10591da177e4SLinus Torvalds }
10601da177e4SLinus Torvalds 
10611da177e4SLinus Torvalds /*
10621da177e4SLinus Torvalds  * Initialise the state of a blockdev page's buffers.
10631da177e4SLinus Torvalds  */
10641da177e4SLinus Torvalds static void
10651da177e4SLinus Torvalds init_page_buffers(struct page *page, struct block_device *bdev,
10661da177e4SLinus Torvalds 			sector_t block, int size)
10671da177e4SLinus Torvalds {
10681da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);
10691da177e4SLinus Torvalds 	struct buffer_head *bh = head;
10701da177e4SLinus Torvalds 	int uptodate = PageUptodate(page);
10711da177e4SLinus Torvalds 
10721da177e4SLinus Torvalds 	do {
10731da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
10741da177e4SLinus Torvalds 			init_buffer(bh, NULL, NULL);
10751da177e4SLinus Torvalds 			bh->b_bdev = bdev;
10761da177e4SLinus Torvalds 			bh->b_blocknr = block;
10771da177e4SLinus Torvalds 			if (uptodate)
10781da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
10791da177e4SLinus Torvalds 			set_buffer_mapped(bh);
10801da177e4SLinus Torvalds 		}
10811da177e4SLinus Torvalds 		block++;
10821da177e4SLinus Torvalds 		bh = bh->b_this_page;
10831da177e4SLinus Torvalds 	} while (bh != head);
10841da177e4SLinus Torvalds }
10851da177e4SLinus Torvalds 
10861da177e4SLinus Torvalds /*
10871da177e4SLinus Torvalds  * Create the page-cache page that contains the requested block.
10881da177e4SLinus Torvalds  *
10891da177e4SLinus Torvalds  * This is used purely for blockdev mappings.
10901da177e4SLinus Torvalds  */
10911da177e4SLinus Torvalds static struct page *
10921da177e4SLinus Torvalds grow_dev_page(struct block_device *bdev, sector_t block,
10931da177e4SLinus Torvalds 		pgoff_t index, int size)
10941da177e4SLinus Torvalds {
10951da177e4SLinus Torvalds 	struct inode *inode = bdev->bd_inode;
10961da177e4SLinus Torvalds 	struct page *page;
10971da177e4SLinus Torvalds 	struct buffer_head *bh;
10981da177e4SLinus Torvalds 
1099ea125892SChristoph Lameter 	page = find_or_create_page(inode->i_mapping, index,
1100769848c0SMel Gorman 		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
11011da177e4SLinus Torvalds 	if (!page)
11021da177e4SLinus Torvalds 		return NULL;
11031da177e4SLinus Torvalds 
1104e827f923SEric Sesterhenn 	BUG_ON(!PageLocked(page));
11051da177e4SLinus Torvalds 
11061da177e4SLinus Torvalds 	if (page_has_buffers(page)) {
11071da177e4SLinus Torvalds 		bh = page_buffers(page);
11081da177e4SLinus Torvalds 		if (bh->b_size == size) {
11091da177e4SLinus Torvalds 			init_page_buffers(page, bdev, block, size);
11101da177e4SLinus Torvalds 			return page;
11111da177e4SLinus Torvalds 		}
11121da177e4SLinus Torvalds 		if (!try_to_free_buffers(page))
11131da177e4SLinus Torvalds 			goto failed;
11141da177e4SLinus Torvalds 	}
11151da177e4SLinus Torvalds 
11161da177e4SLinus Torvalds 	/*
11171da177e4SLinus Torvalds 	 * Allocate some buffers for this page
11181da177e4SLinus Torvalds 	 */
11191da177e4SLinus Torvalds 	bh = alloc_page_buffers(page, size, 0);
11201da177e4SLinus Torvalds 	if (!bh)
11211da177e4SLinus Torvalds 		goto failed;
11221da177e4SLinus Torvalds 
11231da177e4SLinus Torvalds 	/*
11241da177e4SLinus Torvalds 	 * Link the page to the buffers and initialise them.  Take the
11251da177e4SLinus Torvalds 	 * lock to be atomic wrt __find_get_block(), which does not
11261da177e4SLinus Torvalds 	 * run under the page lock.
11271da177e4SLinus Torvalds 	 */
11281da177e4SLinus Torvalds 	spin_lock(&inode->i_mapping->private_lock);
11291da177e4SLinus Torvalds 	link_dev_buffers(page, bh);
11301da177e4SLinus Torvalds 	init_page_buffers(page, bdev, block, size);
11311da177e4SLinus Torvalds 	spin_unlock(&inode->i_mapping->private_lock);
11321da177e4SLinus Torvalds 	return page;
11331da177e4SLinus Torvalds 
11341da177e4SLinus Torvalds failed:
11351da177e4SLinus Torvalds 	BUG();
11361da177e4SLinus Torvalds 	unlock_page(page);
11371da177e4SLinus Torvalds 	page_cache_release(page);
11381da177e4SLinus Torvalds 	return NULL;
11391da177e4SLinus Torvalds }
11401da177e4SLinus Torvalds 
11411da177e4SLinus Torvalds /*
11421da177e4SLinus Torvalds  * Create buffers for the specified block device block's page.  If
11431da177e4SLinus Torvalds  * that page was dirty, the buffers are set dirty also.
11441da177e4SLinus Torvalds  */
1145858119e1SArjan van de Ven static int
11461da177e4SLinus Torvalds grow_buffers(struct block_device *bdev, sector_t block, int size)
11471da177e4SLinus Torvalds {
11481da177e4SLinus Torvalds 	struct page *page;
11491da177e4SLinus Torvalds 	pgoff_t index;
11501da177e4SLinus Torvalds 	int sizebits;
11511da177e4SLinus Torvalds 
11521da177e4SLinus Torvalds 	sizebits = -1;
11531da177e4SLinus Torvalds 	do {
11541da177e4SLinus Torvalds 		sizebits++;
11551da177e4SLinus Torvalds 	} while ((size << sizebits) < PAGE_SIZE);
11561da177e4SLinus Torvalds 
11571da177e4SLinus Torvalds 	index = block >> sizebits;
11581da177e4SLinus Torvalds 
1159e5657933SAndrew Morton 	/*
1160e5657933SAndrew Morton 	 * Check for a block which wants to lie outside our maximum possible
1161e5657933SAndrew Morton 	 * pagecache index.  (this comparison is done using sector_t types).
1162e5657933SAndrew Morton 	 */
1163e5657933SAndrew Morton 	if (unlikely(index != block >> sizebits)) {
1164e5657933SAndrew Morton 		char b[BDEVNAME_SIZE];
1165e5657933SAndrew Morton 
1166e5657933SAndrew Morton 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1167e5657933SAndrew Morton 			"device %s\n",
11688e24eea7SHarvey Harrison 			__func__, (unsigned long long)block,
1169e5657933SAndrew Morton 			bdevname(bdev, b));
1170e5657933SAndrew Morton 		return -EIO;
1171e5657933SAndrew Morton 	}
1172e5657933SAndrew Morton 	block = index << sizebits;
11731da177e4SLinus Torvalds 	/* Create a page with the proper size buffers. */
11741da177e4SLinus Torvalds 	page = grow_dev_page(bdev, block, index, size);
11751da177e4SLinus Torvalds 	if (!page)
11761da177e4SLinus Torvalds 		return 0;
11771da177e4SLinus Torvalds 	unlock_page(page);
11781da177e4SLinus Torvalds 	page_cache_release(page);
11791da177e4SLinus Torvalds 	return 1;
11801da177e4SLinus Torvalds }
11811da177e4SLinus Torvalds 
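/*
 * Editorial note: a worked example of the arithmetic above.  With
 * PAGE_SIZE == 4096 and size == 512 the loop yields sizebits == 3, so
 * eight blocks share one page.  A request for block 1003 maps to page
 * index 1003 >> 3 == 125, and "block = index << sizebits" rounds the
 * request down to 1000, the first block of that page;
 * init_page_buffers() then numbers the page's buffers 1000..1007.
 */
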
118275c96f85SAdrian Bunk static struct buffer_head *
11831da177e4SLinus Torvalds __getblk_slow(struct block_device *bdev, sector_t block, int size)
11841da177e4SLinus Torvalds {
11851da177e4SLinus Torvalds 	/* Size must be a multiple of the hard sector size */
11861da177e4SLinus Torvalds 	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
11871da177e4SLinus Torvalds 			(size < 512 || size > PAGE_SIZE))) {
11881da177e4SLinus Torvalds 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
11891da177e4SLinus Torvalds 					size);
11901da177e4SLinus Torvalds 		printk(KERN_ERR "hardsect size: %d\n",
11911da177e4SLinus Torvalds 					bdev_hardsect_size(bdev));
11921da177e4SLinus Torvalds 
11931da177e4SLinus Torvalds 		dump_stack();
11941da177e4SLinus Torvalds 		return NULL;
11951da177e4SLinus Torvalds 	}
11961da177e4SLinus Torvalds 
11971da177e4SLinus Torvalds 	for (;;) {
11981da177e4SLinus Torvalds 		struct buffer_head * bh;
1199e5657933SAndrew Morton 		int ret;
12001da177e4SLinus Torvalds 
12011da177e4SLinus Torvalds 		bh = __find_get_block(bdev, block, size);
12021da177e4SLinus Torvalds 		if (bh)
12031da177e4SLinus Torvalds 			return bh;
12041da177e4SLinus Torvalds 
1205e5657933SAndrew Morton 		ret = grow_buffers(bdev, block, size);
1206e5657933SAndrew Morton 		if (ret < 0)
1207e5657933SAndrew Morton 			return NULL;
1208e5657933SAndrew Morton 		if (ret == 0)
12091da177e4SLinus Torvalds 			free_more_memory();
12101da177e4SLinus Torvalds 	}
12111da177e4SLinus Torvalds }
12121da177e4SLinus Torvalds 
12131da177e4SLinus Torvalds /*
12141da177e4SLinus Torvalds  * The relationship between dirty buffers and dirty pages:
12151da177e4SLinus Torvalds  *
12161da177e4SLinus Torvalds  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
12171da177e4SLinus Torvalds  * the page is tagged dirty in its radix tree.
12181da177e4SLinus Torvalds  *
12191da177e4SLinus Torvalds  * At all times, the dirtiness of the buffers represents the dirtiness of
12201da177e4SLinus Torvalds  * subsections of the page.  If the page has buffers, the page dirty bit is
12211da177e4SLinus Torvalds  * merely a hint about the true dirty state.
12221da177e4SLinus Torvalds  *
12231da177e4SLinus Torvalds  * When a page is set dirty in its entirety, all its buffers are marked dirty
12241da177e4SLinus Torvalds  * (if the page has buffers).
12251da177e4SLinus Torvalds  *
12261da177e4SLinus Torvalds  * When a buffer is marked dirty, its page is dirtied, but the page's other
12271da177e4SLinus Torvalds  * buffers are not.
12281da177e4SLinus Torvalds  *
12291da177e4SLinus Torvalds  * Also.  When blockdev buffers are explicitly read with bread(), they
12301da177e4SLinus Torvalds  * individually become uptodate.  But their backing page remains not
12311da177e4SLinus Torvalds  * uptodate - even if all of its buffers are uptodate.  A subsequent
12321da177e4SLinus Torvalds  * block_read_full_page() against that page will discover all the uptodate
12331da177e4SLinus Torvalds  * buffers, will set the page uptodate and will perform no I/O.
12341da177e4SLinus Torvalds  */
12351da177e4SLinus Torvalds 
12361da177e4SLinus Torvalds /**
12371da177e4SLinus Torvalds  * mark_buffer_dirty - mark a buffer_head as needing writeout
123867be2dd1SMartin Waitz  * @bh: the buffer_head to mark dirty
12391da177e4SLinus Torvalds  *
12401da177e4SLinus Torvalds  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
12411da177e4SLinus Torvalds  * backing page dirty, then tag the page as dirty in its address_space's radix
12421da177e4SLinus Torvalds  * tree and then attach the address_space's inode to its superblock's dirty
12431da177e4SLinus Torvalds  * inode list.
12441da177e4SLinus Torvalds  *
12451da177e4SLinus Torvalds  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
12461da177e4SLinus Torvalds  * mapping->tree_lock and the global inode_lock.
12471da177e4SLinus Torvalds  */
1248fc9b52cdSHarvey Harrison void mark_buffer_dirty(struct buffer_head *bh)
12491da177e4SLinus Torvalds {
1250787d2214SNick Piggin 	WARN_ON_ONCE(!buffer_uptodate(bh));
12511be62dc1SLinus Torvalds 
12521be62dc1SLinus Torvalds 	/*
12531be62dc1SLinus Torvalds 	 * Very *carefully* optimize the it-is-already-dirty case.
12541be62dc1SLinus Torvalds 	 *
12551be62dc1SLinus Torvalds 	 * Don't let the final "is it dirty" escape to before we
12561be62dc1SLinus Torvalds 	 * perhaps modified the buffer.
12571be62dc1SLinus Torvalds 	 */
12581be62dc1SLinus Torvalds 	if (buffer_dirty(bh)) {
12591be62dc1SLinus Torvalds 		smp_mb();
12601be62dc1SLinus Torvalds 		if (buffer_dirty(bh))
12611be62dc1SLinus Torvalds 			return;
12621be62dc1SLinus Torvalds 	}
12631be62dc1SLinus Torvalds 
12641be62dc1SLinus Torvalds 	if (!test_set_buffer_dirty(bh))
1265787d2214SNick Piggin 		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
12661da177e4SLinus Torvalds }
12671da177e4SLinus Torvalds 
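/*
 * Editorial sketch (not part of the original file): the classic
 * read-modify-write pattern against a metadata block.  The names sb,
 * blocknr, data and len are hypothetical; sb_bread() is the standard
 * wrapper around __bread() from buffer_head.h.
 */
static int example_update_block(struct super_block *sb, sector_t blocknr,
				const void *data, size_t len)
{
	struct buffer_head *bh = sb_bread(sb, blocknr);

	if (!bh)
		return -EIO;		/* block was unreadable */
	lock_buffer(bh);
	memcpy(bh->b_data, data, len);	/* assumes len <= bh->b_size */
	unlock_buffer(bh);
	mark_buffer_dirty(bh);		/* dirties bh, its page and inode */
	brelse(bh);
	return 0;
}
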
12681da177e4SLinus Torvalds /*
12691da177e4SLinus Torvalds  * Decrement a buffer_head's reference count.  If all buffers against a page
12701da177e4SLinus Torvalds  * have zero reference count, are clean and unlocked, and if the page is clean
12711da177e4SLinus Torvalds  * and unlocked then try_to_free_buffers() may strip the buffers from the page
12721da177e4SLinus Torvalds  * in preparation for freeing it (sometimes, rarely, buffers are removed from
12731da177e4SLinus Torvalds  * a page but it ends up not being freed, and buffers may later be reattached).
12741da177e4SLinus Torvalds  */
12751da177e4SLinus Torvalds void __brelse(struct buffer_head * buf)
12761da177e4SLinus Torvalds {
12771da177e4SLinus Torvalds 	if (atomic_read(&buf->b_count)) {
12781da177e4SLinus Torvalds 		put_bh(buf);
12791da177e4SLinus Torvalds 		return;
12801da177e4SLinus Torvalds 	}
12815c752ad9SArjan van de Ven 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
12821da177e4SLinus Torvalds }
12831da177e4SLinus Torvalds 
12841da177e4SLinus Torvalds /*
12851da177e4SLinus Torvalds  * bforget() is like brelse(), except it discards any
12861da177e4SLinus Torvalds  * potentially dirty data.
12871da177e4SLinus Torvalds  */
12881da177e4SLinus Torvalds void __bforget(struct buffer_head *bh)
12891da177e4SLinus Torvalds {
12901da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
1291535ee2fbSJan Kara 	if (bh->b_assoc_map) {
12921da177e4SLinus Torvalds 		struct address_space *buffer_mapping = bh->b_page->mapping;
12931da177e4SLinus Torvalds 
12941da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
12951da177e4SLinus Torvalds 		list_del_init(&bh->b_assoc_buffers);
129658ff407bSJan Kara 		bh->b_assoc_map = NULL;
12971da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
12981da177e4SLinus Torvalds 	}
12991da177e4SLinus Torvalds 	__brelse(bh);
13001da177e4SLinus Torvalds }
13011da177e4SLinus Torvalds 
13021da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh)
13031da177e4SLinus Torvalds {
13041da177e4SLinus Torvalds 	lock_buffer(bh);
13051da177e4SLinus Torvalds 	if (buffer_uptodate(bh)) {
13061da177e4SLinus Torvalds 		unlock_buffer(bh);
13071da177e4SLinus Torvalds 		return bh;
13081da177e4SLinus Torvalds 	} else {
13091da177e4SLinus Torvalds 		get_bh(bh);
13101da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_read_sync;
13111da177e4SLinus Torvalds 		submit_bh(READ, bh);
13121da177e4SLinus Torvalds 		wait_on_buffer(bh);
13131da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
13141da177e4SLinus Torvalds 			return bh;
13151da177e4SLinus Torvalds 	}
13161da177e4SLinus Torvalds 	brelse(bh);
13171da177e4SLinus Torvalds 	return NULL;
13181da177e4SLinus Torvalds }
13191da177e4SLinus Torvalds 
13201da177e4SLinus Torvalds /*
13211da177e4SLinus Torvalds  * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
13221da177e4SLinus Torvalds  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
13231da177e4SLinus Torvalds  * refcount elevated by one when they're in an LRU.  A buffer can only appear
13241da177e4SLinus Torvalds  * once in a particular CPU's LRU.  A single buffer can be present in multiple
13251da177e4SLinus Torvalds  * CPU's LRUs at the same time.
13261da177e4SLinus Torvalds  * CPUs' LRUs at the same time.
13271da177e4SLinus Torvalds  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
13281da177e4SLinus Torvalds  * sb_find_get_block().
13291da177e4SLinus Torvalds  *
13301da177e4SLinus Torvalds  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
13311da177e4SLinus Torvalds  * a local interrupt disable for that.
13321da177e4SLinus Torvalds  */
13331da177e4SLinus Torvalds 
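/*
 * Editorial note: with BH_LRU_SIZE == 8 (below), a lookup that hits
 * bhs[3] shifts bhs[0..2] down one slot and moves the hit to bhs[0];
 * installing a ninth distinct buffer evicts bhs[7] and releases the
 * reference the LRU held on it.
 */
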
13341da177e4SLinus Torvalds #define BH_LRU_SIZE	8
13351da177e4SLinus Torvalds 
13361da177e4SLinus Torvalds struct bh_lru {
13371da177e4SLinus Torvalds 	struct buffer_head *bhs[BH_LRU_SIZE];
13381da177e4SLinus Torvalds };
13391da177e4SLinus Torvalds 
13401da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
13411da177e4SLinus Torvalds 
13421da177e4SLinus Torvalds #ifdef CONFIG_SMP
13431da177e4SLinus Torvalds #define bh_lru_lock()	local_irq_disable()
13441da177e4SLinus Torvalds #define bh_lru_unlock()	local_irq_enable()
13451da177e4SLinus Torvalds #else
13461da177e4SLinus Torvalds #define bh_lru_lock()	preempt_disable()
13471da177e4SLinus Torvalds #define bh_lru_unlock()	preempt_enable()
13481da177e4SLinus Torvalds #endif
13491da177e4SLinus Torvalds 
13501da177e4SLinus Torvalds static inline void check_irqs_on(void)
13511da177e4SLinus Torvalds {
13521da177e4SLinus Torvalds #ifdef irqs_disabled
13531da177e4SLinus Torvalds 	BUG_ON(irqs_disabled());
13541da177e4SLinus Torvalds #endif
13551da177e4SLinus Torvalds }
13561da177e4SLinus Torvalds 
13571da177e4SLinus Torvalds /*
13581da177e4SLinus Torvalds  * The LRU management algorithm is dopey-but-simple.  Sorry.
13591da177e4SLinus Torvalds  */
13601da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh)
13611da177e4SLinus Torvalds {
13621da177e4SLinus Torvalds 	struct buffer_head *evictee = NULL;
13631da177e4SLinus Torvalds 	struct bh_lru *lru;
13641da177e4SLinus Torvalds 
13651da177e4SLinus Torvalds 	check_irqs_on();
13661da177e4SLinus Torvalds 	bh_lru_lock();
13671da177e4SLinus Torvalds 	lru = &__get_cpu_var(bh_lrus);
13681da177e4SLinus Torvalds 	if (lru->bhs[0] != bh) {
13691da177e4SLinus Torvalds 		struct buffer_head *bhs[BH_LRU_SIZE];
13701da177e4SLinus Torvalds 		int in;
13711da177e4SLinus Torvalds 		int out = 0;
13721da177e4SLinus Torvalds 
13731da177e4SLinus Torvalds 		get_bh(bh);
13741da177e4SLinus Torvalds 		bhs[out++] = bh;
13751da177e4SLinus Torvalds 		for (in = 0; in < BH_LRU_SIZE; in++) {
13761da177e4SLinus Torvalds 			struct buffer_head *bh2 = lru->bhs[in];
13771da177e4SLinus Torvalds 
13781da177e4SLinus Torvalds 			if (bh2 == bh) {
13791da177e4SLinus Torvalds 				__brelse(bh2);
13801da177e4SLinus Torvalds 			} else {
13811da177e4SLinus Torvalds 				if (out >= BH_LRU_SIZE) {
13821da177e4SLinus Torvalds 					BUG_ON(evictee != NULL);
13831da177e4SLinus Torvalds 					evictee = bh2;
13841da177e4SLinus Torvalds 				} else {
13851da177e4SLinus Torvalds 					bhs[out++] = bh2;
13861da177e4SLinus Torvalds 				}
13871da177e4SLinus Torvalds 			}
13881da177e4SLinus Torvalds 		}
13891da177e4SLinus Torvalds 		while (out < BH_LRU_SIZE)
13901da177e4SLinus Torvalds 			bhs[out++] = NULL;
13911da177e4SLinus Torvalds 		memcpy(lru->bhs, bhs, sizeof(bhs));
13921da177e4SLinus Torvalds 	}
13931da177e4SLinus Torvalds 	bh_lru_unlock();
13941da177e4SLinus Torvalds 
13951da177e4SLinus Torvalds 	if (evictee)
13961da177e4SLinus Torvalds 		__brelse(evictee);
13971da177e4SLinus Torvalds }
13981da177e4SLinus Torvalds 
13991da177e4SLinus Torvalds /*
14001da177e4SLinus Torvalds  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
14011da177e4SLinus Torvalds  */
1402858119e1SArjan van de Ven static struct buffer_head *
14033991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
14041da177e4SLinus Torvalds {
14051da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
14061da177e4SLinus Torvalds 	struct bh_lru *lru;
14073991d3bdSTomasz Kvarsin 	unsigned int i;
14081da177e4SLinus Torvalds 
14091da177e4SLinus Torvalds 	check_irqs_on();
14101da177e4SLinus Torvalds 	bh_lru_lock();
14111da177e4SLinus Torvalds 	lru = &__get_cpu_var(bh_lrus);
14121da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
14131da177e4SLinus Torvalds 		struct buffer_head *bh = lru->bhs[i];
14141da177e4SLinus Torvalds 
14151da177e4SLinus Torvalds 		if (bh && bh->b_bdev == bdev &&
14161da177e4SLinus Torvalds 				bh->b_blocknr == block && bh->b_size == size) {
14171da177e4SLinus Torvalds 			if (i) {
14181da177e4SLinus Torvalds 				while (i) {
14191da177e4SLinus Torvalds 					lru->bhs[i] = lru->bhs[i - 1];
14201da177e4SLinus Torvalds 					i--;
14211da177e4SLinus Torvalds 				}
14221da177e4SLinus Torvalds 				lru->bhs[0] = bh;
14231da177e4SLinus Torvalds 			}
14241da177e4SLinus Torvalds 			get_bh(bh);
14251da177e4SLinus Torvalds 			ret = bh;
14261da177e4SLinus Torvalds 			break;
14271da177e4SLinus Torvalds 		}
14281da177e4SLinus Torvalds 	}
14291da177e4SLinus Torvalds 	bh_lru_unlock();
14301da177e4SLinus Torvalds 	return ret;
14311da177e4SLinus Torvalds }
14321da177e4SLinus Torvalds 
14331da177e4SLinus Torvalds /*
14341da177e4SLinus Torvalds  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
14351da177e4SLinus Torvalds  * it in the LRU and mark it as accessed.  If it is not present then return
14361da177e4SLinus Torvalds  * NULL
14371da177e4SLinus Torvalds  * NULL.
14381da177e4SLinus Torvalds struct buffer_head *
14393991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
14401da177e4SLinus Torvalds {
14411da177e4SLinus Torvalds 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
14421da177e4SLinus Torvalds 
14431da177e4SLinus Torvalds 	if (bh == NULL) {
1444385fd4c5SCoywolf Qi Hunt 		bh = __find_get_block_slow(bdev, block);
14451da177e4SLinus Torvalds 		if (bh)
14461da177e4SLinus Torvalds 			bh_lru_install(bh);
14471da177e4SLinus Torvalds 	}
14481da177e4SLinus Torvalds 	if (bh)
14491da177e4SLinus Torvalds 		touch_buffer(bh);
14501da177e4SLinus Torvalds 	return bh;
14511da177e4SLinus Torvalds }
14521da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block);
14531da177e4SLinus Torvalds 
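/*
 * Editorial sketch (not part of the original file): using the lookup as
 * a cheap probe.  Unlike __getblk(), __find_get_block() never allocates
 * or performs I/O, so it suits callers that only want to act on a block
 * if it is already cached.  sb_find_get_block() is the usual wrapper.
 */
static void example_forget_if_cached(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh = sb_find_get_block(sb, blocknr);

	if (bh) {
		clear_buffer_dirty(bh);	/* discard any pending writeout */
		brelse(bh);
	}
}
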
14541da177e4SLinus Torvalds /*
14551da177e4SLinus Torvalds  * __getblk will locate (and, if necessary, create) the buffer_head
14561da177e4SLinus Torvalds  * which corresponds to the passed block_device, block and size. The
14571da177e4SLinus Torvalds  * returned buffer has its reference count incremented.
14581da177e4SLinus Torvalds  *
14591da177e4SLinus Torvalds  * __getblk() cannot fail - it just keeps trying.  If you pass it an
14601da177e4SLinus Torvalds  * illegal block number, __getblk() will happily return a buffer_head
14611da177e4SLinus Torvalds  * which represents the non-existent block.  Very weird.
14621da177e4SLinus Torvalds  *
14631da177e4SLinus Torvalds  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
14641da177e4SLinus Torvalds  * attempt is failing.  FIXME, perhaps?
14651da177e4SLinus Torvalds  */
14661da177e4SLinus Torvalds struct buffer_head *
14673991d3bdSTomasz Kvarsin __getblk(struct block_device *bdev, sector_t block, unsigned size)
14681da177e4SLinus Torvalds {
14691da177e4SLinus Torvalds 	struct buffer_head *bh = __find_get_block(bdev, block, size);
14701da177e4SLinus Torvalds 
14711da177e4SLinus Torvalds 	might_sleep();
14721da177e4SLinus Torvalds 	if (bh == NULL)
14731da177e4SLinus Torvalds 		bh = __getblk_slow(bdev, block, size);
14741da177e4SLinus Torvalds 	return bh;
14751da177e4SLinus Torvalds }
14761da177e4SLinus Torvalds EXPORT_SYMBOL(__getblk);
14771da177e4SLinus Torvalds 
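/*
 * Editorial sketch (not part of the original file): __getblk() is the
 * right call when the caller will overwrite the whole block and has no
 * use for its current contents - no read is issued.  sb_getblk() is
 * the usual wrapper.
 */
static void example_zero_block(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh = sb_getblk(sb, blocknr);	/* cannot fail */

	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);	/* in-memory contents now valid */
	unlock_buffer(bh);
	mark_buffer_dirty(bh);
	brelse(bh);
}
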
14781da177e4SLinus Torvalds /*
14791da177e4SLinus Torvalds  * Do async read-ahead on a buffer.
14801da177e4SLinus Torvalds  */
14813991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
14821da177e4SLinus Torvalds {
14831da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
1484a3e713b5SAndrew Morton 	if (likely(bh)) {
14851da177e4SLinus Torvalds 		ll_rw_block(READA, 1, &bh);
14861da177e4SLinus Torvalds 		brelse(bh);
14871da177e4SLinus Torvalds 	}
1488a3e713b5SAndrew Morton }
14891da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead);
14901da177e4SLinus Torvalds 
14911da177e4SLinus Torvalds /**
14921da177e4SLinus Torvalds  *  __bread() - reads a specified block and returns the bh
149367be2dd1SMartin Waitz  *  @bdev: the block_device to read from
14941da177e4SLinus Torvalds  *  @block: number of block
14951da177e4SLinus Torvalds  *  @size: size (in bytes) to read
14961da177e4SLinus Torvalds  *
14971da177e4SLinus Torvalds  *  Reads a specified block, and returns buffer head that contains it.
14981da177e4SLinus Torvalds  *  It returns NULL if the block was unreadable.
14991da177e4SLinus Torvalds  */
15001da177e4SLinus Torvalds struct buffer_head *
15013991d3bdSTomasz Kvarsin __bread(struct block_device *bdev, sector_t block, unsigned size)
15021da177e4SLinus Torvalds {
15031da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
15041da177e4SLinus Torvalds 
1505a3e713b5SAndrew Morton 	if (likely(bh) && !buffer_uptodate(bh))
15061da177e4SLinus Torvalds 		bh = __bread_slow(bh);
15071da177e4SLinus Torvalds 	return bh;
15081da177e4SLinus Torvalds }
15091da177e4SLinus Torvalds EXPORT_SYMBOL(__bread);
15101da177e4SLinus Torvalds 
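/*
 * Editorial sketch (not part of the original file): reading one block
 * and failing cleanly.  Since __getblk() cannot fail, a NULL return
 * from __bread()/sb_bread() means the block was unreadable.
 */
static int example_read_block(struct super_block *sb, sector_t blocknr,
			      void *buf)
{
	struct buffer_head *bh = sb_bread(sb, blocknr);

	if (!bh)
		return -EIO;
	memcpy(buf, bh->b_data, bh->b_size);	/* caller sized buf to match */
	brelse(bh);
	return 0;
}
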
15111da177e4SLinus Torvalds /*
15121da177e4SLinus Torvalds  * invalidate_bh_lrus() is called rarely - but not only at unmount.
15131da177e4SLinus Torvalds  * This doesn't race because it runs in each cpu either in irq
15141da177e4SLinus Torvalds  * or with preempt disabled.
15151da177e4SLinus Torvalds  */
15161da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg)
15171da177e4SLinus Torvalds {
15181da177e4SLinus Torvalds 	struct bh_lru *b = &get_cpu_var(bh_lrus);
15191da177e4SLinus Torvalds 	int i;
15201da177e4SLinus Torvalds 
15211da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
15221da177e4SLinus Torvalds 		brelse(b->bhs[i]);
15231da177e4SLinus Torvalds 		b->bhs[i] = NULL;
15241da177e4SLinus Torvalds 	}
15251da177e4SLinus Torvalds 	put_cpu_var(bh_lrus);
15261da177e4SLinus Torvalds }
15271da177e4SLinus Torvalds 
1528f9a14399SPeter Zijlstra void invalidate_bh_lrus(void)
15291da177e4SLinus Torvalds {
153015c8b6c1SJens Axboe 	on_each_cpu(invalidate_bh_lru, NULL, 1);
15311da177e4SLinus Torvalds }
15329db5579bSNick Piggin EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
15331da177e4SLinus Torvalds 
15341da177e4SLinus Torvalds void set_bh_page(struct buffer_head *bh,
15351da177e4SLinus Torvalds 		struct page *page, unsigned long offset)
15361da177e4SLinus Torvalds {
15371da177e4SLinus Torvalds 	bh->b_page = page;
1538e827f923SEric Sesterhenn 	BUG_ON(offset >= PAGE_SIZE);
15391da177e4SLinus Torvalds 	if (PageHighMem(page))
15401da177e4SLinus Torvalds 		/*
15411da177e4SLinus Torvalds 		 * This catches illegal uses and preserves the offset:
15421da177e4SLinus Torvalds 		 */
15431da177e4SLinus Torvalds 		bh->b_data = (char *)(0 + offset);
15441da177e4SLinus Torvalds 	else
15451da177e4SLinus Torvalds 		bh->b_data = page_address(page) + offset;
15461da177e4SLinus Torvalds }
15471da177e4SLinus Torvalds EXPORT_SYMBOL(set_bh_page);
15481da177e4SLinus Torvalds 
15491da177e4SLinus Torvalds /*
15501da177e4SLinus Torvalds  * Called when truncating a buffer on a page completely.
15511da177e4SLinus Torvalds  * Called when a buffer on a page is truncated completely.
1552858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh)
15531da177e4SLinus Torvalds {
15541da177e4SLinus Torvalds 	lock_buffer(bh);
15551da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
15561da177e4SLinus Torvalds 	bh->b_bdev = NULL;
15571da177e4SLinus Torvalds 	clear_buffer_mapped(bh);
15581da177e4SLinus Torvalds 	clear_buffer_req(bh);
15591da177e4SLinus Torvalds 	clear_buffer_new(bh);
15601da177e4SLinus Torvalds 	clear_buffer_delay(bh);
156133a266ddSDavid Chinner 	clear_buffer_unwritten(bh);
15621da177e4SLinus Torvalds 	unlock_buffer(bh);
15631da177e4SLinus Torvalds }
15641da177e4SLinus Torvalds 
15651da177e4SLinus Torvalds /**
15661da177e4SLinus Torvalds  * block_invalidatepage - invalidate part or all of a buffer-backed page
15671da177e4SLinus Torvalds  *
15681da177e4SLinus Torvalds  * @page: the page which is affected
15691da177e4SLinus Torvalds  * @offset: the index of the truncation point
15701da177e4SLinus Torvalds  *
15711da177e4SLinus Torvalds  * block_invalidatepage() is called when all or part of the page has become
15721da177e4SLinus Torvalds  * invalidatedby a truncate operation.
15731da177e4SLinus Torvalds  * invalidated by a truncate operation.
15741da177e4SLinus Torvalds  * block_invalidatepage() does not have to release all buffers, but it must
15751da177e4SLinus Torvalds  * ensure that no dirty buffer is left outside @offset and that no I/O
15761da177e4SLinus Torvalds  * is underway against any of the blocks which are outside the truncation
15771da177e4SLinus Torvalds  * point.  Because the caller is about to free (and possibly reuse) those
15781da177e4SLinus Torvalds  * point, because the caller is about to free (and possibly reuse) those
15791da177e4SLinus Torvalds  */
15802ff28e22SNeilBrown void block_invalidatepage(struct page *page, unsigned long offset)
15811da177e4SLinus Torvalds {
15821da177e4SLinus Torvalds 	struct buffer_head *head, *bh, *next;
15831da177e4SLinus Torvalds 	unsigned int curr_off = 0;
15841da177e4SLinus Torvalds 
15851da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
15861da177e4SLinus Torvalds 	if (!page_has_buffers(page))
15871da177e4SLinus Torvalds 		goto out;
15881da177e4SLinus Torvalds 
15891da177e4SLinus Torvalds 	head = page_buffers(page);
15901da177e4SLinus Torvalds 	bh = head;
15911da177e4SLinus Torvalds 	do {
15921da177e4SLinus Torvalds 		unsigned int next_off = curr_off + bh->b_size;
15931da177e4SLinus Torvalds 		next = bh->b_this_page;
15941da177e4SLinus Torvalds 
15951da177e4SLinus Torvalds 		/*
15961da177e4SLinus Torvalds 		 * is this block fully invalidated?
15971da177e4SLinus Torvalds 		 */
15981da177e4SLinus Torvalds 		if (offset <= curr_off)
15991da177e4SLinus Torvalds 			discard_buffer(bh);
16001da177e4SLinus Torvalds 		curr_off = next_off;
16011da177e4SLinus Torvalds 		bh = next;
16021da177e4SLinus Torvalds 	} while (bh != head);
16031da177e4SLinus Torvalds 
16041da177e4SLinus Torvalds 	/*
16051da177e4SLinus Torvalds 	 * We release buffers only if the entire page is being invalidated.
16061da177e4SLinus Torvalds 	 * The get_block cached value has been unconditionally invalidated,
16071da177e4SLinus Torvalds 	 * so real IO is not possible anymore.
16081da177e4SLinus Torvalds 	 */
16091da177e4SLinus Torvalds 	if (offset == 0)
16102ff28e22SNeilBrown 		try_to_release_page(page, 0);
16111da177e4SLinus Torvalds out:
16122ff28e22SNeilBrown 	return;
16131da177e4SLinus Torvalds }
16141da177e4SLinus Torvalds EXPORT_SYMBOL(block_invalidatepage);
16151da177e4SLinus Torvalds 
16161da177e4SLinus Torvalds /*
16171da177e4SLinus Torvalds  * We attach and possibly dirty the buffers atomically wrt
16181da177e4SLinus Torvalds  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
16191da177e4SLinus Torvalds  * is already excluded via the page lock.
16201da177e4SLinus Torvalds  */
16211da177e4SLinus Torvalds void create_empty_buffers(struct page *page,
16221da177e4SLinus Torvalds 			unsigned long blocksize, unsigned long b_state)
16231da177e4SLinus Torvalds {
16241da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *tail;
16251da177e4SLinus Torvalds 
16261da177e4SLinus Torvalds 	head = alloc_page_buffers(page, blocksize, 1);
16271da177e4SLinus Torvalds 	bh = head;
16281da177e4SLinus Torvalds 	do {
16291da177e4SLinus Torvalds 		bh->b_state |= b_state;
16301da177e4SLinus Torvalds 		tail = bh;
16311da177e4SLinus Torvalds 		bh = bh->b_this_page;
16321da177e4SLinus Torvalds 	} while (bh);
16331da177e4SLinus Torvalds 	tail->b_this_page = head;
16341da177e4SLinus Torvalds 
16351da177e4SLinus Torvalds 	spin_lock(&page->mapping->private_lock);
16361da177e4SLinus Torvalds 	if (PageUptodate(page) || PageDirty(page)) {
16371da177e4SLinus Torvalds 		bh = head;
16381da177e4SLinus Torvalds 		do {
16391da177e4SLinus Torvalds 			if (PageDirty(page))
16401da177e4SLinus Torvalds 				set_buffer_dirty(bh);
16411da177e4SLinus Torvalds 			if (PageUptodate(page))
16421da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
16431da177e4SLinus Torvalds 			bh = bh->b_this_page;
16441da177e4SLinus Torvalds 		} while (bh != head);
16451da177e4SLinus Torvalds 	}
16461da177e4SLinus Torvalds 	attach_page_buffers(page, head);
16471da177e4SLinus Torvalds 	spin_unlock(&page->mapping->private_lock);
16481da177e4SLinus Torvalds }
16491da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers);
16501da177e4SLinus Torvalds 
16511da177e4SLinus Torvalds /*
16521da177e4SLinus Torvalds  * We are taking a block for data and we don't want any output from any
16531da177e4SLinus Torvalds  * buffer-cache aliases from the moment this function returns until
16541da177e4SLinus Torvalds  * something explicitly marks the buffer dirty (hopefully that will
16551da177e4SLinus Torvalds  * not happen until we free that block ;-)
16561da177e4SLinus Torvalds  * We don't even need to mark it not-uptodate - nobody can expect
16571da177e4SLinus Torvalds  * anything from a newly allocated buffer anyway. We used to use
16581da177e4SLinus Torvalds  * unmap_buffer() for such invalidation, but that was wrong. We definitely
16591da177e4SLinus Torvalds  * don't want to mark the alias unmapped, for example - it would confuse
16601da177e4SLinus Torvalds  * anyone who might pick it with bread() afterwards...
16611da177e4SLinus Torvalds  *
16621da177e4SLinus Torvalds  * Also..  Note that bforget() doesn't lock the buffer.  So there can
16631da177e4SLinus Torvalds  * be writeout I/O going on against recently-freed buffers.  We don't
16641da177e4SLinus Torvalds  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
16651da177e4SLinus Torvalds  * only if we really need to.  That happens here.
16661da177e4SLinus Torvalds  */
16671da177e4SLinus Torvalds void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
16681da177e4SLinus Torvalds {
16691da177e4SLinus Torvalds 	struct buffer_head *old_bh;
16701da177e4SLinus Torvalds 
16711da177e4SLinus Torvalds 	might_sleep();
16721da177e4SLinus Torvalds 
1673385fd4c5SCoywolf Qi Hunt 	old_bh = __find_get_block_slow(bdev, block);
16741da177e4SLinus Torvalds 	if (old_bh) {
16751da177e4SLinus Torvalds 		clear_buffer_dirty(old_bh);
16761da177e4SLinus Torvalds 		wait_on_buffer(old_bh);
16771da177e4SLinus Torvalds 		clear_buffer_req(old_bh);
16781da177e4SLinus Torvalds 		__brelse(old_bh);
16791da177e4SLinus Torvalds 	}
16801da177e4SLinus Torvalds }
16811da177e4SLinus Torvalds EXPORT_SYMBOL(unmap_underlying_metadata);
16821da177e4SLinus Torvalds 
16831da177e4SLinus Torvalds /*
16841da177e4SLinus Torvalds  * NOTE! All mapped/uptodate combinations are valid:
16851da177e4SLinus Torvalds  *
16861da177e4SLinus Torvalds  *	Mapped	Uptodate	Meaning
16871da177e4SLinus Torvalds  *
16881da177e4SLinus Torvalds  *	No	No		"unknown" - must do get_block()
16891da177e4SLinus Torvalds  *	No	Yes		"hole" - zero-filled
16901da177e4SLinus Torvalds  *	Yes	No		"allocated" - allocated on disk, not read in
16911da177e4SLinus Torvalds  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
16921da177e4SLinus Torvalds  *
16931da177e4SLinus Torvalds  * "Dirty" is valid only with the last case (mapped+uptodate).
16941da177e4SLinus Torvalds  */
16951da177e4SLinus Torvalds 
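/*
 * Editorial sketch (not part of the original file): how readpage-style
 * code acts on the four states tabulated above, for a single buffer of
 * a file mapping.  The function and parameter names are hypothetical.
 */
static int example_bring_uptodate(struct inode *inode, sector_t iblock,
				  struct buffer_head *bh,
				  get_block_t *get_block)
{
	if (!buffer_mapped(bh) && !buffer_uptodate(bh)) {
		/* "unknown": ask the filesystem (create == 0: no allocation) */
		int err = get_block(inode, iblock, bh, 0);
		if (err)
			return err;
	}
	if (buffer_uptodate(bh))
		return 0;			/* "hole" or "valid": no I/O */
	if (!buffer_mapped(bh)) {
		/* still unmapped: a hole - zero-fill it */
		memset(bh->b_data, 0, bh->b_size);
		set_buffer_uptodate(bh);
		return 0;
	}
	/* "allocated": on disk but not in memory - read it in */
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
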
16961da177e4SLinus Torvalds /*
16971da177e4SLinus Torvalds  * While block_write_full_page is writing back the dirty buffers under
16981da177e4SLinus Torvalds  * the page lock, whoever dirtied the buffers may decide to clean them
16991da177e4SLinus Torvalds  * again at any time.  We handle that by only looking at the buffer
17001da177e4SLinus Torvalds  * state inside lock_buffer().
17011da177e4SLinus Torvalds  *
17021da177e4SLinus Torvalds  * If block_write_full_page() is called for regular writeback
17031da177e4SLinus Torvalds  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
17041da177e4SLinus Torvalds  * locked buffer.  This can only happen if someone has written the buffer
17051da177e4SLinus Torvalds  * directly, with submit_bh().  At the address_space level PageWriteback
17061da177e4SLinus Torvalds  * prevents this contention from occurring.
17071da177e4SLinus Torvalds  */
17081da177e4SLinus Torvalds static int __block_write_full_page(struct inode *inode, struct page *page,
17091da177e4SLinus Torvalds 			get_block_t *get_block, struct writeback_control *wbc)
17101da177e4SLinus Torvalds {
17111da177e4SLinus Torvalds 	int err;
17121da177e4SLinus Torvalds 	sector_t block;
17131da177e4SLinus Torvalds 	sector_t last_block;
1714f0fbd5fcSAndrew Morton 	struct buffer_head *bh, *head;
1715b0cf2321SBadari Pulavarty 	const unsigned blocksize = 1 << inode->i_blkbits;
17161da177e4SLinus Torvalds 	int nr_underway = 0;
17171da177e4SLinus Torvalds 
17181da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
17191da177e4SLinus Torvalds 
17201da177e4SLinus Torvalds 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
17211da177e4SLinus Torvalds 
17221da177e4SLinus Torvalds 	if (!page_has_buffers(page)) {
1723b0cf2321SBadari Pulavarty 		create_empty_buffers(page, blocksize,
17241da177e4SLinus Torvalds 					(1 << BH_Dirty)|(1 << BH_Uptodate));
17251da177e4SLinus Torvalds 	}
17261da177e4SLinus Torvalds 
17271da177e4SLinus Torvalds 	/*
17281da177e4SLinus Torvalds 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
17291da177e4SLinus Torvalds 	 * here, and the (potentially unmapped) buffers may become dirty at
17301da177e4SLinus Torvalds 	 * any time.  If a buffer becomes dirty here after we've inspected it
17311da177e4SLinus Torvalds 	 * then we just miss that fact, and the page stays dirty.
17321da177e4SLinus Torvalds 	 *
17331da177e4SLinus Torvalds 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
17341da177e4SLinus Torvalds 	 * handle that here by just cleaning them.
17351da177e4SLinus Torvalds 	 */
17361da177e4SLinus Torvalds 
173754b21a79SAndrew Morton 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
17381da177e4SLinus Torvalds 	head = page_buffers(page);
17391da177e4SLinus Torvalds 	bh = head;
17401da177e4SLinus Torvalds 
17411da177e4SLinus Torvalds 	/*
17421da177e4SLinus Torvalds 	 * Get all the dirty buffers mapped to disk addresses and
17431da177e4SLinus Torvalds 	 * handle any aliases from the underlying blockdev's mapping.
17441da177e4SLinus Torvalds 	 */
17451da177e4SLinus Torvalds 	do {
17461da177e4SLinus Torvalds 		if (block > last_block) {
17471da177e4SLinus Torvalds 			/*
17481da177e4SLinus Torvalds 			 * mapped buffers outside i_size will occur, because
17491da177e4SLinus Torvalds 			 * this page can be outside i_size when there is a
17501da177e4SLinus Torvalds 			 * truncate in progress.
17511da177e4SLinus Torvalds 			 */
17521da177e4SLinus Torvalds 			/*
17531da177e4SLinus Torvalds 			 * The buffer was zeroed by block_write_full_page()
17541da177e4SLinus Torvalds 			 */
17551da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
17561da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
175729a814d2SAlex Tomas 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
175829a814d2SAlex Tomas 			   buffer_dirty(bh)) {
1759b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
17601da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
17611da177e4SLinus Torvalds 			if (err)
17621da177e4SLinus Torvalds 				goto recover;
176329a814d2SAlex Tomas 			clear_buffer_delay(bh);
17641da177e4SLinus Torvalds 			if (buffer_new(bh)) {
17651da177e4SLinus Torvalds 				/* blockdev mappings never come here */
17661da177e4SLinus Torvalds 				clear_buffer_new(bh);
17671da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
17681da177e4SLinus Torvalds 							bh->b_blocknr);
17691da177e4SLinus Torvalds 			}
17701da177e4SLinus Torvalds 		}
17711da177e4SLinus Torvalds 		bh = bh->b_this_page;
17721da177e4SLinus Torvalds 		block++;
17731da177e4SLinus Torvalds 	} while (bh != head);
17741da177e4SLinus Torvalds 
17751da177e4SLinus Torvalds 	do {
17761da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
17771da177e4SLinus Torvalds 			continue;
17781da177e4SLinus Torvalds 		/*
17791da177e4SLinus Torvalds 		 * If it's a fully non-blocking write attempt and we cannot
17801da177e4SLinus Torvalds 		 * lock the buffer then redirty the page.  Note that this can
17811da177e4SLinus Torvalds 		 * potentially cause a busy-wait loop from pdflush and kswapd
17821da177e4SLinus Torvalds 		 * activity, but those code paths have their own higher-level
17831da177e4SLinus Torvalds 		 * throttling.
17841da177e4SLinus Torvalds 		 */
17851da177e4SLinus Torvalds 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
17861da177e4SLinus Torvalds 			lock_buffer(bh);
1787ca5de404SNick Piggin 		} else if (!trylock_buffer(bh)) {
17881da177e4SLinus Torvalds 			redirty_page_for_writepage(wbc, page);
17891da177e4SLinus Torvalds 			continue;
17901da177e4SLinus Torvalds 		}
17911da177e4SLinus Torvalds 		if (test_clear_buffer_dirty(bh)) {
17921da177e4SLinus Torvalds 			mark_buffer_async_write(bh);
17931da177e4SLinus Torvalds 		} else {
17941da177e4SLinus Torvalds 			unlock_buffer(bh);
17951da177e4SLinus Torvalds 		}
17961da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
17971da177e4SLinus Torvalds 
17981da177e4SLinus Torvalds 	/*
17991da177e4SLinus Torvalds 	 * The page and its buffers are protected by PageWriteback(), so we can
18001da177e4SLinus Torvalds 	 * drop the bh refcounts early.
18011da177e4SLinus Torvalds 	 */
18021da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
18031da177e4SLinus Torvalds 	set_page_writeback(page);
18041da177e4SLinus Torvalds 
18051da177e4SLinus Torvalds 	do {
18061da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
18071da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
18081da177e4SLinus Torvalds 			submit_bh(WRITE, bh);
18091da177e4SLinus Torvalds 			nr_underway++;
1810ad576e63SNick Piggin 		}
18111da177e4SLinus Torvalds 		bh = next;
18121da177e4SLinus Torvalds 	} while (bh != head);
181305937baaSAndrew Morton 	unlock_page(page);
18141da177e4SLinus Torvalds 
18151da177e4SLinus Torvalds 	err = 0;
18161da177e4SLinus Torvalds done:
18171da177e4SLinus Torvalds 	if (nr_underway == 0) {
18181da177e4SLinus Torvalds 		/*
18191da177e4SLinus Torvalds 		 * The page was marked dirty, but the buffers were
18201da177e4SLinus Torvalds 		 * clean.  Someone wrote them back by hand with
18211da177e4SLinus Torvalds 		 * ll_rw_block/submit_bh.  A rare case.
18221da177e4SLinus Torvalds 		 */
18231da177e4SLinus Torvalds 		end_page_writeback(page);
18243d67f2d7SNick Piggin 
18251da177e4SLinus Torvalds 		/*
18261da177e4SLinus Torvalds 		 * The page and buffer_heads can be released at any time from
18271da177e4SLinus Torvalds 		 * here on.
18281da177e4SLinus Torvalds 		 */
18291da177e4SLinus Torvalds 	}
18301da177e4SLinus Torvalds 	return err;
18311da177e4SLinus Torvalds 
18321da177e4SLinus Torvalds recover:
18331da177e4SLinus Torvalds 	/*
18341da177e4SLinus Torvalds 	 * ENOSPC, or some other error.  We may already have added some
18351da177e4SLinus Torvalds 	 * blocks to the file, so we need to write these out to avoid
18361da177e4SLinus Torvalds 	 * exposing stale data.
18371da177e4SLinus Torvalds 	 * The page is currently locked and not marked for writeback
18381da177e4SLinus Torvalds 	 */
18391da177e4SLinus Torvalds 	bh = head;
18401da177e4SLinus Torvalds 	/* Recovery: lock and submit the mapped buffers */
18411da177e4SLinus Torvalds 	do {
184229a814d2SAlex Tomas 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
184329a814d2SAlex Tomas 		    !buffer_delay(bh)) {
18441da177e4SLinus Torvalds 			lock_buffer(bh);
18451da177e4SLinus Torvalds 			mark_buffer_async_write(bh);
18461da177e4SLinus Torvalds 		} else {
18471da177e4SLinus Torvalds 			/*
18481da177e4SLinus Torvalds 			 * The buffer may have been set dirty during
18491da177e4SLinus Torvalds 			 * attachment to a dirty page.
18501da177e4SLinus Torvalds 			 */
18511da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
18521da177e4SLinus Torvalds 		}
18531da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
18541da177e4SLinus Torvalds 	SetPageError(page);
18551da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
18567e4c3690SAndrew Morton 	mapping_set_error(page->mapping, err);
18571da177e4SLinus Torvalds 	set_page_writeback(page);
18581da177e4SLinus Torvalds 	do {
18591da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
18601da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
18611da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
18621da177e4SLinus Torvalds 			submit_bh(WRITE, bh);
18631da177e4SLinus Torvalds 			nr_underway++;
1864ad576e63SNick Piggin 		}
18651da177e4SLinus Torvalds 		bh = next;
18661da177e4SLinus Torvalds 	} while (bh != head);
1867ffda9d30SNick Piggin 	unlock_page(page);
18681da177e4SLinus Torvalds 	goto done;
18691da177e4SLinus Torvalds }
18701da177e4SLinus Torvalds 
1871afddba49SNick Piggin /*
1872afddba49SNick Piggin  * If a page has any new buffers, zero them out here, and mark them uptodate
1873afddba49SNick Piggin  * and dirty so they'll be written out (in order to prevent uninitialised
1874afddba49SNick Piggin  * block data from leaking). And clear the new bit.
1875afddba49SNick Piggin  */
1876afddba49SNick Piggin void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1877afddba49SNick Piggin {
1878afddba49SNick Piggin 	unsigned int block_start, block_end;
1879afddba49SNick Piggin 	struct buffer_head *head, *bh;
1880afddba49SNick Piggin 
1881afddba49SNick Piggin 	BUG_ON(!PageLocked(page));
1882afddba49SNick Piggin 	if (!page_has_buffers(page))
1883afddba49SNick Piggin 		return;
1884afddba49SNick Piggin 
1885afddba49SNick Piggin 	bh = head = page_buffers(page);
1886afddba49SNick Piggin 	block_start = 0;
1887afddba49SNick Piggin 	do {
1888afddba49SNick Piggin 		block_end = block_start + bh->b_size;
1889afddba49SNick Piggin 
1890afddba49SNick Piggin 		if (buffer_new(bh)) {
1891afddba49SNick Piggin 			if (block_end > from && block_start < to) {
1892afddba49SNick Piggin 				if (!PageUptodate(page)) {
1893afddba49SNick Piggin 					unsigned start, size;
1894afddba49SNick Piggin 
1895afddba49SNick Piggin 					start = max(from, block_start);
1896afddba49SNick Piggin 					size = min(to, block_end) - start;
1897afddba49SNick Piggin 
1898eebd2aa3SChristoph Lameter 					zero_user(page, start, size);
1899afddba49SNick Piggin 					set_buffer_uptodate(bh);
1900afddba49SNick Piggin 				}
1901afddba49SNick Piggin 
1902afddba49SNick Piggin 				clear_buffer_new(bh);
1903afddba49SNick Piggin 				mark_buffer_dirty(bh);
1904afddba49SNick Piggin 			}
1905afddba49SNick Piggin 		}
1906afddba49SNick Piggin 
1907afddba49SNick Piggin 		block_start = block_end;
1908afddba49SNick Piggin 		bh = bh->b_this_page;
1909afddba49SNick Piggin 	} while (bh != head);
1910afddba49SNick Piggin }
1911afddba49SNick Piggin EXPORT_SYMBOL(page_zero_new_buffers);
1912afddba49SNick Piggin 
19131da177e4SLinus Torvalds static int __block_prepare_write(struct inode *inode, struct page *page,
19141da177e4SLinus Torvalds 		unsigned from, unsigned to, get_block_t *get_block)
19151da177e4SLinus Torvalds {
19161da177e4SLinus Torvalds 	unsigned block_start, block_end;
19171da177e4SLinus Torvalds 	sector_t block;
19181da177e4SLinus Torvalds 	int err = 0;
19191da177e4SLinus Torvalds 	unsigned blocksize, bbits;
19201da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
19211da177e4SLinus Torvalds 
19221da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
19231da177e4SLinus Torvalds 	BUG_ON(from > PAGE_CACHE_SIZE);
19241da177e4SLinus Torvalds 	BUG_ON(to > PAGE_CACHE_SIZE);
19251da177e4SLinus Torvalds 	BUG_ON(from > to);
19261da177e4SLinus Torvalds 
19271da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
19281da177e4SLinus Torvalds 	if (!page_has_buffers(page))
19291da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
19301da177e4SLinus Torvalds 	head = page_buffers(page);
19311da177e4SLinus Torvalds 
19321da177e4SLinus Torvalds 	bbits = inode->i_blkbits;
19331da177e4SLinus Torvalds 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
19341da177e4SLinus Torvalds 
19351da177e4SLinus Torvalds 	for(bh = head, block_start = 0; bh != head || !block_start;
19361da177e4SLinus Torvalds 	    block++, block_start=block_end, bh = bh->b_this_page) {
19371da177e4SLinus Torvalds 		block_end = block_start + blocksize;
19381da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
19391da177e4SLinus Torvalds 			if (PageUptodate(page)) {
19401da177e4SLinus Torvalds 				if (!buffer_uptodate(bh))
19411da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
19421da177e4SLinus Torvalds 			}
19431da177e4SLinus Torvalds 			continue;
19441da177e4SLinus Torvalds 		}
19451da177e4SLinus Torvalds 		if (buffer_new(bh))
19461da177e4SLinus Torvalds 			clear_buffer_new(bh);
19471da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
1948b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
19491da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
19501da177e4SLinus Torvalds 			if (err)
1951f3ddbdc6SNick Piggin 				break;
19521da177e4SLinus Torvalds 			if (buffer_new(bh)) {
19531da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
19541da177e4SLinus Torvalds 							bh->b_blocknr);
19551da177e4SLinus Torvalds 				if (PageUptodate(page)) {
1956637aff46SNick Piggin 					clear_buffer_new(bh);
19571da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
1958637aff46SNick Piggin 					mark_buffer_dirty(bh);
19591da177e4SLinus Torvalds 					continue;
19601da177e4SLinus Torvalds 				}
1961eebd2aa3SChristoph Lameter 				if (block_end > to || block_start < from)
1962eebd2aa3SChristoph Lameter 					zero_user_segments(page,
1963eebd2aa3SChristoph Lameter 						to, block_end,
1964eebd2aa3SChristoph Lameter 						block_start, from);
19651da177e4SLinus Torvalds 				continue;
19661da177e4SLinus Torvalds 			}
19671da177e4SLinus Torvalds 		}
19681da177e4SLinus Torvalds 		if (PageUptodate(page)) {
19691da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
19701da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
19711da177e4SLinus Torvalds 			continue;
19721da177e4SLinus Torvalds 		}
19731da177e4SLinus Torvalds 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
197433a266ddSDavid Chinner 		    !buffer_unwritten(bh) &&
19751da177e4SLinus Torvalds 		     (block_start < from || block_end > to)) {
19761da177e4SLinus Torvalds 			ll_rw_block(READ, 1, &bh);
19771da177e4SLinus Torvalds 			*wait_bh++=bh;
19781da177e4SLinus Torvalds 		}
19791da177e4SLinus Torvalds 	}
19801da177e4SLinus Torvalds 	/*
19811da177e4SLinus Torvalds 	 * If we issued read requests - let them complete.
19821da177e4SLinus Torvalds 	 */
19831da177e4SLinus Torvalds 	while(wait_bh > wait) {
19841da177e4SLinus Torvalds 		wait_on_buffer(*--wait_bh);
19851da177e4SLinus Torvalds 		if (!buffer_uptodate(*wait_bh))
1986f3ddbdc6SNick Piggin 			err = -EIO;
19871da177e4SLinus Torvalds 	}
1988afddba49SNick Piggin 	if (unlikely(err))
1989afddba49SNick Piggin 		page_zero_new_buffers(page, from, to);
19901da177e4SLinus Torvalds 	return err;
19911da177e4SLinus Torvalds }
19921da177e4SLinus Torvalds 
19931da177e4SLinus Torvalds static int __block_commit_write(struct inode *inode, struct page *page,
19941da177e4SLinus Torvalds 		unsigned from, unsigned to)
19951da177e4SLinus Torvalds {
19961da177e4SLinus Torvalds 	unsigned block_start, block_end;
19971da177e4SLinus Torvalds 	int partial = 0;
19981da177e4SLinus Torvalds 	unsigned blocksize;
19991da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
20001da177e4SLinus Torvalds 
20011da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
20021da177e4SLinus Torvalds 
20031da177e4SLinus Torvalds 	for(bh = head = page_buffers(page), block_start = 0;
20041da177e4SLinus Torvalds 	    bh != head || !block_start;
20051da177e4SLinus Torvalds 	    block_start=block_end, bh = bh->b_this_page) {
20061da177e4SLinus Torvalds 		block_end = block_start + blocksize;
20071da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
20081da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
20091da177e4SLinus Torvalds 				partial = 1;
20101da177e4SLinus Torvalds 		} else {
20111da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
20121da177e4SLinus Torvalds 			mark_buffer_dirty(bh);
20131da177e4SLinus Torvalds 		}
2014afddba49SNick Piggin 		clear_buffer_new(bh);
20151da177e4SLinus Torvalds 	}
20161da177e4SLinus Torvalds 
20171da177e4SLinus Torvalds 	/*
20181da177e4SLinus Torvalds 	 * If this is a partial write which happened to make all buffers
20191da177e4SLinus Torvalds 	 * uptodate then we can optimize away a bogus readpage() for
20201da177e4SLinus Torvalds 	 * the next read(). Here we 'discover' whether the page went
20211da177e4SLinus Torvalds 	 * uptodate as a result of this (potentially partial) write.
20221da177e4SLinus Torvalds 	 */
20231da177e4SLinus Torvalds 	if (!partial)
20241da177e4SLinus Torvalds 		SetPageUptodate(page);
20251da177e4SLinus Torvalds 	return 0;
20261da177e4SLinus Torvalds }
20271da177e4SLinus Torvalds 
20281da177e4SLinus Torvalds /*
2029afddba49SNick Piggin  * block_write_begin takes care of the basic task of block allocation and
2030afddba49SNick Piggin  * bringing partial write blocks uptodate first.
2031afddba49SNick Piggin  *
2032afddba49SNick Piggin  * If *pagep is not NULL, then block_write_begin uses the locked page
2033afddba49SNick Piggin  * at *pagep rather than allocating its own. In this case, the page will
2034afddba49SNick Piggin  * not be unlocked or deallocated on failure.
2035afddba49SNick Piggin  */
2036afddba49SNick Piggin int block_write_begin(struct file *file, struct address_space *mapping,
2037afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
2038afddba49SNick Piggin 			struct page **pagep, void **fsdata,
2039afddba49SNick Piggin 			get_block_t *get_block)
2040afddba49SNick Piggin {
2041afddba49SNick Piggin 	struct inode *inode = mapping->host;
2042afddba49SNick Piggin 	int status = 0;
2043afddba49SNick Piggin 	struct page *page;
2044afddba49SNick Piggin 	pgoff_t index;
2045afddba49SNick Piggin 	unsigned start, end;
2046afddba49SNick Piggin 	int ownpage = 0;
2047afddba49SNick Piggin 
2048afddba49SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
2049afddba49SNick Piggin 	start = pos & (PAGE_CACHE_SIZE - 1);
2050afddba49SNick Piggin 	end = start + len;
2051afddba49SNick Piggin 
2052afddba49SNick Piggin 	page = *pagep;
2053afddba49SNick Piggin 	if (page == NULL) {
2054afddba49SNick Piggin 		ownpage = 1;
205554566b2cSNick Piggin 		page = grab_cache_page_write_begin(mapping, index, flags);
2056afddba49SNick Piggin 		if (!page) {
2057afddba49SNick Piggin 			status = -ENOMEM;
2058afddba49SNick Piggin 			goto out;
2059afddba49SNick Piggin 		}
2060afddba49SNick Piggin 		*pagep = page;
2061afddba49SNick Piggin 	} else
2062afddba49SNick Piggin 		BUG_ON(!PageLocked(page));
2063afddba49SNick Piggin 
2064afddba49SNick Piggin 	status = __block_prepare_write(inode, page, start, end, get_block);
2065afddba49SNick Piggin 	if (unlikely(status)) {
2066afddba49SNick Piggin 		ClearPageUptodate(page);
2067afddba49SNick Piggin 
2068afddba49SNick Piggin 		if (ownpage) {
2069afddba49SNick Piggin 			unlock_page(page);
2070afddba49SNick Piggin 			page_cache_release(page);
2071afddba49SNick Piggin 			*pagep = NULL;
2072afddba49SNick Piggin 
2073afddba49SNick Piggin 			/*
2074afddba49SNick Piggin 			 * prepare_write() may have instantiated a few blocks
2075afddba49SNick Piggin 			 * outside i_size.  Trim these off again. Don't need
2076afddba49SNick Piggin 			 * i_size_read because we hold i_mutex.
2077afddba49SNick Piggin 			 */
2078afddba49SNick Piggin 			if (pos + len > inode->i_size)
2079afddba49SNick Piggin 				vmtruncate(inode, inode->i_size);
2080afddba49SNick Piggin 		}
2081afddba49SNick Piggin 	}
2082afddba49SNick Piggin 
2083afddba49SNick Piggin out:
2084afddba49SNick Piggin 	return status;
2085afddba49SNick Piggin }
2086afddba49SNick Piggin EXPORT_SYMBOL(block_write_begin);
2087afddba49SNick Piggin 
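/*
 * Editorial sketch (not part of the original file): how a filesystem
 * would typically wire its ->write_begin to the helper above.  The
 * examplefs_* names are hypothetical; examplefs_get_block would be the
 * filesystem's usual get_block_t implementation.
 */
static int examplefs_get_block(struct inode *inode, sector_t iblock,
			       struct buffer_head *bh_result, int create);

static int examplefs_write_begin(struct file *file,
				 struct address_space *mapping,
				 loff_t pos, unsigned len, unsigned flags,
				 struct page **pagep, void **fsdata)
{
	*pagep = NULL;	/* let block_write_begin find/create the page */
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, examplefs_get_block);
}
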
2088afddba49SNick Piggin int block_write_end(struct file *file, struct address_space *mapping,
2089afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2090afddba49SNick Piggin 			struct page *page, void *fsdata)
2091afddba49SNick Piggin {
2092afddba49SNick Piggin 	struct inode *inode = mapping->host;
2093afddba49SNick Piggin 	unsigned start;
2094afddba49SNick Piggin 
2095afddba49SNick Piggin 	start = pos & (PAGE_CACHE_SIZE - 1);
2096afddba49SNick Piggin 
2097afddba49SNick Piggin 	if (unlikely(copied < len)) {
2098afddba49SNick Piggin 		/*
2099afddba49SNick Piggin 		 * The buffers that were written will now be uptodate, so we
2100afddba49SNick Piggin 		 * don't have to worry about a readpage reading them and
2101afddba49SNick Piggin 		 * overwriting a partial write. However if we have encountered
2102afddba49SNick Piggin 		 * a short write and only partially written into a buffer, it
2103afddba49SNick Piggin 		 * will not be marked uptodate, so a readpage might come in and
2104afddba49SNick Piggin 		 * destroy our partial write.
2105afddba49SNick Piggin 		 *
2106afddba49SNick Piggin 		 * Do the simplest thing, and just treat any short write to a
2107afddba49SNick Piggin 		 * non-uptodate page as a zero-length write, and force the
2108afddba49SNick Piggin 		 * caller to redo the whole thing.
2109afddba49SNick Piggin 		 */
2110afddba49SNick Piggin 		if (!PageUptodate(page))
2111afddba49SNick Piggin 			copied = 0;
2112afddba49SNick Piggin 
2113afddba49SNick Piggin 		page_zero_new_buffers(page, start+copied, start+len);
2114afddba49SNick Piggin 	}
2115afddba49SNick Piggin 	flush_dcache_page(page);
2116afddba49SNick Piggin 
2117afddba49SNick Piggin 	/* This could be a short (even 0-length) commit */
2118afddba49SNick Piggin 	__block_commit_write(inode, page, start, start+copied);
2119afddba49SNick Piggin 
2120afddba49SNick Piggin 	return copied;
2121afddba49SNick Piggin }
2122afddba49SNick Piggin EXPORT_SYMBOL(block_write_end);
2123afddba49SNick Piggin 
2124afddba49SNick Piggin int generic_write_end(struct file *file, struct address_space *mapping,
2125afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2126afddba49SNick Piggin 			struct page *page, void *fsdata)
2127afddba49SNick Piggin {
2128afddba49SNick Piggin 	struct inode *inode = mapping->host;
2129c7d206b3SJan Kara 	int i_size_changed = 0;
2130afddba49SNick Piggin 
2131afddba49SNick Piggin 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2132afddba49SNick Piggin 
2133afddba49SNick Piggin 	/*
2134afddba49SNick Piggin 	 * No need to use i_size_read() here, the i_size
2135afddba49SNick Piggin 	 * cannot change under us because we hold i_mutex.
2136afddba49SNick Piggin 	 *
2137afddba49SNick Piggin 	 * But it's important to update i_size while still holding page lock:
2138afddba49SNick Piggin 	 * page writeout could otherwise come in and zero beyond i_size.
2139afddba49SNick Piggin 	 */
2140afddba49SNick Piggin 	if (pos+copied > inode->i_size) {
2141afddba49SNick Piggin 		i_size_write(inode, pos+copied);
2142c7d206b3SJan Kara 		i_size_changed = 1;
2143afddba49SNick Piggin 	}
2144afddba49SNick Piggin 
2145afddba49SNick Piggin 	unlock_page(page);
2146afddba49SNick Piggin 	page_cache_release(page);
2147afddba49SNick Piggin 
2148c7d206b3SJan Kara 	/*
2149c7d206b3SJan Kara 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
2150c7d206b3SJan Kara 	 * makes the holding time of page lock longer. Second, it forces lock
2151c7d206b3SJan Kara 	 * ordering of page lock and transaction start for journaling
2152c7d206b3SJan Kara 	 * filesystems.
2153c7d206b3SJan Kara 	 */
2154c7d206b3SJan Kara 	if (i_size_changed)
2155c7d206b3SJan Kara 		mark_inode_dirty(inode);
2156c7d206b3SJan Kara 
2157afddba49SNick Piggin 	return copied;
2158afddba49SNick Piggin }
2159afddba49SNick Piggin EXPORT_SYMBOL(generic_write_end);
2160afddba49SNick Piggin 
2161afddba49SNick Piggin /*
21628ab22b9aSHisashi Hifumi  * block_is_partially_uptodate checks whether buffers within a page are
21638ab22b9aSHisashi Hifumi  * uptodate or not.
21648ab22b9aSHisashi Hifumi  *
21658ab22b9aSHisashi Hifumi  * Returns true if all buffers covering the portion of the file
21668ab22b9aSHisashi Hifumi  * we want to read are uptodate.
21678ab22b9aSHisashi Hifumi  */
21688ab22b9aSHisashi Hifumi int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
21698ab22b9aSHisashi Hifumi 					unsigned long from)
21708ab22b9aSHisashi Hifumi {
21718ab22b9aSHisashi Hifumi 	struct inode *inode = page->mapping->host;
21728ab22b9aSHisashi Hifumi 	unsigned block_start, block_end, blocksize;
21738ab22b9aSHisashi Hifumi 	unsigned to;
21748ab22b9aSHisashi Hifumi 	struct buffer_head *bh, *head;
21758ab22b9aSHisashi Hifumi 	int ret = 1;
21768ab22b9aSHisashi Hifumi 
21778ab22b9aSHisashi Hifumi 	if (!page_has_buffers(page))
21788ab22b9aSHisashi Hifumi 		return 0;
21798ab22b9aSHisashi Hifumi 
21808ab22b9aSHisashi Hifumi 	blocksize = 1 << inode->i_blkbits;
21818ab22b9aSHisashi Hifumi 	to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
21828ab22b9aSHisashi Hifumi 	to = from + to;
21838ab22b9aSHisashi Hifumi 	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
21848ab22b9aSHisashi Hifumi 		return 0;
21858ab22b9aSHisashi Hifumi 
21868ab22b9aSHisashi Hifumi 	head = page_buffers(page);
21878ab22b9aSHisashi Hifumi 	bh = head;
21888ab22b9aSHisashi Hifumi 	block_start = 0;
21898ab22b9aSHisashi Hifumi 	do {
21908ab22b9aSHisashi Hifumi 		block_end = block_start + blocksize;
21918ab22b9aSHisashi Hifumi 		if (block_end > from && block_start < to) {
21928ab22b9aSHisashi Hifumi 			if (!buffer_uptodate(bh)) {
21938ab22b9aSHisashi Hifumi 				ret = 0;
21948ab22b9aSHisashi Hifumi 				break;
21958ab22b9aSHisashi Hifumi 			}
21968ab22b9aSHisashi Hifumi 			if (block_end >= to)
21978ab22b9aSHisashi Hifumi 				break;
21988ab22b9aSHisashi Hifumi 		}
21998ab22b9aSHisashi Hifumi 		block_start = block_end;
22008ab22b9aSHisashi Hifumi 		bh = bh->b_this_page;
22018ab22b9aSHisashi Hifumi 	} while (bh != head);
22028ab22b9aSHisashi Hifumi 
22038ab22b9aSHisashi Hifumi 	return ret;
22048ab22b9aSHisashi Hifumi }
22058ab22b9aSHisashi Hifumi EXPORT_SYMBOL(block_is_partially_uptodate);
22068ab22b9aSHisashi Hifumi 
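/*
 * Example: with the helpers above, a block-based filesystem can often
 * wire its address_space_operations like this (a sketch; the myfs_
 * methods are hypothetical wrappers around the generic helpers):
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readpage		= myfs_readpage,
 *		.writepage		= myfs_writepage,
 *		.write_begin		= myfs_write_begin,
 *		.write_end		= generic_write_end,
 *		.is_partially_uptodate	= block_is_partially_uptodate,
 *	};
 */
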
22078ab22b9aSHisashi Hifumi /*
22081da177e4SLinus Torvalds  * Generic "read page" function for block devices that have the normal
22091da177e4SLinus Torvalds  * get_block functionality. This is most of the block device filesystems.
22101da177e4SLinus Torvalds  * Reads the page asynchronously --- the unlock_buffer() and
22111da177e4SLinus Torvalds  * set/clear_buffer_uptodate() functions propagate buffer state into the
22121da177e4SLinus Torvalds  * page struct once IO has completed.
22131da177e4SLinus Torvalds  */
22141da177e4SLinus Torvalds int block_read_full_page(struct page *page, get_block_t *get_block)
22151da177e4SLinus Torvalds {
22161da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
22171da177e4SLinus Torvalds 	sector_t iblock, lblock;
22181da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
22191da177e4SLinus Torvalds 	unsigned int blocksize;
22201da177e4SLinus Torvalds 	int nr, i;
22211da177e4SLinus Torvalds 	int fully_mapped = 1;
22221da177e4SLinus Torvalds 
2223cd7619d6SMatt Mackall 	BUG_ON(!PageLocked(page));
22241da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
22251da177e4SLinus Torvalds 	if (!page_has_buffers(page))
22261da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
22271da177e4SLinus Torvalds 	head = page_buffers(page);
22281da177e4SLinus Torvalds 
22291da177e4SLinus Torvalds 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
22301da177e4SLinus Torvalds 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
22311da177e4SLinus Torvalds 	bh = head;
22321da177e4SLinus Torvalds 	nr = 0;
22331da177e4SLinus Torvalds 	i = 0;
22341da177e4SLinus Torvalds 
22351da177e4SLinus Torvalds 	do {
22361da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
22371da177e4SLinus Torvalds 			continue;
22381da177e4SLinus Torvalds 
22391da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
2240c64610baSAndrew Morton 			int err = 0;
2241c64610baSAndrew Morton 
22421da177e4SLinus Torvalds 			fully_mapped = 0;
22431da177e4SLinus Torvalds 			if (iblock < lblock) {
2244b0cf2321SBadari Pulavarty 				WARN_ON(bh->b_size != blocksize);
2245c64610baSAndrew Morton 				err = get_block(inode, iblock, bh, 0);
2246c64610baSAndrew Morton 				if (err)
22471da177e4SLinus Torvalds 					SetPageError(page);
22481da177e4SLinus Torvalds 			}
22491da177e4SLinus Torvalds 			if (!buffer_mapped(bh)) {
2250eebd2aa3SChristoph Lameter 				zero_user(page, i * blocksize, blocksize);
2251c64610baSAndrew Morton 				if (!err)
22521da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
22531da177e4SLinus Torvalds 				continue;
22541da177e4SLinus Torvalds 			}
22551da177e4SLinus Torvalds 			/*
22561da177e4SLinus Torvalds 			 * get_block() might have updated the buffer
22571da177e4SLinus Torvalds 			 * synchronously
22581da177e4SLinus Torvalds 			 */
22591da177e4SLinus Torvalds 			if (buffer_uptodate(bh))
22601da177e4SLinus Torvalds 				continue;
22611da177e4SLinus Torvalds 		}
22621da177e4SLinus Torvalds 		arr[nr++] = bh;
22631da177e4SLinus Torvalds 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
22641da177e4SLinus Torvalds 
22651da177e4SLinus Torvalds 	if (fully_mapped)
22661da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
22671da177e4SLinus Torvalds 
22681da177e4SLinus Torvalds 	if (!nr) {
22691da177e4SLinus Torvalds 		/*
22701da177e4SLinus Torvalds 		 * All buffers are uptodate - we can set the page uptodate
22711da177e4SLinus Torvalds 		 * as well. But not if get_block() returned an error.
22721da177e4SLinus Torvalds 		 */
22731da177e4SLinus Torvalds 		if (!PageError(page))
22741da177e4SLinus Torvalds 			SetPageUptodate(page);
22751da177e4SLinus Torvalds 		unlock_page(page);
22761da177e4SLinus Torvalds 		return 0;
22771da177e4SLinus Torvalds 	}
22781da177e4SLinus Torvalds 
22791da177e4SLinus Torvalds 	/* Stage two: lock the buffers */
22801da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
22811da177e4SLinus Torvalds 		bh = arr[i];
22821da177e4SLinus Torvalds 		lock_buffer(bh);
22831da177e4SLinus Torvalds 		mark_buffer_async_read(bh);
22841da177e4SLinus Torvalds 	}
22851da177e4SLinus Torvalds 
22861da177e4SLinus Torvalds 	/*
22871da177e4SLinus Torvalds 	 * Stage 3: start the IO.  Check for uptodateness
22881da177e4SLinus Torvalds 	 * inside the buffer lock in case another process reading
22891da177e4SLinus Torvalds 	 * the underlying blockdev brought it uptodate (the sct fix).
22901da177e4SLinus Torvalds 	 */
22911da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
22921da177e4SLinus Torvalds 		bh = arr[i];
22931da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
22941da177e4SLinus Torvalds 			end_buffer_async_read(bh, 1);
22951da177e4SLinus Torvalds 		else
22961da177e4SLinus Torvalds 			submit_bh(READ, bh);
22971da177e4SLinus Torvalds 	}
22981da177e4SLinus Torvalds 	return 0;
22991da177e4SLinus Torvalds }
23001da177e4SLinus Torvalds 
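/*
 * Example: the usual ->readpage implementation is a one-line wrapper
 * around block_read_full_page() (a sketch; myfs_get_block() is
 * hypothetical):
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, myfs_get_block);
 *	}
 */
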
23011da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding
230289e10787SNick Piggin  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
23031da177e4SLinus Torvalds  * deal with the hole.
23041da177e4SLinus Torvalds  */
230589e10787SNick Piggin int generic_cont_expand_simple(struct inode *inode, loff_t size)
23061da177e4SLinus Torvalds {
23071da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
23081da177e4SLinus Torvalds 	struct page *page;
230989e10787SNick Piggin 	void *fsdata;
231005eb0b51SOGAWA Hirofumi 	unsigned long limit;
23111da177e4SLinus Torvalds 	int err;
23121da177e4SLinus Torvalds 
23131da177e4SLinus Torvalds 	err = -EFBIG;
23141da177e4SLinus Torvalds 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
23151da177e4SLinus Torvalds 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
23161da177e4SLinus Torvalds 		send_sig(SIGXFSZ, current, 0);
23171da177e4SLinus Torvalds 		goto out;
23181da177e4SLinus Torvalds 	}
23191da177e4SLinus Torvalds 	if (size > inode->i_sb->s_maxbytes)
23201da177e4SLinus Torvalds 		goto out;
23211da177e4SLinus Torvalds 
232289e10787SNick Piggin 	err = pagecache_write_begin(NULL, mapping, size, 0,
232389e10787SNick Piggin 				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
232489e10787SNick Piggin 				&page, &fsdata);
232589e10787SNick Piggin 	if (err)
232605eb0b51SOGAWA Hirofumi 		goto out;
232705eb0b51SOGAWA Hirofumi 
232889e10787SNick Piggin 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
232989e10787SNick Piggin 	BUG_ON(err > 0);
233005eb0b51SOGAWA Hirofumi 
233105eb0b51SOGAWA Hirofumi out:
233205eb0b51SOGAWA Hirofumi 	return err;
233305eb0b51SOGAWA Hirofumi }
233405eb0b51SOGAWA Hirofumi 
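/*
 * Example: a sketch of how a filesystem might call this from its
 * ->setattr when a truncate grows the file (attribute handling
 * abridged):
 *
 *	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}
 */
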
2335f1e3af72SAdrian Bunk static int cont_expand_zero(struct file *file, struct address_space *mapping,
233689e10787SNick Piggin 			    loff_t pos, loff_t *bytes)
233705eb0b51SOGAWA Hirofumi {
233889e10787SNick Piggin 	struct inode *inode = mapping->host;
233989e10787SNick Piggin 	unsigned blocksize = 1 << inode->i_blkbits;
234089e10787SNick Piggin 	struct page *page;
234189e10787SNick Piggin 	void *fsdata;
234289e10787SNick Piggin 	pgoff_t index, curidx;
234389e10787SNick Piggin 	loff_t curpos;
234489e10787SNick Piggin 	unsigned zerofrom, offset, len;
234589e10787SNick Piggin 	int err = 0;
234605eb0b51SOGAWA Hirofumi 
234789e10787SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
234889e10787SNick Piggin 	offset = pos & ~PAGE_CACHE_MASK;
234989e10787SNick Piggin 
235089e10787SNick Piggin 	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
235189e10787SNick Piggin 		zerofrom = curpos & ~PAGE_CACHE_MASK;
235289e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
235389e10787SNick Piggin 			*bytes |= (blocksize-1);
235489e10787SNick Piggin 			(*bytes)++;
235589e10787SNick Piggin 		}
235689e10787SNick Piggin 		len = PAGE_CACHE_SIZE - zerofrom;
235789e10787SNick Piggin 
235889e10787SNick Piggin 		err = pagecache_write_begin(file, mapping, curpos, len,
235989e10787SNick Piggin 						AOP_FLAG_UNINTERRUPTIBLE,
236089e10787SNick Piggin 						&page, &fsdata);
236189e10787SNick Piggin 		if (err)
236289e10787SNick Piggin 			goto out;
2363eebd2aa3SChristoph Lameter 		zero_user(page, zerofrom, len);
236489e10787SNick Piggin 		err = pagecache_write_end(file, mapping, curpos, len, len,
236589e10787SNick Piggin 						page, fsdata);
236689e10787SNick Piggin 		if (err < 0)
236789e10787SNick Piggin 			goto out;
236889e10787SNick Piggin 		BUG_ON(err != len);
236989e10787SNick Piggin 		err = 0;
2370061e9746SOGAWA Hirofumi 
2371061e9746SOGAWA Hirofumi 		balance_dirty_pages_ratelimited(mapping);
237289e10787SNick Piggin 	}
237389e10787SNick Piggin 
237489e10787SNick Piggin 	/* page covers the boundary, find the boundary offset */
237589e10787SNick Piggin 	if (index == curidx) {
237689e10787SNick Piggin 		zerofrom = curpos & ~PAGE_CACHE_MASK;
237789e10787SNick Piggin 		/* if we are expanding the file, the write itself fills the last block */
237889e10787SNick Piggin 		if (offset <= zerofrom) {
237989e10787SNick Piggin 			goto out;
238089e10787SNick Piggin 		}
238189e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
238289e10787SNick Piggin 			*bytes |= (blocksize-1);
238389e10787SNick Piggin 			(*bytes)++;
238489e10787SNick Piggin 		}
238589e10787SNick Piggin 		len = offset - zerofrom;
238689e10787SNick Piggin 
238789e10787SNick Piggin 		err = pagecache_write_begin(file, mapping, curpos, len,
238889e10787SNick Piggin 						AOP_FLAG_UNINTERRUPTIBLE,
238989e10787SNick Piggin 						&page, &fsdata);
239089e10787SNick Piggin 		if (err)
239189e10787SNick Piggin 			goto out;
2392eebd2aa3SChristoph Lameter 		zero_user(page, zerofrom, len);
239389e10787SNick Piggin 		err = pagecache_write_end(file, mapping, curpos, len, len,
239489e10787SNick Piggin 						page, fsdata);
239589e10787SNick Piggin 		if (err < 0)
239689e10787SNick Piggin 			goto out;
239789e10787SNick Piggin 		BUG_ON(err != len);
239889e10787SNick Piggin 		err = 0;
239989e10787SNick Piggin 	}
240089e10787SNick Piggin out:
240189e10787SNick Piggin 	return err;
24021da177e4SLinus Torvalds }
24031da177e4SLinus Torvalds 
24041da177e4SLinus Torvalds /*
24051da177e4SLinus Torvalds  * For moronic filesystems that do not allow holes in files.
24061da177e4SLinus Torvalds  * We may have to extend the file.
24071da177e4SLinus Torvalds  */
240889e10787SNick Piggin int cont_write_begin(struct file *file, struct address_space *mapping,
240989e10787SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
241089e10787SNick Piggin 			struct page **pagep, void **fsdata,
241189e10787SNick Piggin 			get_block_t *get_block, loff_t *bytes)
24121da177e4SLinus Torvalds {
24131da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
24141da177e4SLinus Torvalds 	unsigned blocksize = 1 << inode->i_blkbits;
241589e10787SNick Piggin 	unsigned zerofrom;
241689e10787SNick Piggin 	int err;
24171da177e4SLinus Torvalds 
241889e10787SNick Piggin 	err = cont_expand_zero(file, mapping, pos, bytes);
241989e10787SNick Piggin 	if (err)
24201da177e4SLinus Torvalds 		goto out;
24211da177e4SLinus Torvalds 
24221da177e4SLinus Torvalds 	zerofrom = *bytes & ~PAGE_CACHE_MASK;
242389e10787SNick Piggin 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
24241da177e4SLinus Torvalds 		*bytes |= (blocksize-1);
24251da177e4SLinus Torvalds 		(*bytes)++;
24261da177e4SLinus Torvalds 	}
24271da177e4SLinus Torvalds 
242889e10787SNick Piggin 	*pagep = NULL;
242989e10787SNick Piggin 	err = block_write_begin(file, mapping, pos, len,
243089e10787SNick Piggin 				flags, pagep, fsdata, get_block);
24311da177e4SLinus Torvalds out:
243289e10787SNick Piggin 	return err;
24331da177e4SLinus Torvalds }
24341da177e4SLinus Torvalds 
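/*
 * Example: a sketch for such a filesystem, which keeps the count of
 * initialised bytes in its private inode info and passes its address
 * as @bytes (MYFS_I(), valid_size and myfs_get_block() are
 * hypothetical):
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping,
 *			loff_t pos, unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		*pagep = NULL;
 *		return cont_write_begin(file, mapping, pos, len, flags,
 *				pagep, fsdata, myfs_get_block,
 *				&MYFS_I(mapping->host)->valid_size);
 *	}
 */
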
24351da177e4SLinus Torvalds int block_prepare_write(struct page *page, unsigned from, unsigned to,
24361da177e4SLinus Torvalds 			get_block_t *get_block)
24371da177e4SLinus Torvalds {
24381da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
24391da177e4SLinus Torvalds 	int err = __block_prepare_write(inode, page, from, to, get_block);
24401da177e4SLinus Torvalds 	if (err)
24411da177e4SLinus Torvalds 		ClearPageUptodate(page);
24421da177e4SLinus Torvalds 	return err;
24431da177e4SLinus Torvalds }
24441da177e4SLinus Torvalds 
24451da177e4SLinus Torvalds int block_commit_write(struct page *page, unsigned from, unsigned to)
24461da177e4SLinus Torvalds {
24471da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
24481da177e4SLinus Torvalds 	__block_commit_write(inode, page, from, to);
24491da177e4SLinus Torvalds 	return 0;
24501da177e4SLinus Torvalds }
24511da177e4SLinus Torvalds 
245254171690SDavid Chinner /*
245354171690SDavid Chinner  * block_page_mkwrite() is not allowed to change the file size as it gets
245454171690SDavid Chinner  * called from a page fault handler when a page is first dirtied. Hence we must
245554171690SDavid Chinner  * be careful to check for EOF conditions here. We set the page up correctly
245654171690SDavid Chinner  * for a written page which means we get ENOSPC checking when writing into
245754171690SDavid Chinner  * holes and correct delalloc and unwritten extent mapping on filesystems that
245854171690SDavid Chinner  * support these features.
245954171690SDavid Chinner  *
246054171690SDavid Chinner  * We are not allowed to take the i_mutex here so we have to play games to
246154171690SDavid Chinner  * protect against truncate races as the page could now be beyond EOF.  Because
246254171690SDavid Chinner  * vmtruncate() writes the inode size before removing pages, once we have the
246354171690SDavid Chinner  * page lock we can determine safely if the page is beyond EOF. If it is not
246454171690SDavid Chinner  * beyond EOF, then the page is guaranteed safe against truncation until we
246554171690SDavid Chinner  * unlock the page.
246654171690SDavid Chinner  */
246754171690SDavid Chinner int
246854171690SDavid Chinner block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
246954171690SDavid Chinner 		   get_block_t get_block)
247054171690SDavid Chinner {
247154171690SDavid Chinner 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
247254171690SDavid Chinner 	unsigned long end;
247354171690SDavid Chinner 	loff_t size;
247454171690SDavid Chinner 	int ret = -EINVAL;
247554171690SDavid Chinner 
247654171690SDavid Chinner 	lock_page(page);
247754171690SDavid Chinner 	size = i_size_read(inode);
247854171690SDavid Chinner 	if ((page->mapping != inode->i_mapping) ||
247918336338SNick Piggin 	    (page_offset(page) > size)) {
248054171690SDavid Chinner 		/* page got truncated out from underneath us */
248154171690SDavid Chinner 		goto out_unlock;
248254171690SDavid Chinner 	}
248354171690SDavid Chinner 
248454171690SDavid Chinner 	/* page is wholly or partially inside EOF */
248554171690SDavid Chinner 	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
248654171690SDavid Chinner 		end = size & ~PAGE_CACHE_MASK;
248754171690SDavid Chinner 	else
248854171690SDavid Chinner 		end = PAGE_CACHE_SIZE;
248954171690SDavid Chinner 
249054171690SDavid Chinner 	ret = block_prepare_write(page, 0, end, get_block);
249154171690SDavid Chinner 	if (!ret)
249254171690SDavid Chinner 		ret = block_commit_write(page, 0, end);
249354171690SDavid Chinner 
249454171690SDavid Chinner out_unlock:
249554171690SDavid Chinner 	unlock_page(page);
249654171690SDavid Chinner 	return ret;
249754171690SDavid Chinner }
24981da177e4SLinus Torvalds 
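/*
 * Example: a sketch of hooking this into a file's mmap path so writes
 * through a mapping get block allocation and ENOSPC checking.  Names
 * are hypothetical, and the ->page_mkwrite signature shown is the one
 * used by kernels of this vintage:
 *
 *	static int myfs_page_mkwrite(struct vm_area_struct *vma,
 *					struct page *page)
 *	{
 *		return block_page_mkwrite(vma, page, myfs_get_block);
 *	}
 *
 *	static struct vm_operations_struct myfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 */
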
24991da177e4SLinus Torvalds /*
250003158cd7SNick Piggin  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
25011da177e4SLinus Torvalds  * immediately, while under the page lock.  So it needs a special end_io
25021da177e4SLinus Torvalds  * handler which does not touch the bh after unlocking it.
25031da177e4SLinus Torvalds  */
25041da177e4SLinus Torvalds static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
25051da177e4SLinus Torvalds {
250668671f35SDmitry Monakhov 	__end_buffer_read_notouch(bh, uptodate);
25071da177e4SLinus Torvalds }
25081da177e4SLinus Torvalds 
25091da177e4SLinus Torvalds /*
251003158cd7SNick Piggin  * Attach the singly-linked list of buffers created by nobh_write_begin, to
251103158cd7SNick Piggin  * the page (converting it to circular linked list and taking care of page
251203158cd7SNick Piggin  * dirty races).
251303158cd7SNick Piggin  */
251403158cd7SNick Piggin static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
251503158cd7SNick Piggin {
251603158cd7SNick Piggin 	struct buffer_head *bh;
251703158cd7SNick Piggin 
251803158cd7SNick Piggin 	BUG_ON(!PageLocked(page));
251903158cd7SNick Piggin 
252003158cd7SNick Piggin 	spin_lock(&page->mapping->private_lock);
252103158cd7SNick Piggin 	bh = head;
252203158cd7SNick Piggin 	do {
252303158cd7SNick Piggin 		if (PageDirty(page))
252403158cd7SNick Piggin 			set_buffer_dirty(bh);
252503158cd7SNick Piggin 		if (!bh->b_this_page)
252603158cd7SNick Piggin 			bh->b_this_page = head;
252703158cd7SNick Piggin 		bh = bh->b_this_page;
252803158cd7SNick Piggin 	} while (bh != head);
252903158cd7SNick Piggin 	attach_page_buffers(page, head);
253003158cd7SNick Piggin 	spin_unlock(&page->mapping->private_lock);
253103158cd7SNick Piggin }
253203158cd7SNick Piggin 
253303158cd7SNick Piggin /*
25341da177e4SLinus Torvalds  * On entry, the page is not uptodate at all.
25351da177e4SLinus Torvalds  * On exit, the page is fully uptodate in the areas outside (from,to).
25361da177e4SLinus Torvalds  */
253703158cd7SNick Piggin int nobh_write_begin(struct file *file, struct address_space *mapping,
253803158cd7SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
253903158cd7SNick Piggin 			struct page **pagep, void **fsdata,
25401da177e4SLinus Torvalds 			get_block_t *get_block)
25411da177e4SLinus Torvalds {
254203158cd7SNick Piggin 	struct inode *inode = mapping->host;
25431da177e4SLinus Torvalds 	const unsigned blkbits = inode->i_blkbits;
25441da177e4SLinus Torvalds 	const unsigned blocksize = 1 << blkbits;
2545a4b0672dSNick Piggin 	struct buffer_head *head, *bh;
254603158cd7SNick Piggin 	struct page *page;
254703158cd7SNick Piggin 	pgoff_t index;
254803158cd7SNick Piggin 	unsigned from, to;
25491da177e4SLinus Torvalds 	unsigned block_in_page;
2550a4b0672dSNick Piggin 	unsigned block_start, block_end;
25511da177e4SLinus Torvalds 	sector_t block_in_file;
25521da177e4SLinus Torvalds 	int nr_reads = 0;
25531da177e4SLinus Torvalds 	int ret = 0;
25541da177e4SLinus Torvalds 	int is_mapped_to_disk = 1;
25551da177e4SLinus Torvalds 
255603158cd7SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
255703158cd7SNick Piggin 	from = pos & (PAGE_CACHE_SIZE - 1);
255803158cd7SNick Piggin 	to = from + len;
255903158cd7SNick Piggin 
256054566b2cSNick Piggin 	page = grab_cache_page_write_begin(mapping, index, flags);
256103158cd7SNick Piggin 	if (!page)
256203158cd7SNick Piggin 		return -ENOMEM;
256303158cd7SNick Piggin 	*pagep = page;
256403158cd7SNick Piggin 	*fsdata = NULL;
256503158cd7SNick Piggin 
256603158cd7SNick Piggin 	if (page_has_buffers(page)) {
256703158cd7SNick Piggin 		unlock_page(page);
256803158cd7SNick Piggin 		page_cache_release(page);
256903158cd7SNick Piggin 		*pagep = NULL;
257003158cd7SNick Piggin 		return block_write_begin(file, mapping, pos, len, flags, pagep,
257103158cd7SNick Piggin 					fsdata, get_block);
257203158cd7SNick Piggin 	}
2573a4b0672dSNick Piggin 
25741da177e4SLinus Torvalds 	if (PageMappedToDisk(page))
25751da177e4SLinus Torvalds 		return 0;
25761da177e4SLinus Torvalds 
2577a4b0672dSNick Piggin 	/*
2578a4b0672dSNick Piggin 	 * Allocate buffers so that we can keep track of state, and potentially
2579a4b0672dSNick Piggin 	 * attach them to the page if an error occurs. In the common case of
2580a4b0672dSNick Piggin 	 * no error, they will just be freed again without ever being attached
2581a4b0672dSNick Piggin 	 * to the page (which is all OK, because we're under the page lock).
2582a4b0672dSNick Piggin 	 *
2583a4b0672dSNick Piggin 	 * Be careful: the buffer linked list is a NULL terminated one, rather
2584a4b0672dSNick Piggin 	 * than the circular one we're used to.
2585a4b0672dSNick Piggin 	 */
2586a4b0672dSNick Piggin 	head = alloc_page_buffers(page, blocksize, 0);
258703158cd7SNick Piggin 	if (!head) {
258803158cd7SNick Piggin 		ret = -ENOMEM;
258903158cd7SNick Piggin 		goto out_release;
259003158cd7SNick Piggin 	}
2591a4b0672dSNick Piggin 
25921da177e4SLinus Torvalds 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
25931da177e4SLinus Torvalds 
25941da177e4SLinus Torvalds 	/*
25951da177e4SLinus Torvalds 	 * We loop across all blocks in the page, whether or not they are
25961da177e4SLinus Torvalds 	 * part of the affected region.  This is so we can discover if the
25971da177e4SLinus Torvalds 	 * page is fully mapped-to-disk.
25981da177e4SLinus Torvalds 	 */
2599a4b0672dSNick Piggin 	for (block_start = 0, block_in_page = 0, bh = head;
26001da177e4SLinus Torvalds 		  block_start < PAGE_CACHE_SIZE;
2601a4b0672dSNick Piggin 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
26021da177e4SLinus Torvalds 		int create;
26031da177e4SLinus Torvalds 
2604a4b0672dSNick Piggin 		block_end = block_start + blocksize;
2605a4b0672dSNick Piggin 		bh->b_state = 0;
26061da177e4SLinus Torvalds 		create = 1;
26071da177e4SLinus Torvalds 		if (block_start >= to)
26081da177e4SLinus Torvalds 			create = 0;
26091da177e4SLinus Torvalds 		ret = get_block(inode, block_in_file + block_in_page,
2610a4b0672dSNick Piggin 					bh, create);
26111da177e4SLinus Torvalds 		if (ret)
26121da177e4SLinus Torvalds 			goto failed;
2613a4b0672dSNick Piggin 		if (!buffer_mapped(bh))
26141da177e4SLinus Torvalds 			is_mapped_to_disk = 0;
2615a4b0672dSNick Piggin 		if (buffer_new(bh))
2616a4b0672dSNick Piggin 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2617a4b0672dSNick Piggin 		if (PageUptodate(page)) {
2618a4b0672dSNick Piggin 			set_buffer_uptodate(bh);
26191da177e4SLinus Torvalds 			continue;
2620a4b0672dSNick Piggin 		}
2621a4b0672dSNick Piggin 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2622eebd2aa3SChristoph Lameter 			zero_user_segments(page, block_start, from,
2623eebd2aa3SChristoph Lameter 							to, block_end);
26241da177e4SLinus Torvalds 			continue;
26251da177e4SLinus Torvalds 		}
2626a4b0672dSNick Piggin 		if (buffer_uptodate(bh))
26271da177e4SLinus Torvalds 			continue;	/* reiserfs does this */
26281da177e4SLinus Torvalds 		if (block_start < from || block_end > to) {
2629a4b0672dSNick Piggin 			lock_buffer(bh);
2630a4b0672dSNick Piggin 			bh->b_end_io = end_buffer_read_nobh;
2631a4b0672dSNick Piggin 			submit_bh(READ, bh);
2632a4b0672dSNick Piggin 			nr_reads++;
26331da177e4SLinus Torvalds 		}
26341da177e4SLinus Torvalds 	}
26351da177e4SLinus Torvalds 
26361da177e4SLinus Torvalds 	if (nr_reads) {
26371da177e4SLinus Torvalds 		/*
26381da177e4SLinus Torvalds 		 * The page is locked, so these buffers are protected from
26391da177e4SLinus Torvalds 		 * any VM or truncate activity.  Hence we don't need to care
26401da177e4SLinus Torvalds 		 * for the buffer_head refcounts.
26411da177e4SLinus Torvalds 		 */
2642a4b0672dSNick Piggin 		for (bh = head; bh; bh = bh->b_this_page) {
26431da177e4SLinus Torvalds 			wait_on_buffer(bh);
26441da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
26451da177e4SLinus Torvalds 				ret = -EIO;
26461da177e4SLinus Torvalds 		}
26471da177e4SLinus Torvalds 		if (ret)
26481da177e4SLinus Torvalds 			goto failed;
26491da177e4SLinus Torvalds 	}
26501da177e4SLinus Torvalds 
26511da177e4SLinus Torvalds 	if (is_mapped_to_disk)
26521da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
26531da177e4SLinus Torvalds 
265403158cd7SNick Piggin 	*fsdata = head; /* to be released by nobh_write_end */
2655a4b0672dSNick Piggin 
26561da177e4SLinus Torvalds 	return 0;
26571da177e4SLinus Torvalds 
26581da177e4SLinus Torvalds failed:
265903158cd7SNick Piggin 	BUG_ON(!ret);
26601da177e4SLinus Torvalds 	/*
2661a4b0672dSNick Piggin 	 * Error recovery is a bit difficult. We need to zero out blocks that
2662a4b0672dSNick Piggin 	 * were newly allocated, and dirty them to ensure they get written out.
2663a4b0672dSNick Piggin 	 * Buffers need to be attached to the page at this point, otherwise
2664a4b0672dSNick Piggin 	 * the handling of potential IO errors during writeout would be hard
2665a4b0672dSNick Piggin 	 * (could try doing synchronous writeout, but what if that fails too?)
26661da177e4SLinus Torvalds 	 */
266703158cd7SNick Piggin 	attach_nobh_buffers(page, head);
266803158cd7SNick Piggin 	page_zero_new_buffers(page, from, to);
2669a4b0672dSNick Piggin 
267003158cd7SNick Piggin out_release:
267103158cd7SNick Piggin 	unlock_page(page);
267203158cd7SNick Piggin 	page_cache_release(page);
267303158cd7SNick Piggin 	*pagep = NULL;
2674a4b0672dSNick Piggin 
267503158cd7SNick Piggin 	if (pos + len > inode->i_size)
267603158cd7SNick Piggin 		vmtruncate(inode, inode->i_size);
2677a4b0672dSNick Piggin 
26781da177e4SLinus Torvalds 	return ret;
26791da177e4SLinus Torvalds }
268003158cd7SNick Piggin EXPORT_SYMBOL(nobh_write_begin);
26811da177e4SLinus Torvalds 
268203158cd7SNick Piggin int nobh_write_end(struct file *file, struct address_space *mapping,
268303158cd7SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
268403158cd7SNick Piggin 			struct page *page, void *fsdata)
26851da177e4SLinus Torvalds {
26861da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
2687efdc3131SNick Piggin 	struct buffer_head *head = fsdata;
268803158cd7SNick Piggin 	struct buffer_head *bh;
26895b41e74aSDmitri Monakhov 	BUG_ON(fsdata != NULL && page_has_buffers(page));
26901da177e4SLinus Torvalds 
2691*d4cf109fSDave Kleikamp 	if (unlikely(copied < len) && head)
269203158cd7SNick Piggin 		attach_nobh_buffers(page, head);
2693a4b0672dSNick Piggin 	if (page_has_buffers(page))
269403158cd7SNick Piggin 		return generic_write_end(file, mapping, pos, len,
269503158cd7SNick Piggin 					copied, page, fsdata);
2696a4b0672dSNick Piggin 
269722c8ca78SNick Piggin 	SetPageUptodate(page);
26981da177e4SLinus Torvalds 	set_page_dirty(page);
269903158cd7SNick Piggin 	if (pos+copied > inode->i_size) {
270003158cd7SNick Piggin 		i_size_write(inode, pos+copied);
27011da177e4SLinus Torvalds 		mark_inode_dirty(inode);
27021da177e4SLinus Torvalds 	}
270303158cd7SNick Piggin 
270403158cd7SNick Piggin 	unlock_page(page);
270503158cd7SNick Piggin 	page_cache_release(page);
270603158cd7SNick Piggin 
270703158cd7SNick Piggin 	while (head) {
270803158cd7SNick Piggin 		bh = head;
270903158cd7SNick Piggin 		head = head->b_this_page;
271003158cd7SNick Piggin 		free_buffer_head(bh);
27111da177e4SLinus Torvalds 	}
271203158cd7SNick Piggin 
271303158cd7SNick Piggin 	return copied;
271403158cd7SNick Piggin }
271503158cd7SNick Piggin EXPORT_SYMBOL(nobh_write_end);
27161da177e4SLinus Torvalds 
27171da177e4SLinus Torvalds /*
27181da177e4SLinus Torvalds  * nobh_writepage() - based on block_write_full_page() except
27191da177e4SLinus Torvalds  * that it tries to operate without attaching bufferheads to
27201da177e4SLinus Torvalds  * the page.
27211da177e4SLinus Torvalds  */
27221da177e4SLinus Torvalds int nobh_writepage(struct page *page, get_block_t *get_block,
27231da177e4SLinus Torvalds 			struct writeback_control *wbc)
27241da177e4SLinus Torvalds {
27251da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
27261da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
27271da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
27281da177e4SLinus Torvalds 	unsigned offset;
27291da177e4SLinus Torvalds 	int ret;
27301da177e4SLinus Torvalds 
27311da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
27321da177e4SLinus Torvalds 	if (page->index < end_index)
27331da177e4SLinus Torvalds 		goto out;
27341da177e4SLinus Torvalds 
27351da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
27361da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
27371da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
27381da177e4SLinus Torvalds 		/*
27391da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
27401da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
27411da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
27421da177e4SLinus Torvalds 		 */
27431da177e4SLinus Torvalds #if 0
27441da177e4SLinus Torvalds 		/* Not really sure about this  - do we need this ? */
27451da177e4SLinus Torvalds 		if (page->mapping->a_ops->invalidatepage)
27461da177e4SLinus Torvalds 			page->mapping->a_ops->invalidatepage(page, offset);
27471da177e4SLinus Torvalds #endif
27481da177e4SLinus Torvalds 		unlock_page(page);
27491da177e4SLinus Torvalds 		return 0; /* don't care */
27501da177e4SLinus Torvalds 	}
27511da177e4SLinus Torvalds 
27521da177e4SLinus Torvalds 	/*
27531da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
27541da177e4SLinus Torvalds 	 * writepage invocation because it may be mmapped.  "A file is mapped
27551da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
27561da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
27571da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
27581da177e4SLinus Torvalds 	 */
2759eebd2aa3SChristoph Lameter 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
27601da177e4SLinus Torvalds out:
27611da177e4SLinus Torvalds 	ret = mpage_writepage(page, get_block, wbc);
27621da177e4SLinus Torvalds 	if (ret == -EAGAIN)
27631da177e4SLinus Torvalds 		ret = __block_write_full_page(inode, page, get_block, wbc);
27641da177e4SLinus Torvalds 	return ret;
27651da177e4SLinus Torvalds }
27661da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_writepage);
27671da177e4SLinus Torvalds 
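/*
 * Example: the nobh helpers are meant to be used together.  A sketch
 * of the corresponding address_space_operations, where the
 * hypothetical myfs_nobh_write_begin() and myfs_nobh_writepage() are
 * thin wrappers passing myfs_get_block() to nobh_write_begin() and
 * nobh_writepage() respectively:
 *
 *	static const struct address_space_operations myfs_nobh_aops = {
 *		.readpage	= myfs_readpage,
 *		.writepage	= myfs_nobh_writepage,
 *		.write_begin	= myfs_nobh_write_begin,
 *		.write_end	= nobh_write_end,
 *	};
 */
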
276803158cd7SNick Piggin int nobh_truncate_page(struct address_space *mapping,
276903158cd7SNick Piggin 			loff_t from, get_block_t *get_block)
27701da177e4SLinus Torvalds {
27711da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
27721da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
277303158cd7SNick Piggin 	unsigned blocksize;
277403158cd7SNick Piggin 	sector_t iblock;
277503158cd7SNick Piggin 	unsigned length, pos;
277603158cd7SNick Piggin 	struct inode *inode = mapping->host;
27771da177e4SLinus Torvalds 	struct page *page;
277803158cd7SNick Piggin 	struct buffer_head map_bh;
277903158cd7SNick Piggin 	int err;
27801da177e4SLinus Torvalds 
278103158cd7SNick Piggin 	blocksize = 1 << inode->i_blkbits;
278203158cd7SNick Piggin 	length = offset & (blocksize - 1);
27831da177e4SLinus Torvalds 
278403158cd7SNick Piggin 	/* Block boundary? Nothing to do */
278503158cd7SNick Piggin 	if (!length)
278603158cd7SNick Piggin 		return 0;
278703158cd7SNick Piggin 
278803158cd7SNick Piggin 	length = blocksize - length;
278903158cd7SNick Piggin 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
279003158cd7SNick Piggin 
27911da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
279203158cd7SNick Piggin 	err = -ENOMEM;
27931da177e4SLinus Torvalds 	if (!page)
27941da177e4SLinus Torvalds 		goto out;
27951da177e4SLinus Torvalds 
279603158cd7SNick Piggin 	if (page_has_buffers(page)) {
279703158cd7SNick Piggin has_buffers:
279803158cd7SNick Piggin 		unlock_page(page);
279903158cd7SNick Piggin 		page_cache_release(page);
280003158cd7SNick Piggin 		return block_truncate_page(mapping, from, get_block);
28011da177e4SLinus Torvalds 	}
280203158cd7SNick Piggin 
280303158cd7SNick Piggin 	/* Find the buffer that contains "offset" */
280403158cd7SNick Piggin 	pos = blocksize;
280503158cd7SNick Piggin 	while (offset >= pos) {
280603158cd7SNick Piggin 		iblock++;
280703158cd7SNick Piggin 		pos += blocksize;
280803158cd7SNick Piggin 	}
280903158cd7SNick Piggin 
281003158cd7SNick Piggin 	err = get_block(inode, iblock, &map_bh, 0);
281103158cd7SNick Piggin 	if (err)
281203158cd7SNick Piggin 		goto unlock;
281303158cd7SNick Piggin 	/* unmapped? It's a hole - nothing to do */
281403158cd7SNick Piggin 	if (!buffer_mapped(&map_bh))
281503158cd7SNick Piggin 		goto unlock;
281603158cd7SNick Piggin 
281703158cd7SNick Piggin 	/* Ok, it's mapped. Make sure it's up-to-date */
281803158cd7SNick Piggin 	if (!PageUptodate(page)) {
281903158cd7SNick Piggin 		err = mapping->a_ops->readpage(NULL, page);
282003158cd7SNick Piggin 		if (err) {
282103158cd7SNick Piggin 			page_cache_release(page);
282203158cd7SNick Piggin 			goto out;
282303158cd7SNick Piggin 		}
282403158cd7SNick Piggin 		lock_page(page);
282503158cd7SNick Piggin 		if (!PageUptodate(page)) {
282603158cd7SNick Piggin 			err = -EIO;
282703158cd7SNick Piggin 			goto unlock;
282803158cd7SNick Piggin 		}
282903158cd7SNick Piggin 		if (page_has_buffers(page))
283003158cd7SNick Piggin 			goto has_buffers;
283103158cd7SNick Piggin 	}
2832eebd2aa3SChristoph Lameter 	zero_user(page, offset, length);
283303158cd7SNick Piggin 	set_page_dirty(page);
283403158cd7SNick Piggin 	err = 0;
283503158cd7SNick Piggin 
283603158cd7SNick Piggin unlock:
28371da177e4SLinus Torvalds 	unlock_page(page);
28381da177e4SLinus Torvalds 	page_cache_release(page);
28391da177e4SLinus Torvalds out:
284003158cd7SNick Piggin 	return err;
28411da177e4SLinus Torvalds }
28421da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_truncate_page);
28431da177e4SLinus Torvalds 
28441da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping,
28451da177e4SLinus Torvalds 			loff_t from, get_block_t *get_block)
28461da177e4SLinus Torvalds {
28471da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
28481da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
28491da177e4SLinus Torvalds 	unsigned blocksize;
285054b21a79SAndrew Morton 	sector_t iblock;
28511da177e4SLinus Torvalds 	unsigned length, pos;
28521da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
28531da177e4SLinus Torvalds 	struct page *page;
28541da177e4SLinus Torvalds 	struct buffer_head *bh;
28551da177e4SLinus Torvalds 	int err;
28561da177e4SLinus Torvalds 
28571da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
28581da177e4SLinus Torvalds 	length = offset & (blocksize - 1);
28591da177e4SLinus Torvalds 
28601da177e4SLinus Torvalds 	/* Block boundary? Nothing to do */
28611da177e4SLinus Torvalds 	if (!length)
28621da177e4SLinus Torvalds 		return 0;
28631da177e4SLinus Torvalds 
28641da177e4SLinus Torvalds 	length = blocksize - length;
286554b21a79SAndrew Morton 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
28661da177e4SLinus Torvalds 
28671da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
28681da177e4SLinus Torvalds 	err = -ENOMEM;
28691da177e4SLinus Torvalds 	if (!page)
28701da177e4SLinus Torvalds 		goto out;
28711da177e4SLinus Torvalds 
28721da177e4SLinus Torvalds 	if (!page_has_buffers(page))
28731da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
28741da177e4SLinus Torvalds 
28751da177e4SLinus Torvalds 	/* Find the buffer that contains "offset" */
28761da177e4SLinus Torvalds 	bh = page_buffers(page);
28771da177e4SLinus Torvalds 	pos = blocksize;
28781da177e4SLinus Torvalds 	while (offset >= pos) {
28791da177e4SLinus Torvalds 		bh = bh->b_this_page;
28801da177e4SLinus Torvalds 		iblock++;
28811da177e4SLinus Torvalds 		pos += blocksize;
28821da177e4SLinus Torvalds 	}
28831da177e4SLinus Torvalds 
28841da177e4SLinus Torvalds 	err = 0;
28851da177e4SLinus Torvalds 	if (!buffer_mapped(bh)) {
2886b0cf2321SBadari Pulavarty 		WARN_ON(bh->b_size != blocksize);
28871da177e4SLinus Torvalds 		err = get_block(inode, iblock, bh, 0);
28881da177e4SLinus Torvalds 		if (err)
28891da177e4SLinus Torvalds 			goto unlock;
28901da177e4SLinus Torvalds 		/* unmapped? It's a hole - nothing to do */
28911da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
28921da177e4SLinus Torvalds 			goto unlock;
28931da177e4SLinus Torvalds 	}
28941da177e4SLinus Torvalds 
28951da177e4SLinus Torvalds 	/* Ok, it's mapped. Make sure it's up-to-date */
28961da177e4SLinus Torvalds 	if (PageUptodate(page))
28971da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
28981da177e4SLinus Torvalds 
289933a266ddSDavid Chinner 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
29001da177e4SLinus Torvalds 		err = -EIO;
29011da177e4SLinus Torvalds 		ll_rw_block(READ, 1, &bh);
29021da177e4SLinus Torvalds 		wait_on_buffer(bh);
29031da177e4SLinus Torvalds 		/* Uhhuh. Read error. Complain and punt. */
29041da177e4SLinus Torvalds 		if (!buffer_uptodate(bh))
29051da177e4SLinus Torvalds 			goto unlock;
29061da177e4SLinus Torvalds 	}
29071da177e4SLinus Torvalds 
2908eebd2aa3SChristoph Lameter 	zero_user(page, offset, length);
29091da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
29101da177e4SLinus Torvalds 	err = 0;
29111da177e4SLinus Torvalds 
29121da177e4SLinus Torvalds unlock:
29131da177e4SLinus Torvalds 	unlock_page(page);
29141da177e4SLinus Torvalds 	page_cache_release(page);
29151da177e4SLinus Torvalds out:
29161da177e4SLinus Torvalds 	return err;
29171da177e4SLinus Torvalds }
29181da177e4SLinus Torvalds 
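/*
 * Example: a sketch of the usual call from a filesystem's truncate
 * path, zeroing the tail of a partial last block so that block-sized
 * writeout cannot leak stale data past the new EOF (myfs_get_block()
 * is hypothetical):
 *
 *	err = block_truncate_page(inode->i_mapping, inode->i_size,
 *					myfs_get_block);
 */
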
29191da177e4SLinus Torvalds /*
29201da177e4SLinus Torvalds  * The generic ->writepage function for buffer-backed address_spaces
29211da177e4SLinus Torvalds  */
29221da177e4SLinus Torvalds int block_write_full_page(struct page *page, get_block_t *get_block,
29231da177e4SLinus Torvalds 			struct writeback_control *wbc)
29241da177e4SLinus Torvalds {
29251da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
29261da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
29271da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
29281da177e4SLinus Torvalds 	unsigned offset;
29291da177e4SLinus Torvalds 
29301da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
29311da177e4SLinus Torvalds 	if (page->index < end_index)
29321da177e4SLinus Torvalds 		return __block_write_full_page(inode, page, get_block, wbc);
29331da177e4SLinus Torvalds 
29341da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
29351da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
29361da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
29371da177e4SLinus Torvalds 		/*
29381da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
29391da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
29401da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
29411da177e4SLinus Torvalds 		 */
2942aaa4059bSJan Kara 		do_invalidatepage(page, 0);
29431da177e4SLinus Torvalds 		unlock_page(page);
29441da177e4SLinus Torvalds 		return 0; /* don't care */
29451da177e4SLinus Torvalds 	}
29461da177e4SLinus Torvalds 
29471da177e4SLinus Torvalds 	/*
29481da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
29491da177e4SLinus Torvalds 	 * writepage invocation because it may be mmapped.  "A file is mapped
29501da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
29511da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
29521da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
29531da177e4SLinus Torvalds 	 */
2954eebd2aa3SChristoph Lameter 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
29551da177e4SLinus Torvalds 	return __block_write_full_page(inode, page, get_block, wbc);
29561da177e4SLinus Torvalds }
29571da177e4SLinus Torvalds 
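/*
 * Example: the matching ->writepage wrapper (a sketch; myfs_get_block()
 * is hypothetical):
 *
 *	static int myfs_writepage(struct page *page,
 *					struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, myfs_get_block, wbc);
 *	}
 */
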
29581da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
29591da177e4SLinus Torvalds 			    get_block_t *get_block)
29601da177e4SLinus Torvalds {
29611da177e4SLinus Torvalds 	struct buffer_head tmp;
29621da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
29631da177e4SLinus Torvalds 	tmp.b_state = 0;
29641da177e4SLinus Torvalds 	tmp.b_blocknr = 0;
2965b0cf2321SBadari Pulavarty 	tmp.b_size = 1 << inode->i_blkbits;
29661da177e4SLinus Torvalds 	get_block(inode, block, &tmp, 0);
29671da177e4SLinus Torvalds 	return tmp.b_blocknr;
29681da177e4SLinus Torvalds }
29691da177e4SLinus Torvalds 
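/*
 * Example: ->bmap, as used by the FIBMAP ioctl, is normally another
 * one-line wrapper (a sketch with hypothetical myfs_ names):
 *
 *	static sector_t myfs_bmap(struct address_space *mapping,
 *					sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */
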
29706712ecf8SNeilBrown static void end_bio_bh_io_sync(struct bio *bio, int err)
29711da177e4SLinus Torvalds {
29721da177e4SLinus Torvalds 	struct buffer_head *bh = bio->bi_private;
29731da177e4SLinus Torvalds 
29741da177e4SLinus Torvalds 	if (err == -EOPNOTSUPP) {
29751da177e4SLinus Torvalds 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
29761da177e4SLinus Torvalds 		set_bit(BH_Eopnotsupp, &bh->b_state);
29771da177e4SLinus Torvalds 	}
29781da177e4SLinus Torvalds 
297908bafc03SKeith Mannthey 	if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
298008bafc03SKeith Mannthey 		set_bit(BH_Quiet, &bh->b_state);
298108bafc03SKeith Mannthey 
29821da177e4SLinus Torvalds 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
29831da177e4SLinus Torvalds 	bio_put(bio);
29841da177e4SLinus Torvalds }
29851da177e4SLinus Torvalds 
29861da177e4SLinus Torvalds int submit_bh(int rw, struct buffer_head * bh)
29871da177e4SLinus Torvalds {
29881da177e4SLinus Torvalds 	struct bio *bio;
29891da177e4SLinus Torvalds 	int ret = 0;
29901da177e4SLinus Torvalds 
29911da177e4SLinus Torvalds 	BUG_ON(!buffer_locked(bh));
29921da177e4SLinus Torvalds 	BUG_ON(!buffer_mapped(bh));
29931da177e4SLinus Torvalds 	BUG_ON(!bh->b_end_io);
29941da177e4SLinus Torvalds 
299548fd4f93SJens Axboe 	/*
299648fd4f93SJens Axboe 	 * Mask in barrier bit for a write (could be either a WRITE or a
299748fd4f93SJens Axboe 	 * WRITE_SYNC).
299848fd4f93SJens Axboe 	 */
299948fd4f93SJens Axboe 	if (buffer_ordered(bh) && (rw & WRITE))
300048fd4f93SJens Axboe 		rw |= WRITE_BARRIER;
30011da177e4SLinus Torvalds 
30021da177e4SLinus Torvalds 	/*
300348fd4f93SJens Axboe 	 * Only clear out a write error when rewriting
30041da177e4SLinus Torvalds 	 */
300548fd4f93SJens Axboe 	if (test_set_buffer_req(bh) && (rw & WRITE))
30061da177e4SLinus Torvalds 		clear_buffer_write_io_error(bh);
30071da177e4SLinus Torvalds 
30081da177e4SLinus Torvalds 	/*
30091da177e4SLinus Torvalds 	 * from here on down, it's all bio -- do the initial mapping,
30101da177e4SLinus Torvalds 	 * submit_bio -> generic_make_request may further map this bio around
30111da177e4SLinus Torvalds 	 */
30121da177e4SLinus Torvalds 	bio = bio_alloc(GFP_NOIO, 1);
30131da177e4SLinus Torvalds 
30141da177e4SLinus Torvalds 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
30151da177e4SLinus Torvalds 	bio->bi_bdev = bh->b_bdev;
30161da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_page = bh->b_page;
30171da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_len = bh->b_size;
30181da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
30191da177e4SLinus Torvalds 
30201da177e4SLinus Torvalds 	bio->bi_vcnt = 1;
30211da177e4SLinus Torvalds 	bio->bi_idx = 0;
30221da177e4SLinus Torvalds 	bio->bi_size = bh->b_size;
30231da177e4SLinus Torvalds 
30241da177e4SLinus Torvalds 	bio->bi_end_io = end_bio_bh_io_sync;
30251da177e4SLinus Torvalds 	bio->bi_private = bh;
30261da177e4SLinus Torvalds 
30271da177e4SLinus Torvalds 	bio_get(bio);
30281da177e4SLinus Torvalds 	submit_bio(rw, bio);
30291da177e4SLinus Torvalds 
30301da177e4SLinus Torvalds 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
30311da177e4SLinus Torvalds 		ret = -EOPNOTSUPP;
30321da177e4SLinus Torvalds 
30331da177e4SLinus Torvalds 	bio_put(bio);
30341da177e4SLinus Torvalds 	return ret;
30351da177e4SLinus Torvalds }
30361da177e4SLinus Torvalds 
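/*
 * Example: the canonical pattern for synchronously reading one buffer
 * through submit_bh(), assuming the caller already holds a reference
 * on bh (e.g. from __getblk()).  end_buffer_read_sync() unlocks the
 * buffer and drops the extra reference when the IO completes:
 *
 *	lock_buffer(bh);
 *	if (!buffer_uptodate(bh)) {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_read_sync;
 *		submit_bh(READ, bh);
 *		wait_on_buffer(bh);
 *		if (!buffer_uptodate(bh))
 *			err = -EIO;
 *	} else
 *		unlock_buffer(bh);
 */
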
30371da177e4SLinus Torvalds /**
30381da177e4SLinus Torvalds  * ll_rw_block: low-level access to block devices (DEPRECATED)
3039a7662236SJan Kara  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
30401da177e4SLinus Torvalds  * @nr: number of &struct buffer_heads in the array
30411da177e4SLinus Torvalds  * @bhs: array of pointers to &struct buffer_head
30421da177e4SLinus Torvalds  *
3043a7662236SJan Kara  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
3044a7662236SJan Kara  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
3045a7662236SJan Kara  * option, %SWRITE, is like %WRITE except that the *current* data in the
3046a7662236SJan Kara  * buffers is sent to disk. The fourth option, %READA, is described in the
3047a7662236SJan Kara  * documentation for generic_make_request(), which ll_rw_block() calls.
30481da177e4SLinus Torvalds  *
30491da177e4SLinus Torvalds  * This function drops any buffer that it cannot get a lock on (with the
3050a7662236SJan Kara  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
3051a7662236SJan Kara  * clean when doing a write request, and any buffer that appears to be
3052a7662236SJan Kara  * up-to-date when doing a read request.  Further, it marks as clean buffers that
3053a7662236SJan Kara  * are processed for writing (the buffer cache won't assume that they are
3054a7662236SJan Kara  * actually clean until the buffer gets unlocked).
30551da177e4SLinus Torvalds  *
30561da177e4SLinus Torvalds  * ll_rw_block sets b_end_io to a simple completion handler that marks
30571da177e4SLinus Torvalds  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
30581da177e4SLinus Torvalds  * any waiters.
30591da177e4SLinus Torvalds  *
30601da177e4SLinus Torvalds  * All of the buffers must be for the same device, and must also be a
30611da177e4SLinus Torvalds  * multiple of the current approved size for the device.
30621da177e4SLinus Torvalds  */
30631da177e4SLinus Torvalds void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
30641da177e4SLinus Torvalds {
30651da177e4SLinus Torvalds 	int i;
30661da177e4SLinus Torvalds 
30671da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
30681da177e4SLinus Torvalds 		struct buffer_head *bh = bhs[i];
30691da177e4SLinus Torvalds 
307018ce3751SJens Axboe 		if (rw == SWRITE || rw == SWRITE_SYNC)
3071a7662236SJan Kara 			lock_buffer(bh);
3072ca5de404SNick Piggin 		else if (!trylock_buffer(bh))
30731da177e4SLinus Torvalds 			continue;
30741da177e4SLinus Torvalds 
307518ce3751SJens Axboe 		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
30761da177e4SLinus Torvalds 			if (test_clear_buffer_dirty(bh)) {
307776c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_write_sync;
3078e60e5c50SOGAWA Hirofumi 				get_bh(bh);
307918ce3751SJens Axboe 				if (rw == SWRITE_SYNC)
308018ce3751SJens Axboe 					submit_bh(WRITE_SYNC, bh);
308118ce3751SJens Axboe 				else
30821da177e4SLinus Torvalds 					submit_bh(WRITE, bh);
30831da177e4SLinus Torvalds 				continue;
30841da177e4SLinus Torvalds 			}
30851da177e4SLinus Torvalds 		} else {
30861da177e4SLinus Torvalds 			if (!buffer_uptodate(bh)) {
308776c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_read_sync;
3088e60e5c50SOGAWA Hirofumi 				get_bh(bh);
30891da177e4SLinus Torvalds 				submit_bh(rw, bh);
30901da177e4SLinus Torvalds 				continue;
30911da177e4SLinus Torvalds 			}
30921da177e4SLinus Torvalds 		}
30931da177e4SLinus Torvalds 		unlock_buffer(bh);
30941da177e4SLinus Torvalds 	}
30951da177e4SLinus Torvalds }
30961da177e4SLinus Torvalds 
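/*
 * Example: a common pattern is to batch reads with ll_rw_block() and
 * then wait on the buffers the caller actually needs (a sketch; nr,
 * bhs, i and err come from the surrounding code):
 *
 *	ll_rw_block(READ, nr, bhs);
 *	for (i = 0; i < nr; i++) {
 *		wait_on_buffer(bhs[i]);
 *		if (!buffer_uptodate(bhs[i]))
 *			err = -EIO;
 *	}
 */
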
30971da177e4SLinus Torvalds /*
30981da177e4SLinus Torvalds  * For a data-integrity writeout, we need to wait upon any in-progress I/O
30991da177e4SLinus Torvalds  * and then start new I/O and then wait upon it.  The caller must have a ref on
31001da177e4SLinus Torvalds  * the buffer_head.
31011da177e4SLinus Torvalds  */
31021da177e4SLinus Torvalds int sync_dirty_buffer(struct buffer_head *bh)
31031da177e4SLinus Torvalds {
31041da177e4SLinus Torvalds 	int ret = 0;
31051da177e4SLinus Torvalds 
31061da177e4SLinus Torvalds 	WARN_ON(atomic_read(&bh->b_count) < 1);
31071da177e4SLinus Torvalds 	lock_buffer(bh);
31081da177e4SLinus Torvalds 	if (test_clear_buffer_dirty(bh)) {
31091da177e4SLinus Torvalds 		get_bh(bh);
31101da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_write_sync;
311118ce3751SJens Axboe 		ret = submit_bh(WRITE_SYNC, bh);
31121da177e4SLinus Torvalds 		wait_on_buffer(bh);
31131da177e4SLinus Torvalds 		if (buffer_eopnotsupp(bh)) {
31141da177e4SLinus Torvalds 			clear_buffer_eopnotsupp(bh);
31151da177e4SLinus Torvalds 			ret = -EOPNOTSUPP;
31161da177e4SLinus Torvalds 		}
31171da177e4SLinus Torvalds 		if (!ret && !buffer_uptodate(bh))
31181da177e4SLinus Torvalds 			ret = -EIO;
31191da177e4SLinus Torvalds 	} else {
31201da177e4SLinus Torvalds 		unlock_buffer(bh);
31211da177e4SLinus Torvalds 	}
31221da177e4SLinus Torvalds 	return ret;
31231da177e4SLinus Torvalds }
31241da177e4SLinus Torvalds 
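/*
 * Example: a sketch of a data-integrity metadata update.  sb_bread()
 * takes the buffer reference that this function's caller must hold:
 *
 *	bh = sb_bread(sb, block);
 *	if (!bh)
 *		return -EIO;
 *	memcpy(bh->b_data + offset, data, size);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	brelse(bh);
 */
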
31251da177e4SLinus Torvalds /*
31261da177e4SLinus Torvalds  * try_to_free_buffers() checks if all the buffers on this particular page
31271da177e4SLinus Torvalds  * are unused, and releases them if so.
31281da177e4SLinus Torvalds  *
31291da177e4SLinus Torvalds  * Exclusion against try_to_free_buffers may be obtained by either
31301da177e4SLinus Torvalds  * locking the page or by holding its mapping's private_lock.
31311da177e4SLinus Torvalds  *
31321da177e4SLinus Torvalds  * If the page is dirty but all the buffers are clean then we need to
31331da177e4SLinus Torvalds  * be sure to mark the page clean as well.  This is because the page
31341da177e4SLinus Torvalds  * may be against a block device, and a later reattachment of buffers
31351da177e4SLinus Torvalds  * to a dirty page will set *all* buffers dirty, which would corrupt
31361da177e4SLinus Torvalds  * filesystem data on the same device.
31371da177e4SLinus Torvalds  *
31381da177e4SLinus Torvalds  * The same applies to regular filesystem pages: if all the buffers are
31391da177e4SLinus Torvalds  * clean then we set the page clean and proceed.  To do that, we require
31401da177e4SLinus Torvalds  * total exclusion from __set_page_dirty_buffers().  That is obtained with
31411da177e4SLinus Torvalds  * private_lock.
31421da177e4SLinus Torvalds  *
31431da177e4SLinus Torvalds  * try_to_free_buffers() is non-blocking.
31441da177e4SLinus Torvalds  */
31451da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh)
31461da177e4SLinus Torvalds {
31471da177e4SLinus Torvalds 	return atomic_read(&bh->b_count) |
31481da177e4SLinus Torvalds 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
31491da177e4SLinus Torvalds }
31501da177e4SLinus Torvalds 
31511da177e4SLinus Torvalds static int
31521da177e4SLinus Torvalds drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
31531da177e4SLinus Torvalds {
31541da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);
31551da177e4SLinus Torvalds 	struct buffer_head *bh;
31561da177e4SLinus Torvalds 
31571da177e4SLinus Torvalds 	bh = head;
31581da177e4SLinus Torvalds 	do {
3159de7d5a3bSakpm@osdl.org 		if (buffer_write_io_error(bh) && page->mapping)
31601da177e4SLinus Torvalds 			set_bit(AS_EIO, &page->mapping->flags);
31611da177e4SLinus Torvalds 		if (buffer_busy(bh))
31621da177e4SLinus Torvalds 			goto failed;
31631da177e4SLinus Torvalds 		bh = bh->b_this_page;
31641da177e4SLinus Torvalds 	} while (bh != head);
31651da177e4SLinus Torvalds 
31661da177e4SLinus Torvalds 	do {
31671da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
31681da177e4SLinus Torvalds 
3169535ee2fbSJan Kara 		if (bh->b_assoc_map)
31701da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
31711da177e4SLinus Torvalds 		bh = next;
31721da177e4SLinus Torvalds 	} while (bh != head);
31731da177e4SLinus Torvalds 	*buffers_to_free = head;
31741da177e4SLinus Torvalds 	__clear_page_buffers(page);
31751da177e4SLinus Torvalds 	return 1;
31761da177e4SLinus Torvalds failed:
31771da177e4SLinus Torvalds 	return 0;
31781da177e4SLinus Torvalds }
31791da177e4SLinus Torvalds 
31801da177e4SLinus Torvalds int try_to_free_buffers(struct page *page)
31811da177e4SLinus Torvalds {
31821da177e4SLinus Torvalds 	struct address_space * const mapping = page->mapping;
31831da177e4SLinus Torvalds 	struct buffer_head *buffers_to_free = NULL;
31841da177e4SLinus Torvalds 	int ret = 0;
31851da177e4SLinus Torvalds 
31861da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
3187ecdfc978SLinus Torvalds 	if (PageWriteback(page))
31881da177e4SLinus Torvalds 		return 0;
31891da177e4SLinus Torvalds 
31901da177e4SLinus Torvalds 	if (mapping == NULL) {		/* can this still happen? */
31911da177e4SLinus Torvalds 		ret = drop_buffers(page, &buffers_to_free);
31921da177e4SLinus Torvalds 		goto out;
31931da177e4SLinus Torvalds 	}
31941da177e4SLinus Torvalds 
31951da177e4SLinus Torvalds 	spin_lock(&mapping->private_lock);
31961da177e4SLinus Torvalds 	ret = drop_buffers(page, &buffers_to_free);
3197ecdfc978SLinus Torvalds 
3198ecdfc978SLinus Torvalds 	/*
3199ecdfc978SLinus Torvalds 	 * If the filesystem writes its buffers by hand (eg ext3)
3200ecdfc978SLinus Torvalds 	 * then we can have clean buffers against a dirty page.  We
3201ecdfc978SLinus Torvalds 	 * clean the page here; otherwise the VM will never notice
3202ecdfc978SLinus Torvalds 	 * that the filesystem did any IO at all.
3203ecdfc978SLinus Torvalds 	 *
3204ecdfc978SLinus Torvalds 	 * Also, during truncate, discard_buffer will have marked all
3205ecdfc978SLinus Torvalds 	 * the page's buffers clean.  We detect that case here and
3206ecdfc978SLinus Torvalds 	 * clean the page as well.
320787df7241SNick Piggin 	 *
320887df7241SNick Piggin 	 * private_lock must be held over this entire operation in order
320987df7241SNick Piggin 	 * to synchronise against __set_page_dirty_buffers and prevent the
321087df7241SNick Piggin 	 * dirty bit from being lost.
3211ecdfc978SLinus Torvalds 	 */
3212ecdfc978SLinus Torvalds 	if (ret)
3213ecdfc978SLinus Torvalds 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
321487df7241SNick Piggin 	spin_unlock(&mapping->private_lock);
32151da177e4SLinus Torvalds out:
32161da177e4SLinus Torvalds 	if (buffers_to_free) {
32171da177e4SLinus Torvalds 		struct buffer_head *bh = buffers_to_free;
32181da177e4SLinus Torvalds 
32191da177e4SLinus Torvalds 		do {
32201da177e4SLinus Torvalds 			struct buffer_head *next = bh->b_this_page;
32211da177e4SLinus Torvalds 			free_buffer_head(bh);
32221da177e4SLinus Torvalds 			bh = next;
32231da177e4SLinus Torvalds 		} while (bh != buffers_to_free);
32241da177e4SLinus Torvalds 	}
32251da177e4SLinus Torvalds 	return ret;
32261da177e4SLinus Torvalds }
32271da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers);
32281da177e4SLinus Torvalds 
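/*
 * Illustrative sketch (an assumption, not code from this file): most
 * callers reach try_to_free_buffers() through a filesystem's
 * ->releasepage() method, roughly:
 *
 *	static int example_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		if (PagePrivate(page))
 *			return try_to_free_buffers(page);
 *		return 1;
 *	}
 *
 * ("example_releasepage" is a made-up name.)  The caller must hold the
 * page lock, per the exclusion rules documented above.
 */
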
32293978d717SNeilBrown void block_sync_page(struct page *page)
32301da177e4SLinus Torvalds {
32311da177e4SLinus Torvalds 	struct address_space *mapping;
32321da177e4SLinus Torvalds 
32331da177e4SLinus Torvalds 	smp_mb();
32341da177e4SLinus Torvalds 	mapping = page_mapping(page);
32351da177e4SLinus Torvalds 	if (mapping)
32361da177e4SLinus Torvalds 		blk_run_backing_dev(mapping->backing_dev_info, page);
32371da177e4SLinus Torvalds }
32381da177e4SLinus Torvalds 
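/*
 * Hedged wiring example (names with "example_" are hypothetical):
 * filesystems typically point the sync_page method of their
 * address_space_operations at block_sync_page so that anyone waiting
 * on a page can kick the underlying request queue:
 *
 *	static int example_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, example_get_block);
 *	}
 *
 *	static const struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.sync_page	= block_sync_page,
 *	};
 */
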
32391da177e4SLinus Torvalds /*
32401da177e4SLinus Torvalds  * There are no bdflush tunables left.  But distributions are
32411da177e4SLinus Torvalds  * still running obsolete flush daemons, so we terminate them here.
32421da177e4SLinus Torvalds  *
32431da177e4SLinus Torvalds  * Use of bdflush() is deprecated and will be removed in a future kernel.
32441da177e4SLinus Torvalds  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
32451da177e4SLinus Torvalds  */
3246bdc480e3SHeiko Carstens SYSCALL_DEFINE2(bdflush, int, func, long, data)
32471da177e4SLinus Torvalds {
32481da177e4SLinus Torvalds 	static int msg_count;
32491da177e4SLinus Torvalds 
32501da177e4SLinus Torvalds 	if (!capable(CAP_SYS_ADMIN))
32511da177e4SLinus Torvalds 		return -EPERM;
32521da177e4SLinus Torvalds 
32531da177e4SLinus Torvalds 	if (msg_count < 5) {
32541da177e4SLinus Torvalds 		msg_count++;
32551da177e4SLinus Torvalds 		printk(KERN_INFO
32561da177e4SLinus Torvalds 			"warning: process `%s' used the obsolete bdflush"
32571da177e4SLinus Torvalds 			" system call\n", current->comm);
32581da177e4SLinus Torvalds 		printk(KERN_INFO "Fix your initscripts?\n");
32591da177e4SLinus Torvalds 	}
32601da177e4SLinus Torvalds 
32611da177e4SLinus Torvalds 	if (func == 1)
32621da177e4SLinus Torvalds 		do_exit(0);
32631da177e4SLinus Torvalds 	return 0;
32641da177e4SLinus Torvalds }
32651da177e4SLinus Torvalds 
32661da177e4SLinus Torvalds /*
32671da177e4SLinus Torvalds  * Buffer-head allocation
32681da177e4SLinus Torvalds  */
3269e18b890bSChristoph Lameter static struct kmem_cache *bh_cachep;
32701da177e4SLinus Torvalds 
32711da177e4SLinus Torvalds /*
32721da177e4SLinus Torvalds  * Once the number of bh's in the machine exceeds this level, we start
32731da177e4SLinus Torvalds  * stripping them in writeback.
32741da177e4SLinus Torvalds  */
32751da177e4SLinus Torvalds static int max_buffer_heads;
32761da177e4SLinus Torvalds 
32771da177e4SLinus Torvalds int buffer_heads_over_limit;
32781da177e4SLinus Torvalds 
32791da177e4SLinus Torvalds struct bh_accounting {
32801da177e4SLinus Torvalds 	int nr;			/* Number of live bh's */
32811da177e4SLinus Torvalds 	int ratelimit;		/* Limit cacheline bouncing */
32821da177e4SLinus Torvalds };
32831da177e4SLinus Torvalds 
32841da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
32851da177e4SLinus Torvalds 
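/*
 * Recompute buffer_heads_over_limit, but only once per 4096 calls on
 * the current CPU: the recalculation sums a per-CPU counter across
 * every online CPU, so the ratelimit keeps that cacheline walk off
 * the allocation/free fast paths.
 */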
32861da177e4SLinus Torvalds static void recalc_bh_state(void)
32871da177e4SLinus Torvalds {
32881da177e4SLinus Torvalds 	int i;
32891da177e4SLinus Torvalds 	int tot = 0;
32901da177e4SLinus Torvalds 
32911da177e4SLinus Torvalds 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
32921da177e4SLinus Torvalds 		return;
32931da177e4SLinus Torvalds 	__get_cpu_var(bh_accounting).ratelimit = 0;
32948a143426SEric Dumazet 	for_each_online_cpu(i)
32951da177e4SLinus Torvalds 		tot += per_cpu(bh_accounting, i).nr;
32961da177e4SLinus Torvalds 	buffer_heads_over_limit = (tot > max_buffer_heads);
32971da177e4SLinus Torvalds }
32981da177e4SLinus Torvalds 
3299dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
33001da177e4SLinus Torvalds {
3301488514d1SChristoph Lameter 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
33021da177e4SLinus Torvalds 	if (ret) {
3303a35afb83SChristoph Lameter 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3304736c7b80SCoywolf Qi Hunt 		get_cpu_var(bh_accounting).nr++;
33051da177e4SLinus Torvalds 		recalc_bh_state();
3306736c7b80SCoywolf Qi Hunt 		put_cpu_var(bh_accounting);
33071da177e4SLinus Torvalds 	}
33081da177e4SLinus Torvalds 	return ret;
33091da177e4SLinus Torvalds }
33101da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head);
33111da177e4SLinus Torvalds 
33121da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh)
33131da177e4SLinus Torvalds {
33141da177e4SLinus Torvalds 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
33151da177e4SLinus Torvalds 	kmem_cache_free(bh_cachep, bh);
3316736c7b80SCoywolf Qi Hunt 	get_cpu_var(bh_accounting).nr--;
33171da177e4SLinus Torvalds 	recalc_bh_state();
3318736c7b80SCoywolf Qi Hunt 	put_cpu_var(bh_accounting);
33191da177e4SLinus Torvalds }
33201da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head);
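
/*
 * Illustrative pairing (a sketch, not from this file): code that
 * builds buffer rings by hand allocates heads with alloc_buffer_head()
 * and must return every one through free_buffer_head(), with
 * b_assoc_buffers empty, so the per-CPU accounting stays balanced:
 *
 *	struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
 *	if (!bh)
 *		return -ENOMEM;
 *	(set b_size/b_data, link into a b_this_page ring, use it)
 *	free_buffer_head(bh);
 */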
33211da177e4SLinus Torvalds 
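/*
 * CPU hotplug teardown: when a CPU goes away, release the buffer_head
 * references cached in its LRU and fold its bh_accounting count into
 * the current CPU's counter so recalc_bh_state() keeps seeing an
 * accurate global total.
 */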
33221da177e4SLinus Torvalds static void buffer_exit_cpu(int cpu)
33231da177e4SLinus Torvalds {
33241da177e4SLinus Torvalds 	int i;
33251da177e4SLinus Torvalds 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
33261da177e4SLinus Torvalds 
33271da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
33281da177e4SLinus Torvalds 		brelse(b->bhs[i]);
33291da177e4SLinus Torvalds 		b->bhs[i] = NULL;
33301da177e4SLinus Torvalds 	}
33318a143426SEric Dumazet 	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
33328a143426SEric Dumazet 	per_cpu(bh_accounting, cpu).nr = 0;
33338a143426SEric Dumazet 	put_cpu_var(bh_accounting);
33341da177e4SLinus Torvalds }
33351da177e4SLinus Torvalds 
33361da177e4SLinus Torvalds static int buffer_cpu_notify(struct notifier_block *self,
33371da177e4SLinus Torvalds 			      unsigned long action, void *hcpu)
33381da177e4SLinus Torvalds {
33398bb78442SRafael J. Wysocki 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
33401da177e4SLinus Torvalds 		buffer_exit_cpu((unsigned long)hcpu);
33411da177e4SLinus Torvalds 	return NOTIFY_OK;
33421da177e4SLinus Torvalds }
33431da177e4SLinus Torvalds 
3344389d1b08SAneesh Kumar K.V /**
3345a6b91919SRandy Dunlap  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3346389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head
3347389d1b08SAneesh Kumar K.V  *
3348389d1b08SAneesh Kumar K.V  * Returns 1 if the buffer is up-to-date; otherwise returns 0
3349389d1b08SAneesh Kumar K.V  * with the buffer left locked.
3350389d1b08SAneesh Kumar K.V  */
3351389d1b08SAneesh Kumar K.V int bh_uptodate_or_lock(struct buffer_head *bh)
3352389d1b08SAneesh Kumar K.V {
3353389d1b08SAneesh Kumar K.V 	if (!buffer_uptodate(bh)) {
3354389d1b08SAneesh Kumar K.V 		lock_buffer(bh);
3355389d1b08SAneesh Kumar K.V 		if (!buffer_uptodate(bh))
3356389d1b08SAneesh Kumar K.V 			return 0;
3357389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3358389d1b08SAneesh Kumar K.V 	}
3359389d1b08SAneesh Kumar K.V 	return 1;
3360389d1b08SAneesh Kumar K.V }
3361389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_uptodate_or_lock);
3362389d1b08SAneesh Kumar K.V 
3363389d1b08SAneesh Kumar K.V /**
3364a6b91919SRandy Dunlap  * bh_submit_read - Submit a locked buffer for reading
3365389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head
3366389d1b08SAneesh Kumar K.V  *
3367389d1b08SAneesh Kumar K.V  * Returns zero on success and -EIO on error.
3368389d1b08SAneesh Kumar K.V  */
3369389d1b08SAneesh Kumar K.V int bh_submit_read(struct buffer_head *bh)
3370389d1b08SAneesh Kumar K.V {
3371389d1b08SAneesh Kumar K.V 	BUG_ON(!buffer_locked(bh));
3372389d1b08SAneesh Kumar K.V 
3373389d1b08SAneesh Kumar K.V 	if (buffer_uptodate(bh)) {
3374389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3375389d1b08SAneesh Kumar K.V 		return 0;
3376389d1b08SAneesh Kumar K.V 	}
3377389d1b08SAneesh Kumar K.V 
3378389d1b08SAneesh Kumar K.V 	get_bh(bh);
3379389d1b08SAneesh Kumar K.V 	bh->b_end_io = end_buffer_read_sync;
3380389d1b08SAneesh Kumar K.V 	submit_bh(READ, bh);
3381389d1b08SAneesh Kumar K.V 	wait_on_buffer(bh);
3382389d1b08SAneesh Kumar K.V 	if (buffer_uptodate(bh))
3383389d1b08SAneesh Kumar K.V 		return 0;
3384389d1b08SAneesh Kumar K.V 	return -EIO;
3385389d1b08SAneesh Kumar K.V }
3386389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_submit_read);
3387389d1b08SAneesh Kumar K.V 
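/*
 * Hedged usage note: the two helpers above pair naturally.  A caller
 * that wants an up-to-date buffer, issuing a read only when needed,
 * can write:
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		(bh is now locked and not up to date)
 *		if (bh_submit_read(bh) < 0)
 *			return -EIO;
 *	}
 *	(bh is up to date and unlocked here)
 */
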
3388b98938c3SChristoph Lameter static void
338951cc5068SAlexey Dobriyan init_buffer_head(void *data)
3390b98938c3SChristoph Lameter {
3391b98938c3SChristoph Lameter 	struct buffer_head *bh = data;
3392b98938c3SChristoph Lameter 
3393b98938c3SChristoph Lameter 	memset(bh, 0, sizeof(*bh));
3394b98938c3SChristoph Lameter 	INIT_LIST_HEAD(&bh->b_assoc_buffers);
3395b98938c3SChristoph Lameter }
3396b98938c3SChristoph Lameter 
33971da177e4SLinus Torvalds void __init buffer_init(void)
33981da177e4SLinus Torvalds {
33991da177e4SLinus Torvalds 	int nrpages;
34001da177e4SLinus Torvalds 
3401b98938c3SChristoph Lameter 	bh_cachep = kmem_cache_create("buffer_head",
3402b98938c3SChristoph Lameter 			sizeof(struct buffer_head), 0,
3403b98938c3SChristoph Lameter 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3404b98938c3SChristoph Lameter 				SLAB_MEM_SPREAD),
3405b98938c3SChristoph Lameter 				init_buffer_head);
34061da177e4SLinus Torvalds 
34071da177e4SLinus Torvalds 	/*
34081da177e4SLinus Torvalds 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
34091da177e4SLinus Torvalds 	 */
34101da177e4SLinus Torvalds 	nrpages = (nr_free_buffer_pages() * 10) / 100;
34111da177e4SLinus Torvalds 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
34121da177e4SLinus Torvalds 	hotcpu_notifier(buffer_cpu_notify, 0);
34131da177e4SLinus Torvalds }
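
/*
 * Worked example of the limit above (the 104-byte buffer_head size is
 * illustrative; the real sizeof(struct buffer_head) varies with
 * architecture and config): with 4096-byte pages and
 * nr_free_buffer_pages() reporting 100000 pages, nrpages is 10000 and
 * max_buffer_heads is 10000 * (4096 / 104) = 390000 buffer heads.
 */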
34141da177e4SLinus Torvalds 
34151da177e4SLinus Torvalds EXPORT_SYMBOL(__bforget);
34161da177e4SLinus Torvalds EXPORT_SYMBOL(__brelse);
34171da177e4SLinus Torvalds EXPORT_SYMBOL(__wait_on_buffer);
34181da177e4SLinus Torvalds EXPORT_SYMBOL(block_commit_write);
34191da177e4SLinus Torvalds EXPORT_SYMBOL(block_prepare_write);
342054171690SDavid Chinner EXPORT_SYMBOL(block_page_mkwrite);
34211da177e4SLinus Torvalds EXPORT_SYMBOL(block_read_full_page);
34221da177e4SLinus Torvalds EXPORT_SYMBOL(block_sync_page);
34231da177e4SLinus Torvalds EXPORT_SYMBOL(block_truncate_page);
34241da177e4SLinus Torvalds EXPORT_SYMBOL(block_write_full_page);
342589e10787SNick Piggin EXPORT_SYMBOL(cont_write_begin);
34261da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_read_sync);
34271da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_write_sync);
34281da177e4SLinus Torvalds EXPORT_SYMBOL(file_fsync);
34291da177e4SLinus Torvalds EXPORT_SYMBOL(fsync_bdev);
34301da177e4SLinus Torvalds EXPORT_SYMBOL(generic_block_bmap);
343105eb0b51SOGAWA Hirofumi EXPORT_SYMBOL(generic_cont_expand_simple);
34321da177e4SLinus Torvalds EXPORT_SYMBOL(init_buffer);
34331da177e4SLinus Torvalds EXPORT_SYMBOL(invalidate_bdev);
34341da177e4SLinus Torvalds EXPORT_SYMBOL(ll_rw_block);
34351da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_dirty);
34361da177e4SLinus Torvalds EXPORT_SYMBOL(submit_bh);
34371da177e4SLinus Torvalds EXPORT_SYMBOL(sync_dirty_buffer);
34381da177e4SLinus Torvalds EXPORT_SYMBOL(unlock_buffer);
3439