/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required by older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}

static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}

void fastcall __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void fastcall unlock_buffer(struct buffer_head *bh)
{
	smp_mb__before_clear_bit();
	clear_buffer_locked(bh);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head *bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}

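/*
 * Illustrative sketch, not part of the original file: the usual way the
 * primitives above combine.  If the buffer needs reading, lock it, submit
 * the read with the synchronous completion handler (declared in
 * <linux/buffer_head.h>) and sleep in wait_on_buffer(); the handler
 * unlocks the buffer for us.  The function name and caller are assumptions.
 */
static inline int example_read_buffer_sync(struct buffer_head *bh)
{
	lock_buffer(bh);			/* may sleep via __lock_buffer() */
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);		/* nothing to do */
		return 0;
	}
	get_bh(bh);				/* dropped by end_buffer_read_sync() */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);			/* sleeps via sync_buffer() above */
	return buffer_uptodate(bh) ? 0 : -EIO;
}
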
static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}

static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * the race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark the buffer up-to-date
 * and unlock it.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

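/*
 * Illustrative sketch, not part of the original file: driving a single
 * synchronous write through end_buffer_write_sync().  This is essentially
 * the core of sync_dirty_buffer(); the function name is an assumption.
 */
static inline int example_write_buffer_sync(struct buffer_head *bh)
{
	int ret = 0;

	lock_buffer(bh);
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);		/* dropped by end_buffer_write_sync() */
		bh->b_end_io = end_buffer_write_sync;
		ret = submit_bh(WRITE, bh);
		wait_on_buffer(bh);
		if (!ret && !buffer_uptodate(bh))
			ret = -EIO;
	} else {
		unlock_buffer(bh);	/* it was clean; nothing to write */
	}
	return ret;
}
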
/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	int ret = 0;

	if (bdev)
		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
	return ret;
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = fsync_super(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}

/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;

	down(&bdev->bd_mount_sem);
	sb = get_super(bdev);
	if (sb && !(sb->s_flags & MS_RDONLY)) {
		sb->s_frozen = SB_FREEZE_WRITE;
		smp_wmb();

		__fsync_super(sb);

		sb->s_frozen = SB_FREEZE_TRANS;
		smp_wmb();

		sync_blockdev(sb->s_bdev);

		if (sb->s_op->write_super_lockfs)
			sb->s_op->write_super_lockfs(sb);
	}

	sync_blockdev(bdev);
	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
void thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	if (sb) {
		BUG_ON(sb->s_bdev != bdev);

		if (sb->s_op->unlockfs)
			sb->s_op->unlockfs(sb);
		sb->s_frozen = SB_UNFROZEN;
		smp_wmb();
		wake_up(&sb->s_wait_unfrozen);
		drop_super(sb);
	}

	up(&bdev->bd_mount_sem);
}
EXPORT_SYMBOL(thaw_bdev);

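/*
 * Illustrative sketch, not part of the original file: how a snapshot-style
 * caller (device-mapper does something similar) brackets its work with the
 * freeze/thaw pair above.  The function name is an assumption.
 */
static inline void example_snapshot_device(struct block_device *bdev)
{
	struct super_block *sb;

	sb = freeze_bdev(bdev);	/* blocks writers, syncs everything out */
	/*
	 * ... capture the now-consistent device contents here ...
	 */
	thaw_bdev(bdev, sb);	/* sb may be NULL if nothing was mounted */
}
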
/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		if (!buffer_mapped(bh))
			all_mapped = 0;
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media disk while
   there was still dirty data not synced to it (due to a bug in the device
   driver or to an error of the user), by not destroying the dirty buffers
   we could generate corruption also on the next media inserted.  Thus a
   parameter is necessary to handle this case in the safest way possible
   (trying not to corrupt the newly inserted disk with data belonging to
   the old, now corrupted, disk). Also for the ramdisk the natural thing
   to do in order to release the ramdisk memory is to destroy dirty buffers.

   These are two special cases. Normal usage implies that the device driver
   issues a sync on the device (without waiting for I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	invalidate_mapping_pages(mapping, 0, -1);
}

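/*
 * Illustrative sketch, not part of the original file: the "normal usage"
 * the comment above describes - push dirty data out first, then drop the
 * now-clean cached copies, e.g. on a media change.  Name is an assumption.
 */
static inline void example_flush_then_invalidate(struct block_device *bdev)
{
	sync_blockdev(bdev);	/* write everything back first */
	invalidate_bdev(bdev);	/* dirty/busy pages are never trashed here */
}
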
/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone **zones;
	pg_data_t *pgdat;

	wakeup_pdflush(1024);
	yield();

	for_each_online_pgdat(pgdat) {
		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
		if (*zones)
			try_to_free_pages(zones, 0, GFP_NOFS);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (printk_ratelimit())
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_write;
	set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), fsync_buffers_list(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static inline void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_buffers_list to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

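/*
 * Illustrative sketch, not part of the original file: the O_SYNC protocol
 * the comment above describes.  The caller queues the writes as it dirties
 * buffers, then waits for just that IO; anything dirtied afterwards is
 * deliberately not flushed.  The names and the assumption that the buffers
 * already sit on @list are both hypothetical.
 */
static inline int example_osync(spinlock_t *lock, struct list_head *list,
				struct buffer_head **bhs, int nr)
{
	ll_rw_block(WRITE, nr, bhs);		/* queue writes for the dirty ones */
	return osync_buffers_list(lock, list);	/* wait for submitted IO only */
}
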
/**
 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
 *                        buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

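/*
 * Illustrative sketch, not part of the original file: a minimal ->fsync()
 * built on sync_mapping_buffers(), roughly the shape of ext2's fsync for
 * its indirect blocks.  The function name is an assumption; the prototype
 * matches file_operations.fsync of this era.
 */
static inline int example_fsync(struct file *file, struct dentry *dentry,
				int datasync)
{
	/* data pages were written by the VFS caller; this catches the
	 * "associated" metadata buffers on ->private_list */
	return sync_mapping_buffers(dentry->d_inode->i_mapping);
}
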
/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	}
	if (list_empty(&bh->b_assoc_buffers)) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

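/*
 * Illustrative sketch, not part of the original file: how a filesystem's
 * block-mapping path would use mark_buffer_dirty_inode() so that a later
 * fsync() of the file (via sync_mapping_buffers()) also writes the
 * indirect block.  The function name is an assumption.
 */
static inline void example_dirty_indirect(struct inode *inode,
					  struct buffer_head *indirect_bh)
{
	/* the bh lives in the blockdev mapping, but it is queued on
	 * inode->i_mapping->private_list for the file's fsync() */
	mark_buffer_dirty_inode(indirect_bh, inode);
}
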
/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static int __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (TestSetPageDirty(page))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));

		if (mapping_cap_account_dirty(mapping)) {
			__inc_zone_page_state(page, NR_FILE_DIRTY);
			task_io_account_write(PAGE_CACHE_SIZE);
		}
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	write_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return 1;
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	spin_unlock(&mapping->private_lock);

	return __set_page_dirty(page, mapping, 1);
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

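/*
 * Illustrative sketch, not part of the original file: a buffer-backed
 * filesystem typically wires __set_page_dirty_buffers() straight into its
 * address_space_operations; the table below is a hypothetical fragment.
 */
static const struct address_space_operations example_aops = {
	.set_page_dirty	= __set_page_dirty_buffers,
	/* .readpage, .writepage, ... elided */
};
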
/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		__remove_assoc_queue(bh);
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				ll_rw_block(SWRITE, 1, &bh);
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		list_del_init(&bh->b_assoc_buffers);
		get_bh(bh);
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a page for a data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_private = NULL;
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}

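/*
 * Illustrative sketch, not part of the original file: allocating a ring of
 * buffer_heads for a locked pagecache page and attaching them, much as
 * create_empty_buffers() does elsewhere in this file.  The name is an
 * assumption; the page must be locked and belong to a mapping.
 */
static inline void example_attach_buffers(struct page *page,
					  unsigned long blocksize)
{
	struct buffer_head *head;

	head = alloc_page_buffers(page, blocksize, 1);	/* retry != 0: cannot fail */
	spin_lock(&page->mapping->private_lock);
	link_dev_buffers(page, head);		/* close the ring and attach */
	spin_unlock(&page->mapping->private_lock);
}
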
9781da177e4SLinus Torvalds /*
9791da177e4SLinus Torvalds  * Initialise the state of a blockdev page's buffers.
9801da177e4SLinus Torvalds  */
9811da177e4SLinus Torvalds static void
9821da177e4SLinus Torvalds init_page_buffers(struct page *page, struct block_device *bdev,
9831da177e4SLinus Torvalds 			sector_t block, int size)
9841da177e4SLinus Torvalds {
9851da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);
9861da177e4SLinus Torvalds 	struct buffer_head *bh = head;
9871da177e4SLinus Torvalds 	int uptodate = PageUptodate(page);
9881da177e4SLinus Torvalds 
9891da177e4SLinus Torvalds 	do {
9901da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
9911da177e4SLinus Torvalds 			init_buffer(bh, NULL, NULL);
9921da177e4SLinus Torvalds 			bh->b_bdev = bdev;
9931da177e4SLinus Torvalds 			bh->b_blocknr = block;
9941da177e4SLinus Torvalds 			if (uptodate)
9951da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
9961da177e4SLinus Torvalds 			set_buffer_mapped(bh);
9971da177e4SLinus Torvalds 		}
9981da177e4SLinus Torvalds 		block++;
9991da177e4SLinus Torvalds 		bh = bh->b_this_page;
10001da177e4SLinus Torvalds 	} while (bh != head);
10011da177e4SLinus Torvalds }
10021da177e4SLinus Torvalds 
10031da177e4SLinus Torvalds /*
10041da177e4SLinus Torvalds  * Create the page-cache page that contains the requested block.
10051da177e4SLinus Torvalds  *
10061da177e4SLinus Torvalds  * This is user purely for blockdev mappings.
10071da177e4SLinus Torvalds  */
10081da177e4SLinus Torvalds static struct page *
10091da177e4SLinus Torvalds grow_dev_page(struct block_device *bdev, sector_t block,
10101da177e4SLinus Torvalds 		pgoff_t index, int size)
10111da177e4SLinus Torvalds {
10121da177e4SLinus Torvalds 	struct inode *inode = bdev->bd_inode;
10131da177e4SLinus Torvalds 	struct page *page;
10141da177e4SLinus Torvalds 	struct buffer_head *bh;
10151da177e4SLinus Torvalds 
1016ea125892SChristoph Lameter 	page = find_or_create_page(inode->i_mapping, index,
1017769848c0SMel Gorman 		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
10181da177e4SLinus Torvalds 	if (!page)
10191da177e4SLinus Torvalds 		return NULL;
10201da177e4SLinus Torvalds 
1021e827f923SEric Sesterhenn 	BUG_ON(!PageLocked(page));
10221da177e4SLinus Torvalds 
10231da177e4SLinus Torvalds 	if (page_has_buffers(page)) {
10241da177e4SLinus Torvalds 		bh = page_buffers(page);
10251da177e4SLinus Torvalds 		if (bh->b_size == size) {
10261da177e4SLinus Torvalds 			init_page_buffers(page, bdev, block, size);
10271da177e4SLinus Torvalds 			return page;
10281da177e4SLinus Torvalds 		}
10291da177e4SLinus Torvalds 		if (!try_to_free_buffers(page))
10301da177e4SLinus Torvalds 			goto failed;
10311da177e4SLinus Torvalds 	}
10321da177e4SLinus Torvalds 
10331da177e4SLinus Torvalds 	/*
10341da177e4SLinus Torvalds 	 * Allocate some buffers for this page
10351da177e4SLinus Torvalds 	 */
10361da177e4SLinus Torvalds 	bh = alloc_page_buffers(page, size, 0);
10371da177e4SLinus Torvalds 	if (!bh)
10381da177e4SLinus Torvalds 		goto failed;
10391da177e4SLinus Torvalds 
10401da177e4SLinus Torvalds 	/*
10411da177e4SLinus Torvalds 	 * Link the page to the buffers and initialise them.  Take the
10421da177e4SLinus Torvalds 	 * lock to be atomic wrt __find_get_block(), which does not
10431da177e4SLinus Torvalds 	 * run under the page lock.
10441da177e4SLinus Torvalds 	 */
10451da177e4SLinus Torvalds 	spin_lock(&inode->i_mapping->private_lock);
10461da177e4SLinus Torvalds 	link_dev_buffers(page, bh);
10471da177e4SLinus Torvalds 	init_page_buffers(page, bdev, block, size);
10481da177e4SLinus Torvalds 	spin_unlock(&inode->i_mapping->private_lock);
10491da177e4SLinus Torvalds 	return page;
10501da177e4SLinus Torvalds 
10511da177e4SLinus Torvalds failed:
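	/*
	 * Getting here means either a wrong-size set of buffers could not
	 * be stripped by try_to_free_buffers(), or a no-retry
	 * alloc_page_buffers() call failed.  Both are treated as
	 * can't-happen for blockdev pages, hence the BUG() below; note
	 * that the cleanup after it is currently unreachable.
	 */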
10521da177e4SLinus Torvalds 	BUG();
10531da177e4SLinus Torvalds 	unlock_page(page);
10541da177e4SLinus Torvalds 	page_cache_release(page);
10551da177e4SLinus Torvalds 	return NULL;
10561da177e4SLinus Torvalds }
10571da177e4SLinus Torvalds 
10581da177e4SLinus Torvalds /*
10591da177e4SLinus Torvalds  * Create buffers for the specified block device block's page.  If
10601da177e4SLinus Torvalds  * that page was dirty, the buffers are set dirty also.
10611da177e4SLinus Torvalds  */
1062858119e1SArjan van de Ven static int
10631da177e4SLinus Torvalds grow_buffers(struct block_device *bdev, sector_t block, int size)
10641da177e4SLinus Torvalds {
10651da177e4SLinus Torvalds 	struct page *page;
10661da177e4SLinus Torvalds 	pgoff_t index;
10671da177e4SLinus Torvalds 	int sizebits;
10681da177e4SLinus Torvalds 
10691da177e4SLinus Torvalds 	sizebits = -1;
10701da177e4SLinus Torvalds 	do {
10711da177e4SLinus Torvalds 		sizebits++;
10721da177e4SLinus Torvalds 	} while ((size << sizebits) < PAGE_SIZE);
10731da177e4SLinus Torvalds 
10741da177e4SLinus Torvalds 	index = block >> sizebits;
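	/*
	 * Worked example: with 512-byte blocks and 4K pages, sizebits
	 * works out to 3, so block 1001 maps to page index 1001 >> 3 = 125,
	 * and that page's first block is 125 << 3 = 1000.
	 */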
10751da177e4SLinus Torvalds 
1076e5657933SAndrew Morton 	/*
1077e5657933SAndrew Morton 	 * Check for a block which wants to lie outside our maximum possible
1078e5657933SAndrew Morton 	 * pagecache index.  (this comparison is done using sector_t types).
1079e5657933SAndrew Morton 	 */
1080e5657933SAndrew Morton 	if (unlikely(index != block >> sizebits)) {
1081e5657933SAndrew Morton 		char b[BDEVNAME_SIZE];
1082e5657933SAndrew Morton 
1083e5657933SAndrew Morton 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1084e5657933SAndrew Morton 			"device %s\n",
1085e5657933SAndrew Morton 			__FUNCTION__, (unsigned long long)block,
1086e5657933SAndrew Morton 			bdevname(bdev, b));
1087e5657933SAndrew Morton 		return -EIO;
1088e5657933SAndrew Morton 	}
1089e5657933SAndrew Morton 	block = index << sizebits;
10901da177e4SLinus Torvalds 	/* Create a page with the proper size buffers.. */
10911da177e4SLinus Torvalds 	page = grow_dev_page(bdev, block, index, size);
10921da177e4SLinus Torvalds 	if (!page)
10931da177e4SLinus Torvalds 		return 0;
10941da177e4SLinus Torvalds 	unlock_page(page);
10951da177e4SLinus Torvalds 	page_cache_release(page);
10961da177e4SLinus Torvalds 	return 1;
10971da177e4SLinus Torvalds }
10981da177e4SLinus Torvalds 
109975c96f85SAdrian Bunk static struct buffer_head *
11001da177e4SLinus Torvalds __getblk_slow(struct block_device *bdev, sector_t block, int size)
11011da177e4SLinus Torvalds {
11021da177e4SLinus Torvalds 	/* Size must be a multiple of the hard sector size */
11031da177e4SLinus Torvalds 	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
11041da177e4SLinus Torvalds 			(size < 512 || size > PAGE_SIZE))) {
11051da177e4SLinus Torvalds 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
11061da177e4SLinus Torvalds 					size);
11071da177e4SLinus Torvalds 		printk(KERN_ERR "hardsect size: %d\n",
11081da177e4SLinus Torvalds 					bdev_hardsect_size(bdev));
11091da177e4SLinus Torvalds 
11101da177e4SLinus Torvalds 		dump_stack();
11111da177e4SLinus Torvalds 		return NULL;
11121da177e4SLinus Torvalds 	}
11131da177e4SLinus Torvalds 
11141da177e4SLinus Torvalds 	for (;;) {
11151da177e4SLinus Torvalds 		struct buffer_head * bh;
1116e5657933SAndrew Morton 		int ret;
11171da177e4SLinus Torvalds 
11181da177e4SLinus Torvalds 		bh = __find_get_block(bdev, block, size);
11191da177e4SLinus Torvalds 		if (bh)
11201da177e4SLinus Torvalds 			return bh;
11211da177e4SLinus Torvalds 
1122e5657933SAndrew Morton 		ret = grow_buffers(bdev, block, size);
1123e5657933SAndrew Morton 		if (ret < 0)
1124e5657933SAndrew Morton 			return NULL;
1125e5657933SAndrew Morton 		if (ret == 0)
11261da177e4SLinus Torvalds 			free_more_memory();
11271da177e4SLinus Torvalds 	}
11281da177e4SLinus Torvalds }
11291da177e4SLinus Torvalds 
11301da177e4SLinus Torvalds /*
11311da177e4SLinus Torvalds  * The relationship between dirty buffers and dirty pages:
11321da177e4SLinus Torvalds  *
11331da177e4SLinus Torvalds  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
11341da177e4SLinus Torvalds  * the page is tagged dirty in its radix tree.
11351da177e4SLinus Torvalds  *
11361da177e4SLinus Torvalds  * At all times, the dirtiness of the buffers represents the dirtiness of
11371da177e4SLinus Torvalds  * subsections of the page.  If the page has buffers, the page dirty bit is
11381da177e4SLinus Torvalds  * merely a hint about the true dirty state.
11391da177e4SLinus Torvalds  *
11401da177e4SLinus Torvalds  * When a page is set dirty in its entirety, all its buffers are marked dirty
11411da177e4SLinus Torvalds  * (if the page has buffers).
11421da177e4SLinus Torvalds  *
11431da177e4SLinus Torvalds  * When a buffer is marked dirty, its page is dirtied, but the page's other
11441da177e4SLinus Torvalds  * buffers are not.
11451da177e4SLinus Torvalds  *
11461da177e4SLinus Torvalds  * Also.  When blockdev buffers are explicitly read with bread(), they
11471da177e4SLinus Torvalds  * individually become uptodate.  But their backing page remains not
11481da177e4SLinus Torvalds  * uptodate - even if all of its buffers are uptodate.  A subsequent
11491da177e4SLinus Torvalds  * block_read_full_page() against that page will discover all the uptodate
11501da177e4SLinus Torvalds  * buffers, will set the page uptodate and will perform no I/O.
11511da177e4SLinus Torvalds  */
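/*
 * For illustration: with 512-byte buffers on a clean 4K page, dirtying
 * a single buffer dirties that buffer and the page (and tags the page
 * in the radix tree) but leaves the other seven buffers clean:
 *
 *	bh = page_buffers(page);	(first buffer of the page)
 *	mark_buffer_dirty(bh);		(bh dirty, page dirty, siblings clean)
 */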
11521da177e4SLinus Torvalds 
11531da177e4SLinus Torvalds /**
11541da177e4SLinus Torvalds  * mark_buffer_dirty - mark a buffer_head as needing writeout
115567be2dd1SMartin Waitz  * @bh: the buffer_head to mark dirty
11561da177e4SLinus Torvalds  *
11571da177e4SLinus Torvalds  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
11581da177e4SLinus Torvalds  * backing page dirty, then tag the page as dirty in its address_space's radix
11591da177e4SLinus Torvalds  * tree and then attach the address_space's inode to its superblock's dirty
11601da177e4SLinus Torvalds  * inode list.
11611da177e4SLinus Torvalds  *
11621da177e4SLinus Torvalds  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
11631da177e4SLinus Torvalds  * mapping->tree_lock and the global inode_lock.
11641da177e4SLinus Torvalds  */
11651da177e4SLinus Torvalds void fastcall mark_buffer_dirty(struct buffer_head *bh)
11661da177e4SLinus Torvalds {
1167787d2214SNick Piggin 	WARN_ON_ONCE(!buffer_uptodate(bh));
11681da177e4SLinus Torvalds 	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1169787d2214SNick Piggin 		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
11701da177e4SLinus Torvalds }
11711da177e4SLinus Torvalds 
11721da177e4SLinus Torvalds /*
11731da177e4SLinus Torvalds  * Decrement a buffer_head's reference count.  If all buffers against a page
11741da177e4SLinus Torvalds  * have zero reference count, are clean and unlocked, and if the page is clean
11751da177e4SLinus Torvalds  * and unlocked then try_to_free_buffers() may strip the buffers from the page
11761da177e4SLinus Torvalds  * in preparation for freeing it (sometimes, rarely, buffers are removed from
11771da177e4SLinus Torvalds  * a page but it ends up not being freed, and buffers may later be reattached).
11781da177e4SLinus Torvalds  */
11791da177e4SLinus Torvalds void __brelse(struct buffer_head * buf)
11801da177e4SLinus Torvalds {
11811da177e4SLinus Torvalds 	if (atomic_read(&buf->b_count)) {
11821da177e4SLinus Torvalds 		put_bh(buf);
11831da177e4SLinus Torvalds 		return;
11841da177e4SLinus Torvalds 	}
11851da177e4SLinus Torvalds 	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
11861da177e4SLinus Torvalds 	WARN_ON(1);
11871da177e4SLinus Torvalds }
11881da177e4SLinus Torvalds 
11891da177e4SLinus Torvalds /*
11901da177e4SLinus Torvalds  * bforget() is like brelse(), except it discards any
11911da177e4SLinus Torvalds  * potentially dirty data.
11921da177e4SLinus Torvalds  */
11931da177e4SLinus Torvalds void __bforget(struct buffer_head *bh)
11941da177e4SLinus Torvalds {
11951da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
11961da177e4SLinus Torvalds 	if (!list_empty(&bh->b_assoc_buffers)) {
11971da177e4SLinus Torvalds 		struct address_space *buffer_mapping = bh->b_page->mapping;
11981da177e4SLinus Torvalds 
11991da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
12001da177e4SLinus Torvalds 		list_del_init(&bh->b_assoc_buffers);
120158ff407bSJan Kara 		bh->b_assoc_map = NULL;
12021da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
12031da177e4SLinus Torvalds 	}
12041da177e4SLinus Torvalds 	__brelse(bh);
12051da177e4SLinus Torvalds }
12061da177e4SLinus Torvalds 
12071da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh)
12081da177e4SLinus Torvalds {
12091da177e4SLinus Torvalds 	lock_buffer(bh);
12101da177e4SLinus Torvalds 	if (buffer_uptodate(bh)) {
12111da177e4SLinus Torvalds 		unlock_buffer(bh);
12121da177e4SLinus Torvalds 		return bh;
12131da177e4SLinus Torvalds 	} else {
12141da177e4SLinus Torvalds 		get_bh(bh);
12151da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_read_sync;
12161da177e4SLinus Torvalds 		submit_bh(READ, bh);
12171da177e4SLinus Torvalds 		wait_on_buffer(bh);
12181da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
12191da177e4SLinus Torvalds 			return bh;
12201da177e4SLinus Torvalds 	}
12211da177e4SLinus Torvalds 	brelse(bh);
12221da177e4SLinus Torvalds 	return NULL;
12231da177e4SLinus Torvalds }
12241da177e4SLinus Torvalds 
12251da177e4SLinus Torvalds /*
12261da177e4SLinus Torvalds  * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
12271da177e4SLinus Torvalds  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
12281da177e4SLinus Torvalds  * refcount elevated by one when they're in an LRU.  A buffer can only appear
12291da177e4SLinus Torvalds  * once in a particular CPU's LRU.  A single buffer can be present in multiple
12301da177e4SLinus Torvalds  * CPUs' LRUs at the same time.
12311da177e4SLinus Torvalds  *
12321da177e4SLinus Torvalds  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
12331da177e4SLinus Torvalds  * sb_find_get_block().
12341da177e4SLinus Torvalds  *
12351da177e4SLinus Torvalds  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
12361da177e4SLinus Torvalds  * a local interrupt disable for that.
12371da177e4SLinus Torvalds  */
12381da177e4SLinus Torvalds 
12391da177e4SLinus Torvalds #define BH_LRU_SIZE	8
12401da177e4SLinus Torvalds 
12411da177e4SLinus Torvalds struct bh_lru {
12421da177e4SLinus Torvalds 	struct buffer_head *bhs[BH_LRU_SIZE];
12431da177e4SLinus Torvalds };
12441da177e4SLinus Torvalds 
12451da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
12461da177e4SLinus Torvalds 
12471da177e4SLinus Torvalds #ifdef CONFIG_SMP
12481da177e4SLinus Torvalds #define bh_lru_lock()	local_irq_disable()
12491da177e4SLinus Torvalds #define bh_lru_unlock()	local_irq_enable()
12501da177e4SLinus Torvalds #else
12511da177e4SLinus Torvalds #define bh_lru_lock()	preempt_disable()
12521da177e4SLinus Torvalds #define bh_lru_unlock()	preempt_enable()
12531da177e4SLinus Torvalds #endif
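/*
 * The irq disable on SMP is needed because invalidate_bh_lru() below
 * also runs from the on_each_cpu() IPI, i.e. in irq context; on UP,
 * disabling preemption suffices.
 */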
12541da177e4SLinus Torvalds 
12551da177e4SLinus Torvalds static inline void check_irqs_on(void)
12561da177e4SLinus Torvalds {
12571da177e4SLinus Torvalds #ifdef irqs_disabled
12581da177e4SLinus Torvalds 	BUG_ON(irqs_disabled());
12591da177e4SLinus Torvalds #endif
12601da177e4SLinus Torvalds }
12611da177e4SLinus Torvalds 
12621da177e4SLinus Torvalds /*
12631da177e4SLinus Torvalds  * The LRU management algorithm is dopey-but-simple.  Sorry.
12641da177e4SLinus Torvalds  */
12651da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh)
12661da177e4SLinus Torvalds {
12671da177e4SLinus Torvalds 	struct buffer_head *evictee = NULL;
12681da177e4SLinus Torvalds 	struct bh_lru *lru;
12691da177e4SLinus Torvalds 
12701da177e4SLinus Torvalds 	check_irqs_on();
12711da177e4SLinus Torvalds 	bh_lru_lock();
12721da177e4SLinus Torvalds 	lru = &__get_cpu_var(bh_lrus);
12731da177e4SLinus Torvalds 	if (lru->bhs[0] != bh) {
12741da177e4SLinus Torvalds 		struct buffer_head *bhs[BH_LRU_SIZE];
12751da177e4SLinus Torvalds 		int in;
12761da177e4SLinus Torvalds 		int out = 0;
12771da177e4SLinus Torvalds 
12781da177e4SLinus Torvalds 		get_bh(bh);
12791da177e4SLinus Torvalds 		bhs[out++] = bh;
12801da177e4SLinus Torvalds 		for (in = 0; in < BH_LRU_SIZE; in++) {
12811da177e4SLinus Torvalds 			struct buffer_head *bh2 = lru->bhs[in];
12821da177e4SLinus Torvalds 
12831da177e4SLinus Torvalds 			if (bh2 == bh) {
12841da177e4SLinus Torvalds 				__brelse(bh2);
12851da177e4SLinus Torvalds 			} else {
12861da177e4SLinus Torvalds 				if (out >= BH_LRU_SIZE) {
12871da177e4SLinus Torvalds 					BUG_ON(evictee != NULL);
12881da177e4SLinus Torvalds 					evictee = bh2;
12891da177e4SLinus Torvalds 				} else {
12901da177e4SLinus Torvalds 					bhs[out++] = bh2;
12911da177e4SLinus Torvalds 				}
12921da177e4SLinus Torvalds 			}
12931da177e4SLinus Torvalds 		}
12941da177e4SLinus Torvalds 		while (out < BH_LRU_SIZE)
12951da177e4SLinus Torvalds 			bhs[out++] = NULL;
12961da177e4SLinus Torvalds 		memcpy(lru->bhs, bhs, sizeof(bhs));
12971da177e4SLinus Torvalds 	}
12981da177e4SLinus Torvalds 	bh_lru_unlock();
12991da177e4SLinus Torvalds 
13001da177e4SLinus Torvalds 	if (evictee)
13011da177e4SLinus Torvalds 		__brelse(evictee);
13021da177e4SLinus Torvalds }
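/*
 * Example of the policy above: if the LRU holds [A B C D E F G H] and
 * a new buffer X is installed, the result is [X A B C D E F G]; H is
 * the evictee whose LRU reference is dropped.  Re-installing C gives
 * [C A B D E F G H]: C's old reference is released while the array is
 * repacked, so nothing is evicted.
 */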
13031da177e4SLinus Torvalds 
13041da177e4SLinus Torvalds /*
13051da177e4SLinus Torvalds  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
13061da177e4SLinus Torvalds  */
1307858119e1SArjan van de Ven static struct buffer_head *
13083991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
13091da177e4SLinus Torvalds {
13101da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
13111da177e4SLinus Torvalds 	struct bh_lru *lru;
13123991d3bdSTomasz Kvarsin 	unsigned int i;
13131da177e4SLinus Torvalds 
13141da177e4SLinus Torvalds 	check_irqs_on();
13151da177e4SLinus Torvalds 	bh_lru_lock();
13161da177e4SLinus Torvalds 	lru = &__get_cpu_var(bh_lrus);
13171da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
13181da177e4SLinus Torvalds 		struct buffer_head *bh = lru->bhs[i];
13191da177e4SLinus Torvalds 
13201da177e4SLinus Torvalds 		if (bh && bh->b_bdev == bdev &&
13211da177e4SLinus Torvalds 				bh->b_blocknr == block && bh->b_size == size) {
13221da177e4SLinus Torvalds 			if (i) {
13231da177e4SLinus Torvalds 				while (i) {
13241da177e4SLinus Torvalds 					lru->bhs[i] = lru->bhs[i - 1];
13251da177e4SLinus Torvalds 					i--;
13261da177e4SLinus Torvalds 				}
13271da177e4SLinus Torvalds 				lru->bhs[0] = bh;
13281da177e4SLinus Torvalds 			}
13291da177e4SLinus Torvalds 			get_bh(bh);
13301da177e4SLinus Torvalds 			ret = bh;
13311da177e4SLinus Torvalds 			break;
13321da177e4SLinus Torvalds 		}
13331da177e4SLinus Torvalds 	}
13341da177e4SLinus Torvalds 	bh_lru_unlock();
13351da177e4SLinus Torvalds 	return ret;
13361da177e4SLinus Torvalds }
13371da177e4SLinus Torvalds 
13381da177e4SLinus Torvalds /*
13391da177e4SLinus Torvalds  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
13401da177e4SLinus Torvalds  * it in the LRU and mark it as accessed.  If it is not present then return
13411da177e4SLinus Torvalds  * NULL.
13421da177e4SLinus Torvalds  */
13431da177e4SLinus Torvalds struct buffer_head *
13443991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
13451da177e4SLinus Torvalds {
13461da177e4SLinus Torvalds 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
13471da177e4SLinus Torvalds 
13481da177e4SLinus Torvalds 	if (bh == NULL) {
1349385fd4c5SCoywolf Qi Hunt 		bh = __find_get_block_slow(bdev, block);
13501da177e4SLinus Torvalds 		if (bh)
13511da177e4SLinus Torvalds 			bh_lru_install(bh);
13521da177e4SLinus Torvalds 	}
13531da177e4SLinus Torvalds 	if (bh)
13541da177e4SLinus Torvalds 		touch_buffer(bh);
13551da177e4SLinus Torvalds 	return bh;
13561da177e4SLinus Torvalds }
13571da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block);
13581da177e4SLinus Torvalds 
13591da177e4SLinus Torvalds /*
13601da177e4SLinus Torvalds  * __getblk will locate (and, if necessary, create) the buffer_head
13611da177e4SLinus Torvalds  * which corresponds to the passed block_device, block and size. The
13621da177e4SLinus Torvalds  * returned buffer has its reference count incremented.
13631da177e4SLinus Torvalds  *
13641da177e4SLinus Torvalds  * __getblk() cannot fail - it just keeps trying.  If you pass it an
13651da177e4SLinus Torvalds  * illegal block number, __getblk() will happily return a buffer_head
13661da177e4SLinus Torvalds  * which represents the non-existent block.  Very weird.
13671da177e4SLinus Torvalds  *
13681da177e4SLinus Torvalds  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
13691da177e4SLinus Torvalds  * attempt is failing.  FIXME, perhaps?
13701da177e4SLinus Torvalds  */
13711da177e4SLinus Torvalds struct buffer_head *
13723991d3bdSTomasz Kvarsin __getblk(struct block_device *bdev, sector_t block, unsigned size)
13731da177e4SLinus Torvalds {
13741da177e4SLinus Torvalds 	struct buffer_head *bh = __find_get_block(bdev, block, size);
13751da177e4SLinus Torvalds 
13761da177e4SLinus Torvalds 	might_sleep();
13771da177e4SLinus Torvalds 	if (bh == NULL)
13781da177e4SLinus Torvalds 		bh = __getblk_slow(bdev, block, size);
13791da177e4SLinus Torvalds 	return bh;
13801da177e4SLinus Torvalds }
13811da177e4SLinus Torvalds EXPORT_SYMBOL(__getblk);
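/*
 * A sketch of typical __getblk() use when a whole block is about to be
 * overwritten (an editorial example, assuming a valid bdev and a
 * 512-byte block size):
 *
 *	struct buffer_head *bh = __getblk(bdev, block, 512);
 *
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 *
 * No read is needed first because every byte gets overwritten.
 */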
13821da177e4SLinus Torvalds 
13831da177e4SLinus Torvalds /*
13841da177e4SLinus Torvalds  * Do async read-ahead on a buffer..
13851da177e4SLinus Torvalds  */
13863991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
13871da177e4SLinus Torvalds {
13881da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
1389a3e713b5SAndrew Morton 	if (likely(bh)) {
13901da177e4SLinus Torvalds 		ll_rw_block(READA, 1, &bh);
13911da177e4SLinus Torvalds 		brelse(bh);
13921da177e4SLinus Torvalds 	}
1393a3e713b5SAndrew Morton }
13941da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead);
13951da177e4SLinus Torvalds 
13961da177e4SLinus Torvalds /**
13971da177e4SLinus Torvalds  *  __bread() - reads a specified block and returns the bh
139867be2dd1SMartin Waitz  *  @bdev: the block_device to read from
13991da177e4SLinus Torvalds  *  @block: number of block
14001da177e4SLinus Torvalds  *  @size: size (in bytes) to read
14011da177e4SLinus Torvalds  *
14021da177e4SLinus Torvalds  *  Reads a specified block, and returns buffer head that contains it.
14031da177e4SLinus Torvalds  *  Reads a specified block, and returns the buffer head that contains it.
14041da177e4SLinus Torvalds  */
14051da177e4SLinus Torvalds struct buffer_head *
14063991d3bdSTomasz Kvarsin __bread(struct block_device *bdev, sector_t block, unsigned size)
14071da177e4SLinus Torvalds {
14081da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
14091da177e4SLinus Torvalds 
1410a3e713b5SAndrew Morton 	if (likely(bh) && !buffer_uptodate(bh))
14111da177e4SLinus Torvalds 		bh = __bread_slow(bh);
14121da177e4SLinus Torvalds 	return bh;
14131da177e4SLinus Torvalds }
14141da177e4SLinus Torvalds EXPORT_SYMBOL(__bread);
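/*
 * A sketch of typical __bread() use (an editorial example, assuming a
 * valid bdev and 1024-byte blocks):
 *
 *	struct buffer_head *bh = __bread(bdev, 1, 1024);
 *
 *	if (bh) {
 *		... examine bh->b_data ...
 *		brelse(bh);
 *	}
 *
 * A NULL return means the block could not be read.
 */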
14151da177e4SLinus Torvalds 
14161da177e4SLinus Torvalds /*
14171da177e4SLinus Torvalds  * invalidate_bh_lrus() is called rarely - but not only at unmount.
14181da177e4SLinus Torvalds  * This doesn't race because it runs in each cpu either in irq
14191da177e4SLinus Torvalds  * or with preempt disabled.
14201da177e4SLinus Torvalds  */
14211da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg)
14221da177e4SLinus Torvalds {
14231da177e4SLinus Torvalds 	struct bh_lru *b = &get_cpu_var(bh_lrus);
14241da177e4SLinus Torvalds 	int i;
14251da177e4SLinus Torvalds 
14261da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
14271da177e4SLinus Torvalds 		brelse(b->bhs[i]);
14281da177e4SLinus Torvalds 		b->bhs[i] = NULL;
14291da177e4SLinus Torvalds 	}
14301da177e4SLinus Torvalds 	put_cpu_var(bh_lrus);
14311da177e4SLinus Torvalds }
14321da177e4SLinus Torvalds 
1433f9a14399SPeter Zijlstra void invalidate_bh_lrus(void)
14341da177e4SLinus Torvalds {
14351da177e4SLinus Torvalds 	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
14361da177e4SLinus Torvalds }
14371da177e4SLinus Torvalds 
14381da177e4SLinus Torvalds void set_bh_page(struct buffer_head *bh,
14391da177e4SLinus Torvalds 		struct page *page, unsigned long offset)
14401da177e4SLinus Torvalds {
14411da177e4SLinus Torvalds 	bh->b_page = page;
1442e827f923SEric Sesterhenn 	BUG_ON(offset >= PAGE_SIZE);
14431da177e4SLinus Torvalds 	if (PageHighMem(page))
14441da177e4SLinus Torvalds 		/*
14451da177e4SLinus Torvalds 		 * This catches illegal uses and preserves the offset:
14461da177e4SLinus Torvalds 		 */
14471da177e4SLinus Torvalds 		bh->b_data = (char *)(0 + offset);
14481da177e4SLinus Torvalds 	else
14491da177e4SLinus Torvalds 		bh->b_data = page_address(page) + offset;
14501da177e4SLinus Torvalds }
14511da177e4SLinus Torvalds EXPORT_SYMBOL(set_bh_page);
14521da177e4SLinus Torvalds 
14531da177e4SLinus Torvalds /*
14541da177e4SLinus Torvalds  * Called when truncating a buffer on a page completely.
14551da177e4SLinus Torvalds  */
1456858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh)
14571da177e4SLinus Torvalds {
14581da177e4SLinus Torvalds 	lock_buffer(bh);
14591da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
14601da177e4SLinus Torvalds 	bh->b_bdev = NULL;
14611da177e4SLinus Torvalds 	clear_buffer_mapped(bh);
14621da177e4SLinus Torvalds 	clear_buffer_req(bh);
14631da177e4SLinus Torvalds 	clear_buffer_new(bh);
14641da177e4SLinus Torvalds 	clear_buffer_delay(bh);
146533a266ddSDavid Chinner 	clear_buffer_unwritten(bh);
14661da177e4SLinus Torvalds 	unlock_buffer(bh);
14671da177e4SLinus Torvalds }
14681da177e4SLinus Torvalds 
14691da177e4SLinus Torvalds /**
14701da177e4SLinus Torvalds  * block_invalidatepage - invalidate part or all of a buffer-backed page
14711da177e4SLinus Torvalds  *
14721da177e4SLinus Torvalds  * @page: the page which is affected
14731da177e4SLinus Torvalds  * @offset: the index of the truncation point
14741da177e4SLinus Torvalds  *
14751da177e4SLinus Torvalds  * block_invalidatepage() is called when all or part of the page has become
14761da177e4SLinus Torvalds  * invalidated by a truncate operation.
14771da177e4SLinus Torvalds  *
14781da177e4SLinus Torvalds  * block_invalidatepage() does not have to release all buffers, but it must
14791da177e4SLinus Torvalds  * ensure that no dirty buffer is left outside @offset and that no I/O
14801da177e4SLinus Torvalds  * is underway against any of the blocks which are outside the truncation
14811da177e4SLinus Torvalds  * point, because the caller is about to free (and possibly reuse) those
14821da177e4SLinus Torvalds  * blocks on-disk.
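 *
 * For example, with 512-byte buffers on a 4K page, @offset == 1024
 * discards the six buffers covering bytes 1024..4095 and keeps the
 * first two; only @offset == 0 allows the buffers to be released from
 * the page entirely.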
14831da177e4SLinus Torvalds  */
14842ff28e22SNeilBrown void block_invalidatepage(struct page *page, unsigned long offset)
14851da177e4SLinus Torvalds {
14861da177e4SLinus Torvalds 	struct buffer_head *head, *bh, *next;
14871da177e4SLinus Torvalds 	unsigned int curr_off = 0;
14881da177e4SLinus Torvalds 
14891da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
14901da177e4SLinus Torvalds 	if (!page_has_buffers(page))
14911da177e4SLinus Torvalds 		goto out;
14921da177e4SLinus Torvalds 
14931da177e4SLinus Torvalds 	head = page_buffers(page);
14941da177e4SLinus Torvalds 	bh = head;
14951da177e4SLinus Torvalds 	do {
14961da177e4SLinus Torvalds 		unsigned int next_off = curr_off + bh->b_size;
14971da177e4SLinus Torvalds 		next = bh->b_this_page;
14981da177e4SLinus Torvalds 
14991da177e4SLinus Torvalds 		/*
15001da177e4SLinus Torvalds 		 * is this block fully invalidated?
15011da177e4SLinus Torvalds 		 */
15021da177e4SLinus Torvalds 		if (offset <= curr_off)
15031da177e4SLinus Torvalds 			discard_buffer(bh);
15041da177e4SLinus Torvalds 		curr_off = next_off;
15051da177e4SLinus Torvalds 		bh = next;
15061da177e4SLinus Torvalds 	} while (bh != head);
15071da177e4SLinus Torvalds 
15081da177e4SLinus Torvalds 	/*
15091da177e4SLinus Torvalds 	 * We release buffers only if the entire page is being invalidated.
15101da177e4SLinus Torvalds 	 * The get_block cached value has been unconditionally invalidated,
15111da177e4SLinus Torvalds 	 * so real IO is not possible anymore.
15121da177e4SLinus Torvalds 	 */
15131da177e4SLinus Torvalds 	if (offset == 0)
15142ff28e22SNeilBrown 		try_to_release_page(page, 0);
15151da177e4SLinus Torvalds out:
15162ff28e22SNeilBrown 	return;
15171da177e4SLinus Torvalds }
15181da177e4SLinus Torvalds EXPORT_SYMBOL(block_invalidatepage);
15191da177e4SLinus Torvalds 
15201da177e4SLinus Torvalds /*
15211da177e4SLinus Torvalds  * We attach and possibly dirty the buffers atomically wrt
15221da177e4SLinus Torvalds  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
15231da177e4SLinus Torvalds  * is already excluded via the page lock.
15241da177e4SLinus Torvalds  */
15251da177e4SLinus Torvalds void create_empty_buffers(struct page *page,
15261da177e4SLinus Torvalds 			unsigned long blocksize, unsigned long b_state)
15271da177e4SLinus Torvalds {
15281da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *tail;
15291da177e4SLinus Torvalds 
15301da177e4SLinus Torvalds 	head = alloc_page_buffers(page, blocksize, 1);
15311da177e4SLinus Torvalds 	bh = head;
15321da177e4SLinus Torvalds 	do {
15331da177e4SLinus Torvalds 		bh->b_state |= b_state;
15341da177e4SLinus Torvalds 		tail = bh;
15351da177e4SLinus Torvalds 		bh = bh->b_this_page;
15361da177e4SLinus Torvalds 	} while (bh);
15371da177e4SLinus Torvalds 	tail->b_this_page = head;
15381da177e4SLinus Torvalds 
15391da177e4SLinus Torvalds 	spin_lock(&page->mapping->private_lock);
15401da177e4SLinus Torvalds 	if (PageUptodate(page) || PageDirty(page)) {
15411da177e4SLinus Torvalds 		bh = head;
15421da177e4SLinus Torvalds 		do {
15431da177e4SLinus Torvalds 			if (PageDirty(page))
15441da177e4SLinus Torvalds 				set_buffer_dirty(bh);
15451da177e4SLinus Torvalds 			if (PageUptodate(page))
15461da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
15471da177e4SLinus Torvalds 			bh = bh->b_this_page;
15481da177e4SLinus Torvalds 		} while (bh != head);
15491da177e4SLinus Torvalds 	}
15501da177e4SLinus Torvalds 	attach_page_buffers(page, head);
15511da177e4SLinus Torvalds 	spin_unlock(&page->mapping->private_lock);
15521da177e4SLinus Torvalds }
15531da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers);
15541da177e4SLinus Torvalds 
15551da177e4SLinus Torvalds /*
15561da177e4SLinus Torvalds  * We are taking a block for data and we don't want any output from any
15571da177e4SLinus Torvalds  * buffer-cache aliases starting from the return of this function and
15581da177e4SLinus Torvalds  * until the moment when something explicitly marks the buffer
15591da177e4SLinus Torvalds  * dirty (hopefully that will not happen until we free that block ;-)
15601da177e4SLinus Torvalds  * We don't even need to mark it not-uptodate - nobody can expect
15611da177e4SLinus Torvalds  * anything from a newly allocated buffer anyway. We used to use
15621da177e4SLinus Torvalds  * unmap_buffer() for such invalidation, but that was wrong. We definitely
15631da177e4SLinus Torvalds  * don't want to mark the alias unmapped, for example - it would confuse
15641da177e4SLinus Torvalds  * anyone who might pick it with bread() afterwards...
15651da177e4SLinus Torvalds  *
15661da177e4SLinus Torvalds  * Also..  Note that bforget() doesn't lock the buffer.  So there can
15671da177e4SLinus Torvalds  * be writeout I/O going on against recently-freed buffers.  We don't
15681da177e4SLinus Torvalds  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
15691da177e4SLinus Torvalds  * only if we really need to.  That happens here.
15701da177e4SLinus Torvalds  */
15711da177e4SLinus Torvalds void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
15721da177e4SLinus Torvalds {
15731da177e4SLinus Torvalds 	struct buffer_head *old_bh;
15741da177e4SLinus Torvalds 
15751da177e4SLinus Torvalds 	might_sleep();
15761da177e4SLinus Torvalds 
1577385fd4c5SCoywolf Qi Hunt 	old_bh = __find_get_block_slow(bdev, block);
15781da177e4SLinus Torvalds 	if (old_bh) {
15791da177e4SLinus Torvalds 		clear_buffer_dirty(old_bh);
15801da177e4SLinus Torvalds 		wait_on_buffer(old_bh);
15811da177e4SLinus Torvalds 		clear_buffer_req(old_bh);
15821da177e4SLinus Torvalds 		__brelse(old_bh);
15831da177e4SLinus Torvalds 	}
15841da177e4SLinus Torvalds }
15851da177e4SLinus Torvalds EXPORT_SYMBOL(unmap_underlying_metadata);
15861da177e4SLinus Torvalds 
15871da177e4SLinus Torvalds /*
15881da177e4SLinus Torvalds  * NOTE! All mapped/uptodate combinations are valid:
15891da177e4SLinus Torvalds  *
15901da177e4SLinus Torvalds  *	Mapped	Uptodate	Meaning
15911da177e4SLinus Torvalds  *
15921da177e4SLinus Torvalds  *	No	No		"unknown" - must do get_block()
15931da177e4SLinus Torvalds  *	No	Yes		"hole" - zero-filled
15941da177e4SLinus Torvalds  *	Yes	No		"allocated" - allocated on disk, not read in
15951da177e4SLinus Torvalds  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
15961da177e4SLinus Torvalds  *
15971da177e4SLinus Torvalds  * "Dirty" is valid only with the last case (mapped+uptodate).
15981da177e4SLinus Torvalds  */
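/*
 * Example: when block_read_full_page() hits a hole, get_block() leaves
 * the buffer unmapped; the page is zero-filled over that block and the
 * buffer set uptodate - the "No/Yes" row above - with no I/O issued
 * for it.
 */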
15991da177e4SLinus Torvalds 
16001da177e4SLinus Torvalds /*
16011da177e4SLinus Torvalds  * While block_write_full_page is writing back the dirty buffers under
16021da177e4SLinus Torvalds  * the page lock, whoever dirtied the buffers may decide to clean them
16031da177e4SLinus Torvalds  * again at any time.  We handle that by only looking at the buffer
16041da177e4SLinus Torvalds  * state inside lock_buffer().
16051da177e4SLinus Torvalds  *
16061da177e4SLinus Torvalds  * If block_write_full_page() is called for regular writeback
16071da177e4SLinus Torvalds  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
16081da177e4SLinus Torvalds  * locked buffer.   This only can happen if someone has written the buffer
16091da177e4SLinus Torvalds  * directly, with submit_bh().  At the address_space level PageWriteback
16101da177e4SLinus Torvalds  * prevents this contention from occurring.
16111da177e4SLinus Torvalds  */
16121da177e4SLinus Torvalds static int __block_write_full_page(struct inode *inode, struct page *page,
16131da177e4SLinus Torvalds 			get_block_t *get_block, struct writeback_control *wbc)
16141da177e4SLinus Torvalds {
16151da177e4SLinus Torvalds 	int err;
16161da177e4SLinus Torvalds 	sector_t block;
16171da177e4SLinus Torvalds 	sector_t last_block;
1618f0fbd5fcSAndrew Morton 	struct buffer_head *bh, *head;
1619b0cf2321SBadari Pulavarty 	const unsigned blocksize = 1 << inode->i_blkbits;
16201da177e4SLinus Torvalds 	int nr_underway = 0;
16211da177e4SLinus Torvalds 
16221da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
16231da177e4SLinus Torvalds 
16241da177e4SLinus Torvalds 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
16251da177e4SLinus Torvalds 
16261da177e4SLinus Torvalds 	if (!page_has_buffers(page)) {
1627b0cf2321SBadari Pulavarty 		create_empty_buffers(page, blocksize,
16281da177e4SLinus Torvalds 					(1 << BH_Dirty)|(1 << BH_Uptodate));
16291da177e4SLinus Torvalds 	}
16301da177e4SLinus Torvalds 
16311da177e4SLinus Torvalds 	/*
16321da177e4SLinus Torvalds 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
16331da177e4SLinus Torvalds 	 * here, and the (potentially unmapped) buffers may become dirty at
16341da177e4SLinus Torvalds 	 * any time.  If a buffer becomes dirty here after we've inspected it
16351da177e4SLinus Torvalds 	 * then we just miss that fact, and the page stays dirty.
16361da177e4SLinus Torvalds 	 *
16371da177e4SLinus Torvalds 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
16381da177e4SLinus Torvalds 	 * handle that here by just cleaning them.
16391da177e4SLinus Torvalds 	 */
16401da177e4SLinus Torvalds 
164154b21a79SAndrew Morton 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
16421da177e4SLinus Torvalds 	head = page_buffers(page);
16431da177e4SLinus Torvalds 	bh = head;
16441da177e4SLinus Torvalds 
16451da177e4SLinus Torvalds 	/*
16461da177e4SLinus Torvalds 	 * Get all the dirty buffers mapped to disk addresses and
16471da177e4SLinus Torvalds 	 * handle any aliases from the underlying blockdev's mapping.
16481da177e4SLinus Torvalds 	 */
16491da177e4SLinus Torvalds 	do {
16501da177e4SLinus Torvalds 		if (block > last_block) {
16511da177e4SLinus Torvalds 			/*
16521da177e4SLinus Torvalds 			 * mapped buffers outside i_size will occur, because
16531da177e4SLinus Torvalds 			 * this page can be outside i_size when there is a
16541da177e4SLinus Torvalds 			 * truncate in progress.
16551da177e4SLinus Torvalds 			 */
16561da177e4SLinus Torvalds 			/*
16571da177e4SLinus Torvalds 			 * The buffer was zeroed by block_write_full_page()
16581da177e4SLinus Torvalds 			 */
16591da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
16601da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
16611da177e4SLinus Torvalds 		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1662b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
16631da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
16641da177e4SLinus Torvalds 			if (err)
16651da177e4SLinus Torvalds 				goto recover;
16661da177e4SLinus Torvalds 			if (buffer_new(bh)) {
16671da177e4SLinus Torvalds 				/* blockdev mappings never come here */
16681da177e4SLinus Torvalds 				clear_buffer_new(bh);
16691da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
16701da177e4SLinus Torvalds 							bh->b_blocknr);
16711da177e4SLinus Torvalds 			}
16721da177e4SLinus Torvalds 		}
16731da177e4SLinus Torvalds 		bh = bh->b_this_page;
16741da177e4SLinus Torvalds 		block++;
16751da177e4SLinus Torvalds 	} while (bh != head);
16761da177e4SLinus Torvalds 
16771da177e4SLinus Torvalds 	do {
16781da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
16791da177e4SLinus Torvalds 			continue;
16801da177e4SLinus Torvalds 		/*
16811da177e4SLinus Torvalds 		 * If it's a fully non-blocking write attempt and we cannot
16821da177e4SLinus Torvalds 		 * lock the buffer then redirty the page.  Note that this can
16831da177e4SLinus Torvalds 		 * potentially cause a busy-wait loop from pdflush and kswapd
16841da177e4SLinus Torvalds 		 * activity, but those code paths have their own higher-level
16851da177e4SLinus Torvalds 		 * throttling.
16861da177e4SLinus Torvalds 		 */
16871da177e4SLinus Torvalds 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
16881da177e4SLinus Torvalds 			lock_buffer(bh);
16891da177e4SLinus Torvalds 		} else if (test_set_buffer_locked(bh)) {
16901da177e4SLinus Torvalds 			redirty_page_for_writepage(wbc, page);
16911da177e4SLinus Torvalds 			continue;
16921da177e4SLinus Torvalds 		}
16931da177e4SLinus Torvalds 		if (test_clear_buffer_dirty(bh)) {
16941da177e4SLinus Torvalds 			mark_buffer_async_write(bh);
16951da177e4SLinus Torvalds 		} else {
16961da177e4SLinus Torvalds 			unlock_buffer(bh);
16971da177e4SLinus Torvalds 		}
16981da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
16991da177e4SLinus Torvalds 
17001da177e4SLinus Torvalds 	/*
17011da177e4SLinus Torvalds 	 * The page and its buffers are protected by PageWriteback(), so we can
17021da177e4SLinus Torvalds 	 * drop the bh refcounts early.
17031da177e4SLinus Torvalds 	 */
17041da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
17051da177e4SLinus Torvalds 	set_page_writeback(page);
17061da177e4SLinus Torvalds 
17071da177e4SLinus Torvalds 	do {
17081da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
17091da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
17101da177e4SLinus Torvalds 			submit_bh(WRITE, bh);
17111da177e4SLinus Torvalds 			nr_underway++;
1712ad576e63SNick Piggin 		}
17131da177e4SLinus Torvalds 		bh = next;
17141da177e4SLinus Torvalds 	} while (bh != head);
171505937baaSAndrew Morton 	unlock_page(page);
17161da177e4SLinus Torvalds 
17171da177e4SLinus Torvalds 	err = 0;
17181da177e4SLinus Torvalds done:
17191da177e4SLinus Torvalds 	if (nr_underway == 0) {
17201da177e4SLinus Torvalds 		/*
17211da177e4SLinus Torvalds 		 * The page was marked dirty, but the buffers were
17221da177e4SLinus Torvalds 		 * clean.  Someone wrote them back by hand with
17231da177e4SLinus Torvalds 		 * ll_rw_block/submit_bh.  A rare case.
17241da177e4SLinus Torvalds 		 */
17251da177e4SLinus Torvalds 		end_page_writeback(page);
17263d67f2d7SNick Piggin 
17271da177e4SLinus Torvalds 		/*
17281da177e4SLinus Torvalds 		 * The page and buffer_heads can be released at any time from
17291da177e4SLinus Torvalds 		 * here on.
17301da177e4SLinus Torvalds 		 */
17311da177e4SLinus Torvalds 		wbc->pages_skipped++;	/* We didn't write this page */
17321da177e4SLinus Torvalds 	}
17331da177e4SLinus Torvalds 	return err;
17341da177e4SLinus Torvalds 
17351da177e4SLinus Torvalds recover:
17361da177e4SLinus Torvalds 	/*
17371da177e4SLinus Torvalds 	 * ENOSPC, or some other error.  We may already have added some
17381da177e4SLinus Torvalds 	 * blocks to the file, so we need to write these out to avoid
17391da177e4SLinus Torvalds 	 * exposing stale data.
17401da177e4SLinus Torvalds 	 * The page is currently locked and not marked for writeback
17411da177e4SLinus Torvalds 	 */
17421da177e4SLinus Torvalds 	bh = head;
17431da177e4SLinus Torvalds 	/* Recovery: lock and submit the mapped buffers */
17441da177e4SLinus Torvalds 	do {
17451da177e4SLinus Torvalds 		if (buffer_mapped(bh) && buffer_dirty(bh)) {
17461da177e4SLinus Torvalds 			lock_buffer(bh);
17471da177e4SLinus Torvalds 			mark_buffer_async_write(bh);
17481da177e4SLinus Torvalds 		} else {
17491da177e4SLinus Torvalds 			/*
17501da177e4SLinus Torvalds 			 * The buffer may have been set dirty during
17511da177e4SLinus Torvalds 			 * attachment to a dirty page.
17521da177e4SLinus Torvalds 			 */
17531da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
17541da177e4SLinus Torvalds 		}
17551da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
17561da177e4SLinus Torvalds 	SetPageError(page);
17571da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
17587e4c3690SAndrew Morton 	mapping_set_error(page->mapping, err);
17591da177e4SLinus Torvalds 	set_page_writeback(page);
17601da177e4SLinus Torvalds 	do {
17611da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
17621da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
17631da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
17641da177e4SLinus Torvalds 			submit_bh(WRITE, bh);
17651da177e4SLinus Torvalds 			nr_underway++;
1766ad576e63SNick Piggin 		}
17671da177e4SLinus Torvalds 		bh = next;
17681da177e4SLinus Torvalds 	} while (bh != head);
1769ffda9d30SNick Piggin 	unlock_page(page);
17701da177e4SLinus Torvalds 	goto done;
17711da177e4SLinus Torvalds }
17721da177e4SLinus Torvalds 
17731da177e4SLinus Torvalds static int __block_prepare_write(struct inode *inode, struct page *page,
17741da177e4SLinus Torvalds 		unsigned from, unsigned to, get_block_t *get_block)
17751da177e4SLinus Torvalds {
17761da177e4SLinus Torvalds 	unsigned block_start, block_end;
17771da177e4SLinus Torvalds 	sector_t block;
17781da177e4SLinus Torvalds 	int err = 0;
17791da177e4SLinus Torvalds 	unsigned blocksize, bbits;
17801da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
17811da177e4SLinus Torvalds 
17821da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
17831da177e4SLinus Torvalds 	BUG_ON(from > PAGE_CACHE_SIZE);
17841da177e4SLinus Torvalds 	BUG_ON(to > PAGE_CACHE_SIZE);
17851da177e4SLinus Torvalds 	BUG_ON(from > to);
17861da177e4SLinus Torvalds 
17871da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
17881da177e4SLinus Torvalds 	if (!page_has_buffers(page))
17891da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
17901da177e4SLinus Torvalds 	head = page_buffers(page);
17911da177e4SLinus Torvalds 
17921da177e4SLinus Torvalds 	bbits = inode->i_blkbits;
17931da177e4SLinus Torvalds 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
17941da177e4SLinus Torvalds 
17951da177e4SLinus Torvalds 	for (bh = head, block_start = 0; bh != head || !block_start;
17961da177e4SLinus Torvalds 	    block++, block_start=block_end, bh = bh->b_this_page) {
17971da177e4SLinus Torvalds 		block_end = block_start + blocksize;
17981da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
17991da177e4SLinus Torvalds 			if (PageUptodate(page)) {
18001da177e4SLinus Torvalds 				if (!buffer_uptodate(bh))
18011da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
18021da177e4SLinus Torvalds 			}
18031da177e4SLinus Torvalds 			continue;
18041da177e4SLinus Torvalds 		}
18051da177e4SLinus Torvalds 		if (buffer_new(bh))
18061da177e4SLinus Torvalds 			clear_buffer_new(bh);
18071da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
1808b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
18091da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
18101da177e4SLinus Torvalds 			if (err)
1811f3ddbdc6SNick Piggin 				break;
18121da177e4SLinus Torvalds 			if (buffer_new(bh)) {
18131da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
18141da177e4SLinus Torvalds 							bh->b_blocknr);
18151da177e4SLinus Torvalds 				if (PageUptodate(page)) {
18161da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
18171da177e4SLinus Torvalds 					continue;
18181da177e4SLinus Torvalds 				}
18191da177e4SLinus Torvalds 				if (block_end > to || block_start < from) {
18201da177e4SLinus Torvalds 					void *kaddr;
18211da177e4SLinus Torvalds 
18221da177e4SLinus Torvalds 					kaddr = kmap_atomic(page, KM_USER0);
18231da177e4SLinus Torvalds 					if (block_end > to)
18241da177e4SLinus Torvalds 						memset(kaddr+to, 0,
18251da177e4SLinus Torvalds 							block_end-to);
18261da177e4SLinus Torvalds 					if (block_start < from)
18271da177e4SLinus Torvalds 						memset(kaddr+block_start,
18281da177e4SLinus Torvalds 							0, from-block_start);
18291da177e4SLinus Torvalds 					flush_dcache_page(page);
18301da177e4SLinus Torvalds 					kunmap_atomic(kaddr, KM_USER0);
18311da177e4SLinus Torvalds 				}
18321da177e4SLinus Torvalds 				continue;
18331da177e4SLinus Torvalds 			}
18341da177e4SLinus Torvalds 		}
18351da177e4SLinus Torvalds 		if (PageUptodate(page)) {
18361da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
18371da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
18381da177e4SLinus Torvalds 			continue;
18391da177e4SLinus Torvalds 		}
18401da177e4SLinus Torvalds 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
184133a266ddSDavid Chinner 		    !buffer_unwritten(bh) &&
18421da177e4SLinus Torvalds 		     (block_start < from || block_end > to)) {
18431da177e4SLinus Torvalds 			ll_rw_block(READ, 1, &bh);
18441da177e4SLinus Torvalds 			*wait_bh++=bh;
18451da177e4SLinus Torvalds 		}
18461da177e4SLinus Torvalds 	}
18471da177e4SLinus Torvalds 	/*
18481da177e4SLinus Torvalds 	 * If we issued read requests - let them complete.
18491da177e4SLinus Torvalds 	 */
18501da177e4SLinus Torvalds 	while (wait_bh > wait) {
18511da177e4SLinus Torvalds 		wait_on_buffer(*--wait_bh);
18521da177e4SLinus Torvalds 		if (!buffer_uptodate(*wait_bh))
1853f3ddbdc6SNick Piggin 			err = -EIO;
18541da177e4SLinus Torvalds 	}
1855152becd2SAnton Altaparmakov 	if (!err) {
1856152becd2SAnton Altaparmakov 		bh = head;
1857152becd2SAnton Altaparmakov 		do {
1858152becd2SAnton Altaparmakov 			if (buffer_new(bh))
1859152becd2SAnton Altaparmakov 				clear_buffer_new(bh);
1860152becd2SAnton Altaparmakov 		} while ((bh = bh->b_this_page) != head);
1861152becd2SAnton Altaparmakov 		return 0;
1862152becd2SAnton Altaparmakov 	}
1863f3ddbdc6SNick Piggin 	/* Error case: */
18641da177e4SLinus Torvalds 	/*
18651da177e4SLinus Torvalds 	 * Zero out any newly allocated blocks to avoid exposing stale
18661da177e4SLinus Torvalds 	 * data.  If BH_New is set, we know that the block was newly
18671da177e4SLinus Torvalds 	 * allocated in the above loop.
18681da177e4SLinus Torvalds 	 */
18691da177e4SLinus Torvalds 	bh = head;
18701da177e4SLinus Torvalds 	block_start = 0;
18711da177e4SLinus Torvalds 	do {
18721da177e4SLinus Torvalds 		block_end = block_start+blocksize;
18731da177e4SLinus Torvalds 		if (block_end <= from)
18741da177e4SLinus Torvalds 			goto next_bh;
18751da177e4SLinus Torvalds 		if (block_start >= to)
18761da177e4SLinus Torvalds 			break;
18771da177e4SLinus Torvalds 		if (buffer_new(bh)) {
18781da177e4SLinus Torvalds 			clear_buffer_new(bh);
187901f2705dSNate Diller 			zero_user_page(page, block_start, bh->b_size, KM_USER0);
18801da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
18811da177e4SLinus Torvalds 			mark_buffer_dirty(bh);
18821da177e4SLinus Torvalds 		}
18831da177e4SLinus Torvalds next_bh:
18841da177e4SLinus Torvalds 		block_start = block_end;
18851da177e4SLinus Torvalds 		bh = bh->b_this_page;
18861da177e4SLinus Torvalds 	} while (bh != head);
18871da177e4SLinus Torvalds 	return err;
18881da177e4SLinus Torvalds }
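/*
 * Example of the [from, to) handling above: preparing to write bytes
 * 100..599 of a 4K page with 512-byte buffers touches buffers 0 and 1
 * only.  Each is partially covered, so it is read in first when it is
 * mapped but not yet uptodate; buffers 2..7 fall entirely outside the
 * range and are not touched.
 */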
18891da177e4SLinus Torvalds 
18901da177e4SLinus Torvalds static int __block_commit_write(struct inode *inode, struct page *page,
18911da177e4SLinus Torvalds 		unsigned from, unsigned to)
18921da177e4SLinus Torvalds {
18931da177e4SLinus Torvalds 	unsigned block_start, block_end;
18941da177e4SLinus Torvalds 	int partial = 0;
18951da177e4SLinus Torvalds 	unsigned blocksize;
18961da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
18971da177e4SLinus Torvalds 
18981da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
18991da177e4SLinus Torvalds 
19001da177e4SLinus Torvalds 	for (bh = head = page_buffers(page), block_start = 0;
19011da177e4SLinus Torvalds 	    bh != head || !block_start;
19021da177e4SLinus Torvalds 	    block_start=block_end, bh = bh->b_this_page) {
19031da177e4SLinus Torvalds 		block_end = block_start + blocksize;
19041da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
19051da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
19061da177e4SLinus Torvalds 				partial = 1;
19071da177e4SLinus Torvalds 		} else {
19081da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
19091da177e4SLinus Torvalds 			mark_buffer_dirty(bh);
19101da177e4SLinus Torvalds 		}
19111da177e4SLinus Torvalds 	}
19121da177e4SLinus Torvalds 
19131da177e4SLinus Torvalds 	/*
19141da177e4SLinus Torvalds 	 * If this is a partial write which happened to make all buffers
19151da177e4SLinus Torvalds 	 * uptodate then we can optimize away a bogus readpage() for
19161da177e4SLinus Torvalds 	 * the next read(). Here we 'discover' whether the page went
19171da177e4SLinus Torvalds 	 * uptodate as a result of this (potentially partial) write.
19181da177e4SLinus Torvalds 	 */
19191da177e4SLinus Torvalds 	if (!partial)
19201da177e4SLinus Torvalds 		SetPageUptodate(page);
19211da177e4SLinus Torvalds 	return 0;
19221da177e4SLinus Torvalds }
19231da177e4SLinus Torvalds 
19241da177e4SLinus Torvalds /*
19251da177e4SLinus Torvalds  * Generic "read page" function for block devices that have the normal
19261da177e4SLinus Torvalds  * get_block functionality. This is most of the block device filesystems.
19271da177e4SLinus Torvalds  * Reads the page asynchronously --- the unlock_buffer() and
19281da177e4SLinus Torvalds  * set/clear_buffer_uptodate() functions propagate buffer state into the
19291da177e4SLinus Torvalds  * page struct once IO has completed.
19301da177e4SLinus Torvalds  */
19311da177e4SLinus Torvalds int block_read_full_page(struct page *page, get_block_t *get_block)
19321da177e4SLinus Torvalds {
19331da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
19341da177e4SLinus Torvalds 	sector_t iblock, lblock;
19351da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
19361da177e4SLinus Torvalds 	unsigned int blocksize;
19371da177e4SLinus Torvalds 	int nr, i;
19381da177e4SLinus Torvalds 	int fully_mapped = 1;
19391da177e4SLinus Torvalds 
1940cd7619d6SMatt Mackall 	BUG_ON(!PageLocked(page));
19411da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
19421da177e4SLinus Torvalds 	if (!page_has_buffers(page))
19431da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
19441da177e4SLinus Torvalds 	head = page_buffers(page);
19451da177e4SLinus Torvalds 
19461da177e4SLinus Torvalds 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
19471da177e4SLinus Torvalds 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
19481da177e4SLinus Torvalds 	bh = head;
19491da177e4SLinus Torvalds 	nr = 0;
19501da177e4SLinus Torvalds 	i = 0;
19511da177e4SLinus Torvalds 
19521da177e4SLinus Torvalds 	do {
19531da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
19541da177e4SLinus Torvalds 			continue;
19551da177e4SLinus Torvalds 
19561da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
1957c64610baSAndrew Morton 			int err = 0;
1958c64610baSAndrew Morton 
19591da177e4SLinus Torvalds 			fully_mapped = 0;
19601da177e4SLinus Torvalds 			if (iblock < lblock) {
1961b0cf2321SBadari Pulavarty 				WARN_ON(bh->b_size != blocksize);
1962c64610baSAndrew Morton 				err = get_block(inode, iblock, bh, 0);
1963c64610baSAndrew Morton 				if (err)
19641da177e4SLinus Torvalds 					SetPageError(page);
19651da177e4SLinus Torvalds 			}
19661da177e4SLinus Torvalds 			if (!buffer_mapped(bh)) {
196701f2705dSNate Diller 				zero_user_page(page, i * blocksize, blocksize,
196801f2705dSNate Diller 						KM_USER0);
1969c64610baSAndrew Morton 				if (!err)
19701da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
19711da177e4SLinus Torvalds 				continue;
19721da177e4SLinus Torvalds 			}
19731da177e4SLinus Torvalds 			/*
19741da177e4SLinus Torvalds 			 * get_block() might have updated the buffer
19751da177e4SLinus Torvalds 			 * synchronously
19761da177e4SLinus Torvalds 			 */
19771da177e4SLinus Torvalds 			if (buffer_uptodate(bh))
19781da177e4SLinus Torvalds 				continue;
19791da177e4SLinus Torvalds 		}
19801da177e4SLinus Torvalds 		arr[nr++] = bh;
19811da177e4SLinus Torvalds 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
19821da177e4SLinus Torvalds 
19831da177e4SLinus Torvalds 	if (fully_mapped)
19841da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
19851da177e4SLinus Torvalds 
19861da177e4SLinus Torvalds 	if (!nr) {
19871da177e4SLinus Torvalds 		/*
19881da177e4SLinus Torvalds 		 * All buffers are uptodate - we can set the page uptodate
19891da177e4SLinus Torvalds 		 * as well. But not if get_block() returned an error.
19901da177e4SLinus Torvalds 		 */
19911da177e4SLinus Torvalds 		if (!PageError(page))
19921da177e4SLinus Torvalds 			SetPageUptodate(page);
19931da177e4SLinus Torvalds 		unlock_page(page);
19941da177e4SLinus Torvalds 		return 0;
19951da177e4SLinus Torvalds 	}
19961da177e4SLinus Torvalds 
19971da177e4SLinus Torvalds 	/* Stage two: lock the buffers */
19981da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
19991da177e4SLinus Torvalds 		bh = arr[i];
20001da177e4SLinus Torvalds 		lock_buffer(bh);
20011da177e4SLinus Torvalds 		mark_buffer_async_read(bh);
20021da177e4SLinus Torvalds 	}
20031da177e4SLinus Torvalds 
20041da177e4SLinus Torvalds 	/*
20051da177e4SLinus Torvalds 	 * Stage 3: start the IO.  Check for uptodateness
20061da177e4SLinus Torvalds 	 * inside the buffer lock in case another process reading
20071da177e4SLinus Torvalds 	 * the underlying blockdev brought it uptodate (the sct fix).
20081da177e4SLinus Torvalds 	 */
20091da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
20101da177e4SLinus Torvalds 		bh = arr[i];
20111da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
20121da177e4SLinus Torvalds 			end_buffer_async_read(bh, 1);
20131da177e4SLinus Torvalds 		else
20141da177e4SLinus Torvalds 			submit_bh(READ, bh);
20151da177e4SLinus Torvalds 	}
20161da177e4SLinus Torvalds 	return 0;
20171da177e4SLinus Torvalds }
20181da177e4SLinus Torvalds 
20191da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding
20201da177e4SLinus Torvalds  * truncates.  Uses prepare/commit_write to allow the filesystem to
20211da177e4SLinus Torvalds  * deal with the hole.
20221da177e4SLinus Torvalds  */
202305eb0b51SOGAWA Hirofumi static int __generic_cont_expand(struct inode *inode, loff_t size,
202405eb0b51SOGAWA Hirofumi 				 pgoff_t index, unsigned int offset)
20251da177e4SLinus Torvalds {
20261da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
20271da177e4SLinus Torvalds 	struct page *page;
202805eb0b51SOGAWA Hirofumi 	unsigned long limit;
20291da177e4SLinus Torvalds 	int err;
20301da177e4SLinus Torvalds 
20311da177e4SLinus Torvalds 	err = -EFBIG;
20321da177e4SLinus Torvalds 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
20331da177e4SLinus Torvalds 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
20341da177e4SLinus Torvalds 		send_sig(SIGXFSZ, current, 0);
20351da177e4SLinus Torvalds 		goto out;
20361da177e4SLinus Torvalds 	}
20371da177e4SLinus Torvalds 	if (size > inode->i_sb->s_maxbytes)
20381da177e4SLinus Torvalds 		goto out;
20391da177e4SLinus Torvalds 
204005eb0b51SOGAWA Hirofumi 	err = -ENOMEM;
204105eb0b51SOGAWA Hirofumi 	page = grab_cache_page(mapping, index);
204205eb0b51SOGAWA Hirofumi 	if (!page)
204305eb0b51SOGAWA Hirofumi 		goto out;
204405eb0b51SOGAWA Hirofumi 	err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
204505eb0b51SOGAWA Hirofumi 	if (err) {
204605eb0b51SOGAWA Hirofumi 		/*
204705eb0b51SOGAWA Hirofumi 		 * ->prepare_write() may have instantiated a few blocks
204805eb0b51SOGAWA Hirofumi 		 * outside i_size.  Trim these off again.
204905eb0b51SOGAWA Hirofumi 		 */
205005eb0b51SOGAWA Hirofumi 		unlock_page(page);
205105eb0b51SOGAWA Hirofumi 		page_cache_release(page);
205205eb0b51SOGAWA Hirofumi 		vmtruncate(inode, inode->i_size);
205305eb0b51SOGAWA Hirofumi 		goto out;
205405eb0b51SOGAWA Hirofumi 	}
205505eb0b51SOGAWA Hirofumi 
205605eb0b51SOGAWA Hirofumi 	err = mapping->a_ops->commit_write(NULL, page, offset, offset);
205705eb0b51SOGAWA Hirofumi 
205805eb0b51SOGAWA Hirofumi 	unlock_page(page);
205905eb0b51SOGAWA Hirofumi 	page_cache_release(page);
206005eb0b51SOGAWA Hirofumi 	if (err > 0)
206105eb0b51SOGAWA Hirofumi 		err = 0;
206205eb0b51SOGAWA Hirofumi out:
206305eb0b51SOGAWA Hirofumi 	return err;
206405eb0b51SOGAWA Hirofumi }
206505eb0b51SOGAWA Hirofumi 
206605eb0b51SOGAWA Hirofumi int generic_cont_expand(struct inode *inode, loff_t size)
206705eb0b51SOGAWA Hirofumi {
206805eb0b51SOGAWA Hirofumi 	pgoff_t index;
206905eb0b51SOGAWA Hirofumi 	unsigned int offset;
207005eb0b51SOGAWA Hirofumi 
20711da177e4SLinus Torvalds 	offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
20721da177e4SLinus Torvalds 
20731da177e4SLinus Torvalds 	/* Ugh.  In prepare/commit_write, if from==to==start of block, we
20741da177e4SLinus Torvalds 	 * skip the prepare.  Make sure we never send an offset for the start
20751da177e4SLinus Torvalds 	 * of a block.
20761da177e4SLinus Torvalds 	 */
20771da177e4SLinus Torvalds 	if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
207805eb0b51SOGAWA Hirofumi 		/* caller must handle this extra byte. */
20791da177e4SLinus Torvalds 		offset++;
20801da177e4SLinus Torvalds 	}
20811da177e4SLinus Torvalds 	index = size >> PAGE_CACHE_SHIFT;
208205eb0b51SOGAWA Hirofumi 
208305eb0b51SOGAWA Hirofumi 	return __generic_cont_expand(inode, size, index, offset);
20841da177e4SLinus Torvalds }
208505eb0b51SOGAWA Hirofumi 
208605eb0b51SOGAWA Hirofumi int generic_cont_expand_simple(struct inode *inode, loff_t size)
208705eb0b51SOGAWA Hirofumi {
208805eb0b51SOGAWA Hirofumi 	loff_t pos = size - 1;
208905eb0b51SOGAWA Hirofumi 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
209005eb0b51SOGAWA Hirofumi 	unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
209105eb0b51SOGAWA Hirofumi 
209205eb0b51SOGAWA Hirofumi 	/* prepare/commit_write can handle the case where from==to==start of block. */
209305eb0b51SOGAWA Hirofumi 	return __generic_cont_expand(inode, size, index, offset);
20941da177e4SLinus Torvalds }
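
/*
 * Illustrative sketch (not part of this file): a filesystem that cannot
 * represent holes might call generic_cont_expand_simple() from its
 * ->setattr() when a truncate grows the file.  "myfs_setattr" is a
 * hypothetical example with abbreviated error handling.
 */
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	/* expanding truncate: instantiate and zero the new tail first */
	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
		err = generic_cont_expand_simple(inode, attr->ia_size);
		if (err)
			return err;
	}
	return inode_setattr(inode, attr);
}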
20951da177e4SLinus Torvalds 
20961da177e4SLinus Torvalds /*
20971da177e4SLinus Torvalds  * For moronic filesystems that do not allow holes in files.
20981da177e4SLinus Torvalds  * We may have to extend the file.
20991da177e4SLinus Torvalds  */
21001da177e4SLinus Torvalds 
21011da177e4SLinus Torvalds int cont_prepare_write(struct page *page, unsigned offset,
21021da177e4SLinus Torvalds 		unsigned to, get_block_t *get_block, loff_t *bytes)
21031da177e4SLinus Torvalds {
21041da177e4SLinus Torvalds 	struct address_space *mapping = page->mapping;
21051da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
21061da177e4SLinus Torvalds 	struct page *new_page;
21071da177e4SLinus Torvalds 	pgoff_t pgpos;
21081da177e4SLinus Torvalds 	long status;
21091da177e4SLinus Torvalds 	unsigned zerofrom;
21101da177e4SLinus Torvalds 	unsigned blocksize = 1 << inode->i_blkbits;
21111da177e4SLinus Torvalds 
21121da177e4SLinus Torvalds 	while (page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
21131da177e4SLinus Torvalds 		status = -ENOMEM;
21141da177e4SLinus Torvalds 		new_page = grab_cache_page(mapping, pgpos);
21151da177e4SLinus Torvalds 		if (!new_page)
21161da177e4SLinus Torvalds 			goto out;
21171da177e4SLinus Torvalds 		/* we might sleep */
21181da177e4SLinus Torvalds 		if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
21191da177e4SLinus Torvalds 			unlock_page(new_page);
21201da177e4SLinus Torvalds 			page_cache_release(new_page);
21211da177e4SLinus Torvalds 			continue;
21221da177e4SLinus Torvalds 		}
21231da177e4SLinus Torvalds 		zerofrom = *bytes & ~PAGE_CACHE_MASK;
21241da177e4SLinus Torvalds 		if (zerofrom & (blocksize-1)) {
21251da177e4SLinus Torvalds 			*bytes |= (blocksize-1);
21261da177e4SLinus Torvalds 			(*bytes)++;
21271da177e4SLinus Torvalds 		}
21281da177e4SLinus Torvalds 		status = __block_prepare_write(inode, new_page, zerofrom,
21291da177e4SLinus Torvalds 						PAGE_CACHE_SIZE, get_block);
21301da177e4SLinus Torvalds 		if (status)
21311da177e4SLinus Torvalds 			goto out_unmap;
2132ff1be9adSOGAWA Hirofumi 		zero_user_page(new_page, zerofrom, PAGE_CACHE_SIZE - zerofrom,
213301f2705dSNate Diller 				KM_USER0);
21341da177e4SLinus Torvalds 		generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
21351da177e4SLinus Torvalds 		unlock_page(new_page);
21361da177e4SLinus Torvalds 		page_cache_release(new_page);
21371da177e4SLinus Torvalds 	}
21381da177e4SLinus Torvalds 
21391da177e4SLinus Torvalds 	if (page->index < pgpos) {
21401da177e4SLinus Torvalds 		/* completely inside the area */
21411da177e4SLinus Torvalds 		zerofrom = offset;
21421da177e4SLinus Torvalds 	} else {
21431da177e4SLinus Torvalds 		/* page covers the boundary, find the boundary offset */
21441da177e4SLinus Torvalds 		zerofrom = *bytes & ~PAGE_CACHE_MASK;
21451da177e4SLinus Torvalds 
21461da177e4SLinus Torvalds 		/* if we are expanding the file, the last block will be filled */
21471da177e4SLinus Torvalds 		if (to > zerofrom && (zerofrom & (blocksize-1))) {
21481da177e4SLinus Torvalds 			*bytes |= (blocksize-1);
21491da177e4SLinus Torvalds 			(*bytes)++;
21501da177e4SLinus Torvalds 		}
21511da177e4SLinus Torvalds 
21521da177e4SLinus Torvalds 		/* starting below the boundary? Nothing to zero out */
21531da177e4SLinus Torvalds 		if (offset <= zerofrom)
21541da177e4SLinus Torvalds 			zerofrom = offset;
21551da177e4SLinus Torvalds 	}
21561da177e4SLinus Torvalds 	status = __block_prepare_write(inode, page, zerofrom, to, get_block);
21571da177e4SLinus Torvalds 	if (status)
21581da177e4SLinus Torvalds 		goto out1;
21591da177e4SLinus Torvalds 	if (zerofrom < offset) {
216001f2705dSNate Diller 		zero_user_page(page, zerofrom, offset - zerofrom, KM_USER0);
21611da177e4SLinus Torvalds 		__block_commit_write(inode, page, zerofrom, offset);
21621da177e4SLinus Torvalds 	}
21631da177e4SLinus Torvalds 	return 0;
21641da177e4SLinus Torvalds out1:
21651da177e4SLinus Torvalds 	ClearPageUptodate(page);
21661da177e4SLinus Torvalds 	return status;
21671da177e4SLinus Torvalds 
21681da177e4SLinus Torvalds out_unmap:
21691da177e4SLinus Torvalds 	ClearPageUptodate(new_page);
21701da177e4SLinus Torvalds 	unlock_page(new_page);
21711da177e4SLinus Torvalds 	page_cache_release(new_page);
21721da177e4SLinus Torvalds out:
21731da177e4SLinus Torvalds 	return status;
21741da177e4SLinus Torvalds }
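
/*
 * Illustrative sketch (not part of this file): a no-holes filesystem
 * typically wraps cont_prepare_write() in its ->prepare_write(), handing it
 * a per-inode counter that tracks how far the file has been initialised on
 * disk (compare FAT's mmu_private).  "contfs_get_block" and
 * "contfs_i(inode)->mmu_private" are hypothetical names.
 */
static int contfs_prepare_write(struct file *file, struct page *page,
				unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;

	return cont_prepare_write(page, from, to, contfs_get_block,
				  &contfs_i(inode)->mmu_private);
}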
21751da177e4SLinus Torvalds 
21761da177e4SLinus Torvalds int block_prepare_write(struct page *page, unsigned from, unsigned to,
21771da177e4SLinus Torvalds 			get_block_t *get_block)
21781da177e4SLinus Torvalds {
21791da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
21801da177e4SLinus Torvalds 	int err = __block_prepare_write(inode, page, from, to, get_block);
21811da177e4SLinus Torvalds 	if (err)
21821da177e4SLinus Torvalds 		ClearPageUptodate(page);
21831da177e4SLinus Torvalds 	return err;
21841da177e4SLinus Torvalds }
21851da177e4SLinus Torvalds 
21861da177e4SLinus Torvalds int block_commit_write(struct page *page, unsigned from, unsigned to)
21871da177e4SLinus Torvalds {
21881da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
21891da177e4SLinus Torvalds 	__block_commit_write(inode, page, from, to);
21901da177e4SLinus Torvalds 	return 0;
21911da177e4SLinus Torvalds }
21921da177e4SLinus Torvalds 
21931da177e4SLinus Torvalds int generic_commit_write(struct file *file, struct page *page,
21941da177e4SLinus Torvalds 		unsigned from, unsigned to)
21951da177e4SLinus Torvalds {
21961da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
21971da177e4SLinus Torvalds 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
21981da177e4SLinus Torvalds 	__block_commit_write(inode, page, from, to);
21991da177e4SLinus Torvalds 	/*
22001da177e4SLinus Torvalds 	 * No need to use i_size_read() here, the i_size
22011b1dcc1bSJes Sorensen 	 * cannot change under us because we hold i_mutex.
22021da177e4SLinus Torvalds 	 */
22031da177e4SLinus Torvalds 	if (pos > inode->i_size) {
22041da177e4SLinus Torvalds 		i_size_write(inode, pos);
22051da177e4SLinus Torvalds 		mark_inode_dirty(inode);
22061da177e4SLinus Torvalds 	}
22071da177e4SLinus Torvalds 	return 0;
22081da177e4SLinus Torvalds }
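
/*
 * Illustrative sketch (not part of this file): the usual pairing for a
 * buffer-head based filesystem is a thin ->prepare_write() wrapper around
 * block_prepare_write() together with generic_commit_write() itself, in the
 * style of ext2.  "myfs_*" names are hypothetical.
 */
static int myfs_prepare_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= myfs_prepare_write,
	.commit_write	= generic_commit_write,
};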
22091da177e4SLinus Torvalds 
221054171690SDavid Chinner /*
221154171690SDavid Chinner  * block_page_mkwrite() is not allowed to change the file size as it gets
221254171690SDavid Chinner  * called from a page fault handler when a page is first dirtied. Hence we must
221354171690SDavid Chinner  * be careful to check for EOF conditions here. We set the page up correctly
221454171690SDavid Chinner  * for a written page which means we get ENOSPC checking when writing into
221554171690SDavid Chinner  * holes and correct delalloc and unwritten extent mapping on filesystems that
221654171690SDavid Chinner  * support these features.
221754171690SDavid Chinner  *
221854171690SDavid Chinner  * We are not allowed to take the i_mutex here so we have to play games to
221954171690SDavid Chinner  * protect against truncate races as the page could now be beyond EOF.  Because
222054171690SDavid Chinner  * vmtruncate() writes the inode size before removing pages, once we have the
222154171690SDavid Chinner  * page lock we can determine safely if the page is beyond EOF. If it is not
222254171690SDavid Chinner  * beyond EOF, then the page is guaranteed safe against truncation until we
222354171690SDavid Chinner  * unlock the page.
222454171690SDavid Chinner  */
222554171690SDavid Chinner int
222654171690SDavid Chinner block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
222754171690SDavid Chinner 		   get_block_t get_block)
222854171690SDavid Chinner {
222954171690SDavid Chinner 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
223054171690SDavid Chinner 	unsigned long end;
223154171690SDavid Chinner 	loff_t size;
223254171690SDavid Chinner 	int ret = -EINVAL;
223354171690SDavid Chinner 
223454171690SDavid Chinner 	lock_page(page);
223554171690SDavid Chinner 	size = i_size_read(inode);
223654171690SDavid Chinner 	if ((page->mapping != inode->i_mapping) ||
223718336338SNick Piggin 	    (page_offset(page) > size)) {
223854171690SDavid Chinner 		/* page got truncated out from underneath us */
223954171690SDavid Chinner 		goto out_unlock;
224054171690SDavid Chinner 	}
224154171690SDavid Chinner 
224254171690SDavid Chinner 	/* page is wholly or partially inside EOF */
224354171690SDavid Chinner 	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) > size)
224454171690SDavid Chinner 		end = size & ~PAGE_CACHE_MASK;
224554171690SDavid Chinner 	else
224654171690SDavid Chinner 		end = PAGE_CACHE_SIZE;
224754171690SDavid Chinner 
224854171690SDavid Chinner 	ret = block_prepare_write(page, 0, end, get_block);
224954171690SDavid Chinner 	if (!ret)
225054171690SDavid Chinner 		ret = block_commit_write(page, 0, end);
225154171690SDavid Chinner 
225254171690SDavid Chinner out_unlock:
225354171690SDavid Chinner 	unlock_page(page);
225454171690SDavid Chinner 	return ret;
225554171690SDavid Chinner }
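
/*
 * Illustrative sketch (not part of this file): a filesystem plugs
 * block_page_mkwrite() in through the ->page_mkwrite member of its file
 * vm_operations_struct, roughly as XFS does.  "myfs_get_block" is a
 * hypothetical get_block_t; the other vm_ops members are omitted.
 */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	return block_page_mkwrite(vma, page, myfs_get_block);
}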
22561da177e4SLinus Torvalds 
22571da177e4SLinus Torvalds /*
22581da177e4SLinus Torvalds  * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
22591da177e4SLinus Torvalds  * immediately, while under the page lock.  So it needs a special end_io
22601da177e4SLinus Torvalds  * handler which does not touch the bh after unlocking it.
22611da177e4SLinus Torvalds  */
22621da177e4SLinus Torvalds static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
22631da177e4SLinus Torvalds {
226468671f35SDmitry Monakhov 	__end_buffer_read_notouch(bh, uptodate);
22651da177e4SLinus Torvalds }
22661da177e4SLinus Torvalds 
22671da177e4SLinus Torvalds /*
22681da177e4SLinus Torvalds  * On entry, no part of the page is uptodate.
22691da177e4SLinus Torvalds  * On exit, the page is fully uptodate in the areas outside (from,to).
22701da177e4SLinus Torvalds  */
22711da177e4SLinus Torvalds int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
22721da177e4SLinus Torvalds 			get_block_t *get_block)
22731da177e4SLinus Torvalds {
22741da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
22751da177e4SLinus Torvalds 	const unsigned blkbits = inode->i_blkbits;
22761da177e4SLinus Torvalds 	const unsigned blocksize = 1 << blkbits;
2277*a4b0672dSNick Piggin 	struct buffer_head *head, *bh;
22781da177e4SLinus Torvalds 	unsigned block_in_page;
2279*a4b0672dSNick Piggin 	unsigned block_start, block_end;
22801da177e4SLinus Torvalds 	sector_t block_in_file;
22811da177e4SLinus Torvalds 	char *kaddr;
22821da177e4SLinus Torvalds 	int nr_reads = 0;
22831da177e4SLinus Torvalds 	int ret = 0;
22841da177e4SLinus Torvalds 	int is_mapped_to_disk = 1;
22851da177e4SLinus Torvalds 
2286*a4b0672dSNick Piggin 	if (page_has_buffers(page))
2287*a4b0672dSNick Piggin 		return block_prepare_write(page, from, to, get_block);
2288*a4b0672dSNick Piggin 
22891da177e4SLinus Torvalds 	if (PageMappedToDisk(page))
22901da177e4SLinus Torvalds 		return 0;
22911da177e4SLinus Torvalds 
2292*a4b0672dSNick Piggin 	/*
2293*a4b0672dSNick Piggin 	 * Allocate buffers so that we can keep track of state, and potentially
2294*a4b0672dSNick Piggin 	 * attach them to the page if an error occurs. In the common case of
2295*a4b0672dSNick Piggin 	 * no error, they will just be freed again without ever being attached
2296*a4b0672dSNick Piggin 	 * to the page (which is all OK, because we're under the page lock).
2297*a4b0672dSNick Piggin 	 *
2298*a4b0672dSNick Piggin 	 * Be careful: the buffer linked list is a NULL terminated one, rather
2299*a4b0672dSNick Piggin 	 * than the circular one we're used to.
2300*a4b0672dSNick Piggin 	 */
2301*a4b0672dSNick Piggin 	head = alloc_page_buffers(page, blocksize, 0);
2302*a4b0672dSNick Piggin 	if (!head)
2303*a4b0672dSNick Piggin 		return -ENOMEM;
2304*a4b0672dSNick Piggin 
23051da177e4SLinus Torvalds 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
23061da177e4SLinus Torvalds 
23071da177e4SLinus Torvalds 	/*
23081da177e4SLinus Torvalds 	 * We loop across all blocks in the page, whether or not they are
23091da177e4SLinus Torvalds 	 * part of the affected region.  This is so we can discover if the
23101da177e4SLinus Torvalds 	 * page is fully mapped-to-disk.
23111da177e4SLinus Torvalds 	 */
2312*a4b0672dSNick Piggin 	for (block_start = 0, block_in_page = 0, bh = head;
23131da177e4SLinus Torvalds 		  block_start < PAGE_CACHE_SIZE;
2314*a4b0672dSNick Piggin 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
23151da177e4SLinus Torvalds 		int create;
23161da177e4SLinus Torvalds 
2317*a4b0672dSNick Piggin 		block_end = block_start + blocksize;
2318*a4b0672dSNick Piggin 		bh->b_state = 0;
23191da177e4SLinus Torvalds 		create = 1;
23201da177e4SLinus Torvalds 		if (block_start >= to)
23211da177e4SLinus Torvalds 			create = 0;
23221da177e4SLinus Torvalds 		ret = get_block(inode, block_in_file + block_in_page,
2323*a4b0672dSNick Piggin 					bh, create);
23241da177e4SLinus Torvalds 		if (ret)
23251da177e4SLinus Torvalds 			goto failed;
2326*a4b0672dSNick Piggin 		if (!buffer_mapped(bh))
23271da177e4SLinus Torvalds 			is_mapped_to_disk = 0;
2328*a4b0672dSNick Piggin 		if (buffer_new(bh))
2329*a4b0672dSNick Piggin 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2330*a4b0672dSNick Piggin 		if (PageUptodate(page)) {
2331*a4b0672dSNick Piggin 			set_buffer_uptodate(bh);
23321da177e4SLinus Torvalds 			continue;
2333*a4b0672dSNick Piggin 		}
2334*a4b0672dSNick Piggin 		if (buffer_new(bh) || !buffer_mapped(bh)) {
23351da177e4SLinus Torvalds 			kaddr = kmap_atomic(page, KM_USER0);
233622c8ca78SNick Piggin 			if (block_start < from)
23371da177e4SLinus Torvalds 				memset(kaddr+block_start, 0, from-block_start);
233822c8ca78SNick Piggin 			if (block_end > to)
23391da177e4SLinus Torvalds 				memset(kaddr + to, 0, block_end - to);
23401da177e4SLinus Torvalds 			flush_dcache_page(page);
23411da177e4SLinus Torvalds 			kunmap_atomic(kaddr, KM_USER0);
23421da177e4SLinus Torvalds 			continue;
23431da177e4SLinus Torvalds 		}
2344*a4b0672dSNick Piggin 		if (buffer_uptodate(bh))
23451da177e4SLinus Torvalds 			continue;	/* reiserfs does this */
23461da177e4SLinus Torvalds 		if (block_start < from || block_end > to) {
2347*a4b0672dSNick Piggin 			lock_buffer(bh);
2348*a4b0672dSNick Piggin 			bh->b_end_io = end_buffer_read_nobh;
2349*a4b0672dSNick Piggin 			submit_bh(READ, bh);
2350*a4b0672dSNick Piggin 			nr_reads++;
23511da177e4SLinus Torvalds 		}
23521da177e4SLinus Torvalds 	}
23531da177e4SLinus Torvalds 
23541da177e4SLinus Torvalds 	if (nr_reads) {
23551da177e4SLinus Torvalds 		/*
23561da177e4SLinus Torvalds 		 * The page is locked, so these buffers are protected from
23571da177e4SLinus Torvalds 		 * any VM or truncate activity.  Hence we don't need to care
23581da177e4SLinus Torvalds 		 * for the buffer_head refcounts.
23591da177e4SLinus Torvalds 		 */
2360*a4b0672dSNick Piggin 		for (bh = head; bh; bh = bh->b_this_page) {
23611da177e4SLinus Torvalds 			wait_on_buffer(bh);
23621da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
23631da177e4SLinus Torvalds 				ret = -EIO;
23641da177e4SLinus Torvalds 		}
23651da177e4SLinus Torvalds 		if (ret)
23661da177e4SLinus Torvalds 			goto failed;
23671da177e4SLinus Torvalds 	}
23681da177e4SLinus Torvalds 
23691da177e4SLinus Torvalds 	if (is_mapped_to_disk)
23701da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
23711da177e4SLinus Torvalds 
2372*a4b0672dSNick Piggin 	do {
2373*a4b0672dSNick Piggin 		bh = head;
2374*a4b0672dSNick Piggin 		head = head->b_this_page;
2375*a4b0672dSNick Piggin 		free_buffer_head(bh);
2376*a4b0672dSNick Piggin 	} while (head);
2377*a4b0672dSNick Piggin 
23781da177e4SLinus Torvalds 	return 0;
23791da177e4SLinus Torvalds 
23801da177e4SLinus Torvalds failed:
23811da177e4SLinus Torvalds 	/*
2382*a4b0672dSNick Piggin 	 * Error recovery is a bit difficult. We need to zero out blocks that
2383*a4b0672dSNick Piggin 	 * were newly allocated, and dirty them to ensure they get written out.
2384*a4b0672dSNick Piggin 	 * Buffers need to be attached to the page at this point, otherwise
2385*a4b0672dSNick Piggin 	 * the handling of potential IO errors during writeout would be hard
2386*a4b0672dSNick Piggin 	 * (could try doing synchronous writeout, but what if that fails too?)
23871da177e4SLinus Torvalds 	 */
2388*a4b0672dSNick Piggin 	spin_lock(&page->mapping->private_lock);
2389*a4b0672dSNick Piggin 	bh = head;
2390*a4b0672dSNick Piggin 	block_start = 0;
2391*a4b0672dSNick Piggin 	do {
2392*a4b0672dSNick Piggin 		if (PageUptodate(page))
2393*a4b0672dSNick Piggin 			set_buffer_uptodate(bh);
2394*a4b0672dSNick Piggin 		if (PageDirty(page))
2395*a4b0672dSNick Piggin 			set_buffer_dirty(bh);
2396*a4b0672dSNick Piggin 
2397*a4b0672dSNick Piggin 		block_end = block_start+blocksize;
2398*a4b0672dSNick Piggin 		if (block_end <= from)
2399*a4b0672dSNick Piggin 			goto next;
2400*a4b0672dSNick Piggin 		if (block_start >= to)
2401*a4b0672dSNick Piggin 			goto next;
2402*a4b0672dSNick Piggin 
2403*a4b0672dSNick Piggin 		if (buffer_new(bh)) {
2404*a4b0672dSNick Piggin 			clear_buffer_new(bh);
2405*a4b0672dSNick Piggin 			if (!buffer_uptodate(bh)) {
2406*a4b0672dSNick Piggin 				zero_user_page(page, block_start, bh->b_size, KM_USER0);
2407*a4b0672dSNick Piggin 				set_buffer_uptodate(bh);
2408*a4b0672dSNick Piggin 			}
2409*a4b0672dSNick Piggin 			mark_buffer_dirty(bh);
2410*a4b0672dSNick Piggin 		}
2411*a4b0672dSNick Piggin next:
2412*a4b0672dSNick Piggin 		block_start = block_end;
2413*a4b0672dSNick Piggin 		if (!bh->b_this_page)
2414*a4b0672dSNick Piggin 			bh->b_this_page = head;
2415*a4b0672dSNick Piggin 		bh = bh->b_this_page;
2416*a4b0672dSNick Piggin 	} while (bh != head);
2417*a4b0672dSNick Piggin 	attach_page_buffers(page, head);
2418*a4b0672dSNick Piggin 	spin_unlock(&page->mapping->private_lock);
2419*a4b0672dSNick Piggin 
24201da177e4SLinus Torvalds 	return ret;
24211da177e4SLinus Torvalds }
24221da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_prepare_write);
24231da177e4SLinus Torvalds 
242457bf63d6SDave Kleikamp /*
242557bf63d6SDave Kleikamp  * Make sure any changes to nobh_commit_write() are reflected in
242657bf63d6SDave Kleikamp  * nobh_truncate_page(), since it doesn't call commit_write().
242757bf63d6SDave Kleikamp  */
24281da177e4SLinus Torvalds int nobh_commit_write(struct file *file, struct page *page,
24291da177e4SLinus Torvalds 		unsigned from, unsigned to)
24301da177e4SLinus Torvalds {
24311da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
24321da177e4SLinus Torvalds 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
24331da177e4SLinus Torvalds 
2434*a4b0672dSNick Piggin 	if (page_has_buffers(page))
2435*a4b0672dSNick Piggin 		return generic_commit_write(file, page, from, to);
2436*a4b0672dSNick Piggin 
243722c8ca78SNick Piggin 	SetPageUptodate(page);
24381da177e4SLinus Torvalds 	set_page_dirty(page);
24391da177e4SLinus Torvalds 	if (pos > inode->i_size) {
24401da177e4SLinus Torvalds 		i_size_write(inode, pos);
24411da177e4SLinus Torvalds 		mark_inode_dirty(inode);
24421da177e4SLinus Torvalds 	}
24431da177e4SLinus Torvalds 	return 0;
24441da177e4SLinus Torvalds }
24451da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_commit_write);
24461da177e4SLinus Torvalds 
24471da177e4SLinus Torvalds /*
24481da177e4SLinus Torvalds  * nobh_writepage() - based on block_write_full_page() except
24491da177e4SLinus Torvalds  * that it tries to operate without attaching buffer heads to
24501da177e4SLinus Torvalds  * the page.
24511da177e4SLinus Torvalds  */
24521da177e4SLinus Torvalds int nobh_writepage(struct page *page, get_block_t *get_block,
24531da177e4SLinus Torvalds 			struct writeback_control *wbc)
24541da177e4SLinus Torvalds {
24551da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
24561da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
24571da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
24581da177e4SLinus Torvalds 	unsigned offset;
24591da177e4SLinus Torvalds 	int ret;
24601da177e4SLinus Torvalds 
24611da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
24621da177e4SLinus Torvalds 	if (page->index < end_index)
24631da177e4SLinus Torvalds 		goto out;
24641da177e4SLinus Torvalds 
24651da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
24661da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
24671da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
24681da177e4SLinus Torvalds 		/*
24691da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
24701da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
24711da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
24721da177e4SLinus Torvalds 		 */
24731da177e4SLinus Torvalds #if 0
24741da177e4SLinus Torvalds 		/* Not really sure about this - do we need this? */
24751da177e4SLinus Torvalds 		if (page->mapping->a_ops->invalidatepage)
24761da177e4SLinus Torvalds 			page->mapping->a_ops->invalidatepage(page, offset);
24771da177e4SLinus Torvalds #endif
24781da177e4SLinus Torvalds 		unlock_page(page);
24791da177e4SLinus Torvalds 		return 0; /* don't care */
24801da177e4SLinus Torvalds 	}
24811da177e4SLinus Torvalds 
24821da177e4SLinus Torvalds 	/*
24831da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
24841da177e4SLinus Torvalds 	 * writepage invocation because it may be mmapped.  "A file is mapped
24851da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
24861da177e4SLinus Torvalds  * the page size, the remaining memory is zeroed when mapped, and
24871da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
24881da177e4SLinus Torvalds 	 */
248901f2705dSNate Diller 	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
24901da177e4SLinus Torvalds out:
24911da177e4SLinus Torvalds 	ret = mpage_writepage(page, get_block, wbc);
24921da177e4SLinus Torvalds 	if (ret == -EAGAIN)
24931da177e4SLinus Torvalds 		ret = __block_write_full_page(inode, page, get_block, wbc);
24941da177e4SLinus Torvalds 	return ret;
24951da177e4SLinus Torvalds }
24961da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_writepage);
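
/*
 * Illustrative sketch (not part of this file): wiring the nobh variants into
 * an address_space so the common write path avoids attaching buffer heads,
 * in the style of ext2's nobh aops.  "myfs_*" names are hypothetical.
 */
static int myfs_nobh_prepare_write(struct file *file, struct page *page,
				   unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, myfs_get_block);
}

static int myfs_nobh_writepage(struct page *page,
			       struct writeback_control *wbc)
{
	return nobh_writepage(page, myfs_get_block, wbc);
}

static const struct address_space_operations myfs_nobh_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_nobh_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= myfs_nobh_prepare_write,
	.commit_write	= nobh_commit_write,
};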
24971da177e4SLinus Torvalds 
24981da177e4SLinus Torvalds /*
24991da177e4SLinus Torvalds  * This function assumes that ->prepare_write() uses nobh_prepare_write().
25001da177e4SLinus Torvalds  */
25011da177e4SLinus Torvalds int nobh_truncate_page(struct address_space *mapping, loff_t from)
25021da177e4SLinus Torvalds {
25031da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
25041da177e4SLinus Torvalds 	unsigned blocksize = 1 << inode->i_blkbits;
25051da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
25061da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
25071da177e4SLinus Torvalds 	unsigned to;
25081da177e4SLinus Torvalds 	struct page *page;
2509f5e54d6eSChristoph Hellwig 	const struct address_space_operations *a_ops = mapping->a_ops;
25101da177e4SLinus Torvalds 	int ret = 0;
25111da177e4SLinus Torvalds 
25121da177e4SLinus Torvalds 	if ((offset & (blocksize - 1)) == 0)
25131da177e4SLinus Torvalds 		goto out;
25141da177e4SLinus Torvalds 
25151da177e4SLinus Torvalds 	ret = -ENOMEM;
25161da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
25171da177e4SLinus Torvalds 	if (!page)
25181da177e4SLinus Torvalds 		goto out;
25191da177e4SLinus Torvalds 
25201da177e4SLinus Torvalds 	to = (offset + blocksize) & ~(blocksize - 1);
25211da177e4SLinus Torvalds 	ret = a_ops->prepare_write(NULL, page, offset, to);
25221da177e4SLinus Torvalds 	if (ret == 0) {
252301f2705dSNate Diller 		zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
252401f2705dSNate Diller 				KM_USER0);
252557bf63d6SDave Kleikamp 		/*
252657bf63d6SDave Kleikamp 		 * It would be more correct to call aops->commit_write()
252757bf63d6SDave Kleikamp 		 * here, but this is more efficient.
252857bf63d6SDave Kleikamp 		 */
252957bf63d6SDave Kleikamp 		SetPageUptodate(page);
25301da177e4SLinus Torvalds 		set_page_dirty(page);
25311da177e4SLinus Torvalds 	}
25321da177e4SLinus Torvalds 	unlock_page(page);
25331da177e4SLinus Torvalds 	page_cache_release(page);
25341da177e4SLinus Torvalds out:
25351da177e4SLinus Torvalds 	return ret;
25361da177e4SLinus Torvalds }
25371da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_truncate_page);
25381da177e4SLinus Torvalds 
25391da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping,
25401da177e4SLinus Torvalds 			loff_t from, get_block_t *get_block)
25411da177e4SLinus Torvalds {
25421da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
25431da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
25441da177e4SLinus Torvalds 	unsigned blocksize;
254554b21a79SAndrew Morton 	sector_t iblock;
25461da177e4SLinus Torvalds 	unsigned length, pos;
25471da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
25481da177e4SLinus Torvalds 	struct page *page;
25491da177e4SLinus Torvalds 	struct buffer_head *bh;
25501da177e4SLinus Torvalds 	int err;
25511da177e4SLinus Torvalds 
25521da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
25531da177e4SLinus Torvalds 	length = offset & (blocksize - 1);
25541da177e4SLinus Torvalds 
25551da177e4SLinus Torvalds 	/* Block boundary? Nothing to do */
25561da177e4SLinus Torvalds 	if (!length)
25571da177e4SLinus Torvalds 		return 0;
25581da177e4SLinus Torvalds 
25591da177e4SLinus Torvalds 	length = blocksize - length;
256054b21a79SAndrew Morton 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
25611da177e4SLinus Torvalds 
25621da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
25631da177e4SLinus Torvalds 	err = -ENOMEM;
25641da177e4SLinus Torvalds 	if (!page)
25651da177e4SLinus Torvalds 		goto out;
25661da177e4SLinus Torvalds 
25671da177e4SLinus Torvalds 	if (!page_has_buffers(page))
25681da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
25691da177e4SLinus Torvalds 
25701da177e4SLinus Torvalds 	/* Find the buffer that contains "offset" */
25711da177e4SLinus Torvalds 	bh = page_buffers(page);
25721da177e4SLinus Torvalds 	pos = blocksize;
25731da177e4SLinus Torvalds 	while (offset >= pos) {
25741da177e4SLinus Torvalds 		bh = bh->b_this_page;
25751da177e4SLinus Torvalds 		iblock++;
25761da177e4SLinus Torvalds 		pos += blocksize;
25771da177e4SLinus Torvalds 	}
25781da177e4SLinus Torvalds 
25791da177e4SLinus Torvalds 	err = 0;
25801da177e4SLinus Torvalds 	if (!buffer_mapped(bh)) {
2581b0cf2321SBadari Pulavarty 		WARN_ON(bh->b_size != blocksize);
25821da177e4SLinus Torvalds 		err = get_block(inode, iblock, bh, 0);
25831da177e4SLinus Torvalds 		if (err)
25841da177e4SLinus Torvalds 			goto unlock;
25851da177e4SLinus Torvalds 		/* unmapped? It's a hole - nothing to do */
25861da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
25871da177e4SLinus Torvalds 			goto unlock;
25881da177e4SLinus Torvalds 	}
25891da177e4SLinus Torvalds 
25901da177e4SLinus Torvalds 	/* Ok, it's mapped. Make sure it's up-to-date */
25911da177e4SLinus Torvalds 	if (PageUptodate(page))
25921da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
25931da177e4SLinus Torvalds 
259433a266ddSDavid Chinner 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
25951da177e4SLinus Torvalds 		err = -EIO;
25961da177e4SLinus Torvalds 		ll_rw_block(READ, 1, &bh);
25971da177e4SLinus Torvalds 		wait_on_buffer(bh);
25981da177e4SLinus Torvalds 		/* Uhhuh. Read error. Complain and punt. */
25991da177e4SLinus Torvalds 		if (!buffer_uptodate(bh))
26001da177e4SLinus Torvalds 			goto unlock;
26011da177e4SLinus Torvalds 	}
26021da177e4SLinus Torvalds 
260301f2705dSNate Diller 	zero_user_page(page, offset, length, KM_USER0);
26041da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
26051da177e4SLinus Torvalds 	err = 0;
26061da177e4SLinus Torvalds 
26071da177e4SLinus Torvalds unlock:
26081da177e4SLinus Torvalds 	unlock_page(page);
26091da177e4SLinus Torvalds 	page_cache_release(page);
26101da177e4SLinus Torvalds out:
26111da177e4SLinus Torvalds 	return err;
26121da177e4SLinus Torvalds }
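
/*
 * Illustrative sketch (not part of this file): a typical ->truncate()
 * zeroes the partial block at the new EOF before freeing data blocks,
 * roughly as ext2 does.  "myfs_get_block" and "myfs_free_blocks" are
 * hypothetical.
 */
static void myfs_truncate(struct inode *inode)
{
	/* zero the tail of the last block so stale data never leaks */
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);

	/* ... then release everything beyond the new i_size */
	myfs_free_blocks(inode, inode->i_size);
}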
26131da177e4SLinus Torvalds 
26141da177e4SLinus Torvalds /*
26151da177e4SLinus Torvalds  * The generic ->writepage function for buffer-backed address_spaces
26161da177e4SLinus Torvalds  */
26171da177e4SLinus Torvalds int block_write_full_page(struct page *page, get_block_t *get_block,
26181da177e4SLinus Torvalds 			struct writeback_control *wbc)
26191da177e4SLinus Torvalds {
26201da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
26211da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
26221da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
26231da177e4SLinus Torvalds 	unsigned offset;
26241da177e4SLinus Torvalds 
26251da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
26261da177e4SLinus Torvalds 	if (page->index < end_index)
26271da177e4SLinus Torvalds 		return __block_write_full_page(inode, page, get_block, wbc);
26281da177e4SLinus Torvalds 
26291da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
26301da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
26311da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
26321da177e4SLinus Torvalds 		/*
26331da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
26341da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
26351da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
26361da177e4SLinus Torvalds 		 */
2637aaa4059bSJan Kara 		do_invalidatepage(page, 0);
26381da177e4SLinus Torvalds 		unlock_page(page);
26391da177e4SLinus Torvalds 		return 0; /* don't care */
26401da177e4SLinus Torvalds 	}
26411da177e4SLinus Torvalds 
26421da177e4SLinus Torvalds 	/*
26431da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
26441da177e4SLinus Torvalds 	 * writepage invocation because it may be mmapped.  "A file is mapped
26451da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
26461da177e4SLinus Torvalds 	 * the page size, the remaining memory is zeroed when mapped, and
26471da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
26481da177e4SLinus Torvalds 	 */
264901f2705dSNate Diller 	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
26501da177e4SLinus Torvalds 	return __block_write_full_page(inode, page, get_block, wbc);
26511da177e4SLinus Torvalds }
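
/*
 * Illustrative sketch (not part of this file): a filesystem's ->writepage
 * normally just supplies its block mapper, ext2-style.  "myfs_get_block" is
 * hypothetical.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}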
26521da177e4SLinus Torvalds 
26531da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
26541da177e4SLinus Torvalds 			    get_block_t *get_block)
26551da177e4SLinus Torvalds {
26561da177e4SLinus Torvalds 	struct buffer_head tmp;
26571da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
26581da177e4SLinus Torvalds 	tmp.b_state = 0;
26591da177e4SLinus Torvalds 	tmp.b_blocknr = 0;
2660b0cf2321SBadari Pulavarty 	tmp.b_size = 1 << inode->i_blkbits;
26611da177e4SLinus Torvalds 	get_block(inode, block, &tmp, 0);
26621da177e4SLinus Torvalds 	return tmp.b_blocknr;
26631da177e4SLinus Torvalds }
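
/*
 * Illustrative sketch (not part of this file): ->bmap is normally a thin
 * wrapper like this (compare ext2_bmap).  "myfs_get_block" is hypothetical.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}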
26641da177e4SLinus Torvalds 
26656712ecf8SNeilBrown static void end_bio_bh_io_sync(struct bio *bio, int err)
26661da177e4SLinus Torvalds {
26671da177e4SLinus Torvalds 	struct buffer_head *bh = bio->bi_private;
26681da177e4SLinus Torvalds 
26691da177e4SLinus Torvalds 	if (err == -EOPNOTSUPP) {
26701da177e4SLinus Torvalds 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
26711da177e4SLinus Torvalds 		set_bit(BH_Eopnotsupp, &bh->b_state);
26721da177e4SLinus Torvalds 	}
26731da177e4SLinus Torvalds 
26741da177e4SLinus Torvalds 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
26751da177e4SLinus Torvalds 	bio_put(bio);
26761da177e4SLinus Torvalds }
26771da177e4SLinus Torvalds 
26781da177e4SLinus Torvalds int submit_bh(int rw, struct buffer_head * bh)
26791da177e4SLinus Torvalds {
26801da177e4SLinus Torvalds 	struct bio *bio;
26811da177e4SLinus Torvalds 	int ret = 0;
26821da177e4SLinus Torvalds 
26831da177e4SLinus Torvalds 	BUG_ON(!buffer_locked(bh));
26841da177e4SLinus Torvalds 	BUG_ON(!buffer_mapped(bh));
26851da177e4SLinus Torvalds 	BUG_ON(!bh->b_end_io);
26861da177e4SLinus Torvalds 
26871da177e4SLinus Torvalds 	if (buffer_ordered(bh) && (rw == WRITE))
26881da177e4SLinus Torvalds 		rw = WRITE_BARRIER;
26891da177e4SLinus Torvalds 
26901da177e4SLinus Torvalds 	/*
26911da177e4SLinus Torvalds 	 * Only clear out a write error when rewriting, should this
26921da177e4SLinus Torvalds 	 * include WRITE_SYNC as well?
26931da177e4SLinus Torvalds 	 */
26941da177e4SLinus Torvalds 	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
26951da177e4SLinus Torvalds 		clear_buffer_write_io_error(bh);
26961da177e4SLinus Torvalds 
26971da177e4SLinus Torvalds 	/*
26981da177e4SLinus Torvalds 	 * from here on down, it's all bio -- do the initial mapping,
26991da177e4SLinus Torvalds 	 * submit_bio -> generic_make_request may further map this bio around
27001da177e4SLinus Torvalds 	 */
27011da177e4SLinus Torvalds 	bio = bio_alloc(GFP_NOIO, 1);
27021da177e4SLinus Torvalds 
27031da177e4SLinus Torvalds 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
27041da177e4SLinus Torvalds 	bio->bi_bdev = bh->b_bdev;
27051da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_page = bh->b_page;
27061da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_len = bh->b_size;
27071da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
27081da177e4SLinus Torvalds 
27091da177e4SLinus Torvalds 	bio->bi_vcnt = 1;
27101da177e4SLinus Torvalds 	bio->bi_idx = 0;
27111da177e4SLinus Torvalds 	bio->bi_size = bh->b_size;
27121da177e4SLinus Torvalds 
27131da177e4SLinus Torvalds 	bio->bi_end_io = end_bio_bh_io_sync;
27141da177e4SLinus Torvalds 	bio->bi_private = bh;
27151da177e4SLinus Torvalds 
27161da177e4SLinus Torvalds 	bio_get(bio);
27171da177e4SLinus Torvalds 	submit_bio(rw, bio);
27181da177e4SLinus Torvalds 
27191da177e4SLinus Torvalds 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
27201da177e4SLinus Torvalds 		ret = -EOPNOTSUPP;
27211da177e4SLinus Torvalds 
27221da177e4SLinus Torvalds 	bio_put(bio);
27231da177e4SLinus Torvalds 	return ret;
27241da177e4SLinus Torvalds }
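
/*
 * Illustrative sketch (not part of this file): a minimal synchronous read of
 * one buffer via submit_bh(), following the same lock/refcount protocol that
 * ll_rw_block() uses below.  The caller is assumed to hold a reference on bh.
 */
static int read_bh_sync(struct buffer_head *bh)
{
	if (buffer_uptodate(bh))
		return 0;
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {	/* someone else read it meanwhile */
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);			/* dropped by end_buffer_read_sync */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}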
27251da177e4SLinus Torvalds 
27261da177e4SLinus Torvalds /**
27271da177e4SLinus Torvalds  * ll_rw_block - low-level access to block devices (DEPRECATED)
2728a7662236SJan Kara  * @rw: %READ, %WRITE, %SWRITE, or %READA (readahead)
27291da177e4SLinus Torvalds  * @nr: number of &struct buffer_heads in the array
27301da177e4SLinus Torvalds  * @bhs: array of pointers to &struct buffer_head
27311da177e4SLinus Torvalds  *
2732a7662236SJan Kara  * ll_rw_block() takes an array of pointers to &struct buffer_head, and
2733a7662236SJan Kara  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2734a7662236SJan Kara  * option, %SWRITE, is like %WRITE except that we make sure the *current* data
2735a7662236SJan Kara  * in the buffers is sent to disk.  The fourth option, %READA, is described in
2736a7662236SJan Kara  * the documentation for generic_make_request(), which ll_rw_block() calls.
27371da177e4SLinus Torvalds  *
27381da177e4SLinus Torvalds  * This function drops any buffer that it cannot get a lock on (with the
2739a7662236SJan Kara  * BH_Lock state bit) unless %SWRITE is requested, any buffer that appears to
2740a7662236SJan Kara  * be clean when doing a write request, and any buffer that appears to be
2741a7662236SJan Kara  * up-to-date when doing a read request.  Further, it marks as clean buffers
2742a7662236SJan Kara  * that are processed for writing (the buffer cache won't assume that they are
2743a7662236SJan Kara  * actually clean until the buffer gets unlocked).
27441da177e4SLinus Torvalds  *
27451da177e4SLinus Torvalds  * ll_rw_block sets b_end_io to a simple completion handler that marks
27461da177e4SLinus Torvalds  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
27471da177e4SLinus Torvalds  * any waiters.
27481da177e4SLinus Torvalds  *
27491da177e4SLinus Torvalds  * All of the buffers must be for the same device, and must also be a
27501da177e4SLinus Torvalds  * multiple of the current approved size for the device.
27511da177e4SLinus Torvalds  */
27521da177e4SLinus Torvalds void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
27531da177e4SLinus Torvalds {
27541da177e4SLinus Torvalds 	int i;
27551da177e4SLinus Torvalds 
27561da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
27571da177e4SLinus Torvalds 		struct buffer_head *bh = bhs[i];
27581da177e4SLinus Torvalds 
2759a7662236SJan Kara 		if (rw == SWRITE)
2760a7662236SJan Kara 			lock_buffer(bh);
2761a7662236SJan Kara 		else if (test_set_buffer_locked(bh))
27621da177e4SLinus Torvalds 			continue;
27631da177e4SLinus Torvalds 
2764a7662236SJan Kara 		if (rw == WRITE || rw == SWRITE) {
27651da177e4SLinus Torvalds 			if (test_clear_buffer_dirty(bh)) {
276676c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_write_sync;
2767e60e5c50SOGAWA Hirofumi 				get_bh(bh);
27681da177e4SLinus Torvalds 				submit_bh(WRITE, bh);
27691da177e4SLinus Torvalds 				continue;
27701da177e4SLinus Torvalds 			}
27711da177e4SLinus Torvalds 		} else {
27721da177e4SLinus Torvalds 			if (!buffer_uptodate(bh)) {
277376c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_read_sync;
2774e60e5c50SOGAWA Hirofumi 				get_bh(bh);
27751da177e4SLinus Torvalds 				submit_bh(rw, bh);
27761da177e4SLinus Torvalds 				continue;
27771da177e4SLinus Torvalds 			}
27781da177e4SLinus Torvalds 		}
27791da177e4SLinus Torvalds 		unlock_buffer(bh);
27801da177e4SLinus Torvalds 	}
27811da177e4SLinus Torvalds }
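
/*
 * Illustrative sketch (not part of this file): the common batch-read pattern
 * built on ll_rw_block() - start all the reads, then wait for and check each
 * buffer in turn.
 */
static int read_buffers_sync(struct buffer_head *bhs[], int nr)
{
	int i, err = 0;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}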
27821da177e4SLinus Torvalds 
27831da177e4SLinus Torvalds /*
27841da177e4SLinus Torvalds  * For a data-integrity writeout, we need to wait upon any in-progress I/O
27851da177e4SLinus Torvalds  * and then start new I/O and then wait upon it.  The caller must have a ref on
27861da177e4SLinus Torvalds  * the buffer_head.
27871da177e4SLinus Torvalds  */
27881da177e4SLinus Torvalds int sync_dirty_buffer(struct buffer_head *bh)
27891da177e4SLinus Torvalds {
27901da177e4SLinus Torvalds 	int ret = 0;
27911da177e4SLinus Torvalds 
27921da177e4SLinus Torvalds 	WARN_ON(atomic_read(&bh->b_count) < 1);
27931da177e4SLinus Torvalds 	lock_buffer(bh);
27941da177e4SLinus Torvalds 	if (test_clear_buffer_dirty(bh)) {
27951da177e4SLinus Torvalds 		get_bh(bh);
27961da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_write_sync;
27971da177e4SLinus Torvalds 		ret = submit_bh(WRITE, bh);
27981da177e4SLinus Torvalds 		wait_on_buffer(bh);
27991da177e4SLinus Torvalds 		if (buffer_eopnotsupp(bh)) {
28001da177e4SLinus Torvalds 			clear_buffer_eopnotsupp(bh);
28011da177e4SLinus Torvalds 			ret = -EOPNOTSUPP;
28021da177e4SLinus Torvalds 		}
28031da177e4SLinus Torvalds 		if (!ret && !buffer_uptodate(bh))
28041da177e4SLinus Torvalds 			ret = -EIO;
28051da177e4SLinus Torvalds 	} else {
28061da177e4SLinus Torvalds 		unlock_buffer(bh);
28071da177e4SLinus Torvalds 	}
28081da177e4SLinus Torvalds 	return ret;
28091da177e4SLinus Torvalds }
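
/*
 * Illustrative sketch (not part of this file): synchronously updating a
 * piece of on-disk metadata such as a superblock copy held in "sbh".  The
 * caller is assumed to hold a reference on the buffer.
 */
static int myfs_write_super_sync(struct buffer_head *sbh)
{
	mark_buffer_dirty(sbh);
	return sync_dirty_buffer(sbh);	/* waits; returns -EIO on failure */
}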
28101da177e4SLinus Torvalds 
28111da177e4SLinus Torvalds /*
28121da177e4SLinus Torvalds  * try_to_free_buffers() checks if all the buffers on this particular page
28131da177e4SLinus Torvalds  * are unused, and releases them if so.
28141da177e4SLinus Torvalds  *
28151da177e4SLinus Torvalds  * Exclusion against try_to_free_buffers may be obtained by either
28161da177e4SLinus Torvalds  * locking the page or by holding its mapping's private_lock.
28171da177e4SLinus Torvalds  *
28181da177e4SLinus Torvalds  * If the page is dirty but all the buffers are clean then we need to
28191da177e4SLinus Torvalds  * be sure to mark the page clean as well.  This is because the page
28201da177e4SLinus Torvalds  * may be against a block device, and a later reattachment of buffers
28211da177e4SLinus Torvalds  * to a dirty page will set *all* buffers dirty.  Which would corrupt
28221da177e4SLinus Torvalds  * filesystem data on the same device.
28231da177e4SLinus Torvalds  *
28241da177e4SLinus Torvalds  * The same applies to regular filesystem pages: if all the buffers are
28251da177e4SLinus Torvalds  * clean then we set the page clean and proceed.  To do that, we require
28261da177e4SLinus Torvalds  * total exclusion from __set_page_dirty_buffers().  That is obtained with
28271da177e4SLinus Torvalds  * private_lock.
28281da177e4SLinus Torvalds  *
28291da177e4SLinus Torvalds  * try_to_free_buffers() is non-blocking.
28301da177e4SLinus Torvalds  */
28311da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh)
28321da177e4SLinus Torvalds {
28331da177e4SLinus Torvalds 	return atomic_read(&bh->b_count) |
28341da177e4SLinus Torvalds 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
28351da177e4SLinus Torvalds }
28361da177e4SLinus Torvalds 
28371da177e4SLinus Torvalds static int
28381da177e4SLinus Torvalds drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
28391da177e4SLinus Torvalds {
28401da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);
28411da177e4SLinus Torvalds 	struct buffer_head *bh;
28421da177e4SLinus Torvalds 
28431da177e4SLinus Torvalds 	bh = head;
28441da177e4SLinus Torvalds 	do {
2845de7d5a3bSakpm@osdl.org 		if (buffer_write_io_error(bh) && page->mapping)
28461da177e4SLinus Torvalds 			set_bit(AS_EIO, &page->mapping->flags);
28471da177e4SLinus Torvalds 		if (buffer_busy(bh))
28481da177e4SLinus Torvalds 			goto failed;
28491da177e4SLinus Torvalds 		bh = bh->b_this_page;
28501da177e4SLinus Torvalds 	} while (bh != head);
28511da177e4SLinus Torvalds 
28521da177e4SLinus Torvalds 	do {
28531da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
28541da177e4SLinus Torvalds 
28551da177e4SLinus Torvalds 		if (!list_empty(&bh->b_assoc_buffers))
28561da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
28571da177e4SLinus Torvalds 		bh = next;
28581da177e4SLinus Torvalds 	} while (bh != head);
28591da177e4SLinus Torvalds 	*buffers_to_free = head;
28601da177e4SLinus Torvalds 	__clear_page_buffers(page);
28611da177e4SLinus Torvalds 	return 1;
28621da177e4SLinus Torvalds failed:
28631da177e4SLinus Torvalds 	return 0;
28641da177e4SLinus Torvalds }
28651da177e4SLinus Torvalds 
28661da177e4SLinus Torvalds int try_to_free_buffers(struct page *page)
28671da177e4SLinus Torvalds {
28681da177e4SLinus Torvalds 	struct address_space * const mapping = page->mapping;
28691da177e4SLinus Torvalds 	struct buffer_head *buffers_to_free = NULL;
28701da177e4SLinus Torvalds 	int ret = 0;
28711da177e4SLinus Torvalds 
28721da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
2873ecdfc978SLinus Torvalds 	if (PageWriteback(page))
28741da177e4SLinus Torvalds 		return 0;
28751da177e4SLinus Torvalds 
28761da177e4SLinus Torvalds 	if (mapping == NULL) {		/* can this still happen? */
28771da177e4SLinus Torvalds 		ret = drop_buffers(page, &buffers_to_free);
28781da177e4SLinus Torvalds 		goto out;
28791da177e4SLinus Torvalds 	}
28801da177e4SLinus Torvalds 
28811da177e4SLinus Torvalds 	spin_lock(&mapping->private_lock);
28821da177e4SLinus Torvalds 	ret = drop_buffers(page, &buffers_to_free);
2883ecdfc978SLinus Torvalds 
2884ecdfc978SLinus Torvalds 	/*
2885ecdfc978SLinus Torvalds 	 * If the filesystem writes its buffers by hand (eg ext3)
2886ecdfc978SLinus Torvalds 	 * then we can have clean buffers against a dirty page.  We
2887ecdfc978SLinus Torvalds 	 * clean the page here; otherwise the VM will never notice
2888ecdfc978SLinus Torvalds 	 * that the filesystem did any IO at all.
2889ecdfc978SLinus Torvalds 	 *
2890ecdfc978SLinus Torvalds 	 * Also, during truncate, discard_buffer will have marked all
2891ecdfc978SLinus Torvalds 	 * the page's buffers clean.  We discover that here and clean
2892ecdfc978SLinus Torvalds 	 * the page also.
289387df7241SNick Piggin 	 *
289487df7241SNick Piggin 	 * private_lock must be held over this entire operation in order
289587df7241SNick Piggin 	 * to synchronise against __set_page_dirty_buffers and prevent the
289687df7241SNick Piggin 	 * dirty bit from being lost.
2897ecdfc978SLinus Torvalds 	 */
2898ecdfc978SLinus Torvalds 	if (ret)
2899ecdfc978SLinus Torvalds 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
290087df7241SNick Piggin 	spin_unlock(&mapping->private_lock);
29011da177e4SLinus Torvalds out:
29021da177e4SLinus Torvalds 	if (buffers_to_free) {
29031da177e4SLinus Torvalds 		struct buffer_head *bh = buffers_to_free;
29041da177e4SLinus Torvalds 
29051da177e4SLinus Torvalds 		do {
29061da177e4SLinus Torvalds 			struct buffer_head *next = bh->b_this_page;
29071da177e4SLinus Torvalds 			free_buffer_head(bh);
29081da177e4SLinus Torvalds 			bh = next;
29091da177e4SLinus Torvalds 		} while (bh != buffers_to_free);
29101da177e4SLinus Torvalds 	}
29111da177e4SLinus Torvalds 	return ret;
29121da177e4SLinus Torvalds }
29131da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers);
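
/*
 * Illustrative sketch (not part of this file): a journalling filesystem's
 * ->releasepage usually lets the journal drop its interest in the buffers
 * first and only then falls back to try_to_free_buffers() (compare ext3).
 * "myfs_journal_drop_buffers" is hypothetical.
 */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	if (!myfs_journal_drop_buffers(page))
		return 0;	/* the journal still needs these buffers */
	return try_to_free_buffers(page);
}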
29141da177e4SLinus Torvalds 
29153978d717SNeilBrown void block_sync_page(struct page *page)
29161da177e4SLinus Torvalds {
29171da177e4SLinus Torvalds 	struct address_space *mapping;
29181da177e4SLinus Torvalds 
29191da177e4SLinus Torvalds 	smp_mb();
29201da177e4SLinus Torvalds 	mapping = page_mapping(page);
29211da177e4SLinus Torvalds 	if (mapping)
29221da177e4SLinus Torvalds 		blk_run_backing_dev(mapping->backing_dev_info, page);
29231da177e4SLinus Torvalds }
29241da177e4SLinus Torvalds 
29251da177e4SLinus Torvalds /*
29261da177e4SLinus Torvalds  * There are no bdflush tunables left.  But distributions are
29271da177e4SLinus Torvalds  * still running obsolete flush daemons, so we terminate them here.
29281da177e4SLinus Torvalds  *
29291da177e4SLinus Torvalds  * Use of bdflush() is deprecated and will be removed in a future kernel.
29301da177e4SLinus Torvalds  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
29311da177e4SLinus Torvalds  */
29321da177e4SLinus Torvalds asmlinkage long sys_bdflush(int func, long data)
29331da177e4SLinus Torvalds {
29341da177e4SLinus Torvalds 	static int msg_count;
29351da177e4SLinus Torvalds 
29361da177e4SLinus Torvalds 	if (!capable(CAP_SYS_ADMIN))
29371da177e4SLinus Torvalds 		return -EPERM;
29381da177e4SLinus Torvalds 
29391da177e4SLinus Torvalds 	if (msg_count < 5) {
29401da177e4SLinus Torvalds 		msg_count++;
29411da177e4SLinus Torvalds 		printk(KERN_INFO
29421da177e4SLinus Torvalds 			"warning: process `%s' used the obsolete bdflush"
29431da177e4SLinus Torvalds 			" system call\n", current->comm);
29441da177e4SLinus Torvalds 		printk(KERN_INFO "Fix your initscripts?\n");
29451da177e4SLinus Torvalds 	}
29461da177e4SLinus Torvalds 
29471da177e4SLinus Torvalds 	if (func == 1)
29481da177e4SLinus Torvalds 		do_exit(0);
29491da177e4SLinus Torvalds 	return 0;
29501da177e4SLinus Torvalds }
29511da177e4SLinus Torvalds 
29521da177e4SLinus Torvalds /*
29531da177e4SLinus Torvalds  * Buffer-head allocation
29541da177e4SLinus Torvalds  */
2955e18b890bSChristoph Lameter static struct kmem_cache *bh_cachep;
29561da177e4SLinus Torvalds 
29571da177e4SLinus Torvalds /*
29581da177e4SLinus Torvalds  * Once the number of bh's in the machine exceeds this level, we start
29591da177e4SLinus Torvalds  * stripping them in writeback.
29601da177e4SLinus Torvalds  */
29611da177e4SLinus Torvalds static int max_buffer_heads;
29621da177e4SLinus Torvalds 
29631da177e4SLinus Torvalds int buffer_heads_over_limit;
29641da177e4SLinus Torvalds 
29651da177e4SLinus Torvalds struct bh_accounting {
29661da177e4SLinus Torvalds 	int nr;			/* Number of live bh's */
29671da177e4SLinus Torvalds 	int ratelimit;		/* Limit cacheline bouncing */
29681da177e4SLinus Torvalds };
29691da177e4SLinus Torvalds 
29701da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
29711da177e4SLinus Torvalds 
29721da177e4SLinus Torvalds static void recalc_bh_state(void)
29731da177e4SLinus Torvalds {
29741da177e4SLinus Torvalds 	int i;
29751da177e4SLinus Torvalds 	int tot = 0;
29761da177e4SLinus Torvalds 
29771da177e4SLinus Torvalds 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
29781da177e4SLinus Torvalds 		return;
29791da177e4SLinus Torvalds 	__get_cpu_var(bh_accounting).ratelimit = 0;
29808a143426SEric Dumazet 	for_each_online_cpu(i)
29811da177e4SLinus Torvalds 		tot += per_cpu(bh_accounting, i).nr;
29821da177e4SLinus Torvalds 	buffer_heads_over_limit = (tot > max_buffer_heads);
29831da177e4SLinus Torvalds }
29841da177e4SLinus Torvalds 
2985dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
29861da177e4SLinus Torvalds {
2987a35afb83SChristoph Lameter 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
29881da177e4SLinus Torvalds 	if (ret) {
2989a35afb83SChristoph Lameter 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
2990736c7b80SCoywolf Qi Hunt 		get_cpu_var(bh_accounting).nr++;
29911da177e4SLinus Torvalds 		recalc_bh_state();
2992736c7b80SCoywolf Qi Hunt 		put_cpu_var(bh_accounting);
29931da177e4SLinus Torvalds 	}
29941da177e4SLinus Torvalds 	return ret;
29951da177e4SLinus Torvalds }
29961da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head);
29971da177e4SLinus Torvalds 
29981da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh)
29991da177e4SLinus Torvalds {
30001da177e4SLinus Torvalds 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
30011da177e4SLinus Torvalds 	kmem_cache_free(bh_cachep, bh);
3002736c7b80SCoywolf Qi Hunt 	get_cpu_var(bh_accounting).nr--;
30031da177e4SLinus Torvalds 	recalc_bh_state();
3004736c7b80SCoywolf Qi Hunt 	put_cpu_var(bh_accounting);
30051da177e4SLinus Torvalds }
30061da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head);
30071da177e4SLinus Torvalds 
30081da177e4SLinus Torvalds static void buffer_exit_cpu(int cpu)
30091da177e4SLinus Torvalds {
30101da177e4SLinus Torvalds 	int i;
30111da177e4SLinus Torvalds 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
30121da177e4SLinus Torvalds 
30131da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
30141da177e4SLinus Torvalds 		brelse(b->bhs[i]);
30151da177e4SLinus Torvalds 		b->bhs[i] = NULL;
30161da177e4SLinus Torvalds 	}
30178a143426SEric Dumazet 	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
30188a143426SEric Dumazet 	per_cpu(bh_accounting, cpu).nr = 0;
30198a143426SEric Dumazet 	put_cpu_var(bh_accounting);
30201da177e4SLinus Torvalds }
30211da177e4SLinus Torvalds 
30221da177e4SLinus Torvalds static int buffer_cpu_notify(struct notifier_block *self,
30231da177e4SLinus Torvalds 			      unsigned long action, void *hcpu)
30241da177e4SLinus Torvalds {
30258bb78442SRafael J. Wysocki 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
30261da177e4SLinus Torvalds 		buffer_exit_cpu((unsigned long)hcpu);
30271da177e4SLinus Torvalds 	return NOTIFY_OK;
30281da177e4SLinus Torvalds }
30291da177e4SLinus Torvalds 
30301da177e4SLinus Torvalds void __init buffer_init(void)
30311da177e4SLinus Torvalds {
30321da177e4SLinus Torvalds 	int nrpages;
30331da177e4SLinus Torvalds 
3034a35afb83SChristoph Lameter 	bh_cachep = KMEM_CACHE(buffer_head,
3035a35afb83SChristoph Lameter 			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
30361da177e4SLinus Torvalds 
30371da177e4SLinus Torvalds 	/*
30381da177e4SLinus Torvalds 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
30391da177e4SLinus Torvalds 	 */
30401da177e4SLinus Torvalds 	nrpages = (nr_free_buffer_pages() * 10) / 100;
30411da177e4SLinus Torvalds 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
30421da177e4SLinus Torvalds 	hotcpu_notifier(buffer_cpu_notify, 0);
30431da177e4SLinus Torvalds }
30441da177e4SLinus Torvalds 
30451da177e4SLinus Torvalds EXPORT_SYMBOL(__bforget);
30461da177e4SLinus Torvalds EXPORT_SYMBOL(__brelse);
30471da177e4SLinus Torvalds EXPORT_SYMBOL(__wait_on_buffer);
30481da177e4SLinus Torvalds EXPORT_SYMBOL(block_commit_write);
30491da177e4SLinus Torvalds EXPORT_SYMBOL(block_prepare_write);
305054171690SDavid Chinner EXPORT_SYMBOL(block_page_mkwrite);
30511da177e4SLinus Torvalds EXPORT_SYMBOL(block_read_full_page);
30521da177e4SLinus Torvalds EXPORT_SYMBOL(block_sync_page);
30531da177e4SLinus Torvalds EXPORT_SYMBOL(block_truncate_page);
30541da177e4SLinus Torvalds EXPORT_SYMBOL(block_write_full_page);
30551da177e4SLinus Torvalds EXPORT_SYMBOL(cont_prepare_write);
30561da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_read_sync);
30571da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_write_sync);
30581da177e4SLinus Torvalds EXPORT_SYMBOL(file_fsync);
30591da177e4SLinus Torvalds EXPORT_SYMBOL(fsync_bdev);
30601da177e4SLinus Torvalds EXPORT_SYMBOL(generic_block_bmap);
30611da177e4SLinus Torvalds EXPORT_SYMBOL(generic_commit_write);
30621da177e4SLinus Torvalds EXPORT_SYMBOL(generic_cont_expand);
306305eb0b51SOGAWA Hirofumi EXPORT_SYMBOL(generic_cont_expand_simple);
30641da177e4SLinus Torvalds EXPORT_SYMBOL(init_buffer);
30651da177e4SLinus Torvalds EXPORT_SYMBOL(invalidate_bdev);
30661da177e4SLinus Torvalds EXPORT_SYMBOL(ll_rw_block);
30671da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_dirty);
30681da177e4SLinus Torvalds EXPORT_SYMBOL(submit_bh);
30691da177e4SLinus Torvalds EXPORT_SYMBOL(sync_dirty_buffer);
30701da177e4SLinus Torvalds EXPORT_SYMBOL(unlock_buffer);
3071