xref: /linux/fs/buffer.c (revision fc9b52cd8f5f459b88adcf67c47668425ae31a78)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  *  linux/fs/buffer.c
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
51da177e4SLinus Torvalds  */
61da177e4SLinus Torvalds 
71da177e4SLinus Torvalds /*
81da177e4SLinus Torvalds  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
91da177e4SLinus Torvalds  *
101da177e4SLinus Torvalds  * Removed a lot of unnecessary code and simplified things now that
111da177e4SLinus Torvalds  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
141da177e4SLinus Torvalds  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
151da177e4SLinus Torvalds  *
161da177e4SLinus Torvalds  * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
171da177e4SLinus Torvalds  *
181da177e4SLinus Torvalds  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
191da177e4SLinus Torvalds  */
201da177e4SLinus Torvalds 
211da177e4SLinus Torvalds #include <linux/kernel.h>
221da177e4SLinus Torvalds #include <linux/syscalls.h>
231da177e4SLinus Torvalds #include <linux/fs.h>
241da177e4SLinus Torvalds #include <linux/mm.h>
251da177e4SLinus Torvalds #include <linux/percpu.h>
261da177e4SLinus Torvalds #include <linux/slab.h>
2716f7e0feSRandy Dunlap #include <linux/capability.h>
281da177e4SLinus Torvalds #include <linux/blkdev.h>
291da177e4SLinus Torvalds #include <linux/file.h>
301da177e4SLinus Torvalds #include <linux/quotaops.h>
311da177e4SLinus Torvalds #include <linux/highmem.h>
321da177e4SLinus Torvalds #include <linux/module.h>
331da177e4SLinus Torvalds #include <linux/writeback.h>
341da177e4SLinus Torvalds #include <linux/hash.h>
351da177e4SLinus Torvalds #include <linux/suspend.h>
361da177e4SLinus Torvalds #include <linux/buffer_head.h>
3755e829afSAndrew Morton #include <linux/task_io_accounting_ops.h>
381da177e4SLinus Torvalds #include <linux/bio.h>
391da177e4SLinus Torvalds #include <linux/notifier.h>
401da177e4SLinus Torvalds #include <linux/cpu.h>
411da177e4SLinus Torvalds #include <linux/bitops.h>
421da177e4SLinus Torvalds #include <linux/mpage.h>
43fb1c8f93SIngo Molnar #include <linux/bit_spinlock.h>
441da177e4SLinus Torvalds 
451da177e4SLinus Torvalds static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
461da177e4SLinus Torvalds 
471da177e4SLinus Torvalds #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
481da177e4SLinus Torvalds 
491da177e4SLinus Torvalds inline void
501da177e4SLinus Torvalds init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
511da177e4SLinus Torvalds {
521da177e4SLinus Torvalds 	bh->b_end_io = handler;
531da177e4SLinus Torvalds 	bh->b_private = private;
541da177e4SLinus Torvalds }
551da177e4SLinus Torvalds 
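/*
 * sync_buffer() is the "action" handed to wait_on_bit()/wait_on_bit_lock()
 * below: while waiting for BH_Lock to clear it kicks any pending I/O on the
 * buffer's underlying block device and then sleeps in io_schedule().
 */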
561da177e4SLinus Torvalds static int sync_buffer(void *word)
571da177e4SLinus Torvalds {
581da177e4SLinus Torvalds 	struct block_device *bd;
591da177e4SLinus Torvalds 	struct buffer_head *bh
601da177e4SLinus Torvalds 		= container_of(word, struct buffer_head, b_state);
611da177e4SLinus Torvalds 
621da177e4SLinus Torvalds 	smp_mb();
631da177e4SLinus Torvalds 	bd = bh->b_bdev;
641da177e4SLinus Torvalds 	if (bd)
651da177e4SLinus Torvalds 		blk_run_address_space(bd->bd_inode->i_mapping);
661da177e4SLinus Torvalds 	io_schedule();
671da177e4SLinus Torvalds 	return 0;
681da177e4SLinus Torvalds }
691da177e4SLinus Torvalds 
70*fc9b52cdSHarvey Harrison void __lock_buffer(struct buffer_head *bh)
711da177e4SLinus Torvalds {
721da177e4SLinus Torvalds 	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
731da177e4SLinus Torvalds 							TASK_UNINTERRUPTIBLE);
741da177e4SLinus Torvalds }
751da177e4SLinus Torvalds EXPORT_SYMBOL(__lock_buffer);
761da177e4SLinus Torvalds 
77*fc9b52cdSHarvey Harrison void unlock_buffer(struct buffer_head *bh)
781da177e4SLinus Torvalds {
7972ed3d03SNick Piggin 	smp_mb__before_clear_bit();
801da177e4SLinus Torvalds 	clear_buffer_locked(bh);
811da177e4SLinus Torvalds 	smp_mb__after_clear_bit();
821da177e4SLinus Torvalds 	wake_up_bit(&bh->b_state, BH_Lock);
831da177e4SLinus Torvalds }
841da177e4SLinus Torvalds 
851da177e4SLinus Torvalds /*
861da177e4SLinus Torvalds  * Block until a buffer comes unlocked.  This doesn't stop it
871da177e4SLinus Torvalds  * from becoming locked again - you have to lock it yourself
881da177e4SLinus Torvalds  * if you want to preserve its state.
891da177e4SLinus Torvalds  */
901da177e4SLinus Torvalds void __wait_on_buffer(struct buffer_head * bh)
911da177e4SLinus Torvalds {
921da177e4SLinus Torvalds 	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
931da177e4SLinus Torvalds }
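/*
 * A minimal usage sketch (illustrative, not lifted from a particular caller):
 * wait_on_buffer() only waits; a caller that needs the buffer to stay in a
 * known state afterwards must take the lock itself, e.g.:
 *
 *	wait_on_buffer(bh);		// block until in-flight I/O finishes
 *	lock_buffer(bh);		// now nobody else can re-lock it
 *	if (buffer_uptodate(bh))
 *		... examine or modify the buffer contents ...
 *	unlock_buffer(bh);
 */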
941da177e4SLinus Torvalds 
951da177e4SLinus Torvalds static void
961da177e4SLinus Torvalds __clear_page_buffers(struct page *page)
971da177e4SLinus Torvalds {
981da177e4SLinus Torvalds 	ClearPagePrivate(page);
994c21e2f2SHugh Dickins 	set_page_private(page, 0);
1001da177e4SLinus Torvalds 	page_cache_release(page);
1011da177e4SLinus Torvalds }
1021da177e4SLinus Torvalds 
1031da177e4SLinus Torvalds static void buffer_io_error(struct buffer_head *bh)
1041da177e4SLinus Torvalds {
1051da177e4SLinus Torvalds 	char b[BDEVNAME_SIZE];
1061da177e4SLinus Torvalds 
1071da177e4SLinus Torvalds 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
1081da177e4SLinus Torvalds 			bdevname(bh->b_bdev, b),
1091da177e4SLinus Torvalds 			(unsigned long long)bh->b_blocknr);
1101da177e4SLinus Torvalds }
1111da177e4SLinus Torvalds 
1121da177e4SLinus Torvalds /*
11368671f35SDmitry Monakhov  * End-of-IO handler helper function which does not touch the bh after
11468671f35SDmitry Monakhov  * unlocking it.
11568671f35SDmitry Monakhov  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
11668671f35SDmitry Monakhov  * a race there is benign: unlock_buffer() only uses the bh's address for
11768671f35SDmitry Monakhov  * hashing after unlocking the buffer, so it doesn't actually touch the bh
11868671f35SDmitry Monakhov  * itself.
1191da177e4SLinus Torvalds  */
12068671f35SDmitry Monakhov static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
1211da177e4SLinus Torvalds {
1221da177e4SLinus Torvalds 	if (uptodate) {
1231da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
1241da177e4SLinus Torvalds 	} else {
1251da177e4SLinus Torvalds 		/* This happens due to failed READA attempts. */
1261da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
1271da177e4SLinus Torvalds 	}
1281da177e4SLinus Torvalds 	unlock_buffer(bh);
12968671f35SDmitry Monakhov }
13068671f35SDmitry Monakhov 
13168671f35SDmitry Monakhov /*
13268671f35SDmitry Monakhov  * Default synchronous end-of-IO handler.  Just mark it up-to-date and
13368671f35SDmitry Monakhov  * unlock the buffer. This is what ll_rw_block uses too.
13468671f35SDmitry Monakhov  */
13568671f35SDmitry Monakhov void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
13668671f35SDmitry Monakhov {
13768671f35SDmitry Monakhov 	__end_buffer_read_notouch(bh, uptodate);
1381da177e4SLinus Torvalds 	put_bh(bh);
1391da177e4SLinus Torvalds }
1401da177e4SLinus Torvalds 
1411da177e4SLinus Torvalds void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
1421da177e4SLinus Torvalds {
1431da177e4SLinus Torvalds 	char b[BDEVNAME_SIZE];
1441da177e4SLinus Torvalds 
1451da177e4SLinus Torvalds 	if (uptodate) {
1461da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
1471da177e4SLinus Torvalds 	} else {
1481da177e4SLinus Torvalds 		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
1491da177e4SLinus Torvalds 			buffer_io_error(bh);
1501da177e4SLinus Torvalds 			printk(KERN_WARNING "lost page write due to "
1511da177e4SLinus Torvalds 					"I/O error on %s\n",
1521da177e4SLinus Torvalds 				       bdevname(bh->b_bdev, b));
1531da177e4SLinus Torvalds 		}
1541da177e4SLinus Torvalds 		set_buffer_write_io_error(bh);
1551da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
1561da177e4SLinus Torvalds 	}
1571da177e4SLinus Torvalds 	unlock_buffer(bh);
1581da177e4SLinus Torvalds 	put_bh(bh);
1591da177e4SLinus Torvalds }
1601da177e4SLinus Torvalds 
1611da177e4SLinus Torvalds /*
1621da177e4SLinus Torvalds  * Write out and wait upon all the dirty data associated with a block
1631da177e4SLinus Torvalds  * device via its mapping.  Does not take the superblock lock.
1641da177e4SLinus Torvalds  */
1651da177e4SLinus Torvalds int sync_blockdev(struct block_device *bdev)
1661da177e4SLinus Torvalds {
1671da177e4SLinus Torvalds 	int ret = 0;
1681da177e4SLinus Torvalds 
16928fd1298SOGAWA Hirofumi 	if (bdev)
17028fd1298SOGAWA Hirofumi 		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
1711da177e4SLinus Torvalds 	return ret;
1721da177e4SLinus Torvalds }
1731da177e4SLinus Torvalds EXPORT_SYMBOL(sync_blockdev);
1741da177e4SLinus Torvalds 
1751da177e4SLinus Torvalds /*
1761da177e4SLinus Torvalds  * Write out and wait upon all dirty data associated with this
1771da177e4SLinus Torvalds  * device.   Filesystem data as well as the underlying block
1781da177e4SLinus Torvalds  * device.  Takes the superblock lock.
1791da177e4SLinus Torvalds  */
1801da177e4SLinus Torvalds int fsync_bdev(struct block_device *bdev)
1811da177e4SLinus Torvalds {
1821da177e4SLinus Torvalds 	struct super_block *sb = get_super(bdev);
1831da177e4SLinus Torvalds 	if (sb) {
1841da177e4SLinus Torvalds 		int res = fsync_super(sb);
1851da177e4SLinus Torvalds 		drop_super(sb);
1861da177e4SLinus Torvalds 		return res;
1871da177e4SLinus Torvalds 	}
1881da177e4SLinus Torvalds 	return sync_blockdev(bdev);
1891da177e4SLinus Torvalds }
1901da177e4SLinus Torvalds 
1911da177e4SLinus Torvalds /**
1921da177e4SLinus Torvalds  * freeze_bdev  --  lock a filesystem and force it into a consistent state
1931da177e4SLinus Torvalds  * @bdev:	blockdevice to lock
1941da177e4SLinus Torvalds  *
195f73ca1b7SDavid Chinner  * This takes the block device bd_mount_sem to make sure no new mounts
1961da177e4SLinus Torvalds  * happen on bdev until thaw_bdev() is called.
1971da177e4SLinus Torvalds  * If a superblock is found on this device, we take the s_umount semaphore
1981da177e4SLinus Torvalds  * on it to make sure nobody unmounts until the snapshot creation is done.
1991da177e4SLinus Torvalds  */
2001da177e4SLinus Torvalds struct super_block *freeze_bdev(struct block_device *bdev)
2011da177e4SLinus Torvalds {
2021da177e4SLinus Torvalds 	struct super_block *sb;
2031da177e4SLinus Torvalds 
204f73ca1b7SDavid Chinner 	down(&bdev->bd_mount_sem);
2051da177e4SLinus Torvalds 	sb = get_super(bdev);
2061da177e4SLinus Torvalds 	if (sb && !(sb->s_flags & MS_RDONLY)) {
2071da177e4SLinus Torvalds 		sb->s_frozen = SB_FREEZE_WRITE;
208d59dd462Sakpm@osdl.org 		smp_wmb();
2091da177e4SLinus Torvalds 
210d25b9a1fSOGAWA Hirofumi 		__fsync_super(sb);
2111da177e4SLinus Torvalds 
2121da177e4SLinus Torvalds 		sb->s_frozen = SB_FREEZE_TRANS;
213d59dd462Sakpm@osdl.org 		smp_wmb();
2141da177e4SLinus Torvalds 
2151da177e4SLinus Torvalds 		sync_blockdev(sb->s_bdev);
2161da177e4SLinus Torvalds 
2171da177e4SLinus Torvalds 		if (sb->s_op->write_super_lockfs)
2181da177e4SLinus Torvalds 			sb->s_op->write_super_lockfs(sb);
2191da177e4SLinus Torvalds 	}
2201da177e4SLinus Torvalds 
2211da177e4SLinus Torvalds 	sync_blockdev(bdev);
2221da177e4SLinus Torvalds 	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
2231da177e4SLinus Torvalds }
2241da177e4SLinus Torvalds EXPORT_SYMBOL(freeze_bdev);
2251da177e4SLinus Torvalds 
2261da177e4SLinus Torvalds /**
2271da177e4SLinus Torvalds  * thaw_bdev  -- unlock filesystem
2281da177e4SLinus Torvalds  * @bdev:	blockdevice to unlock
2291da177e4SLinus Torvalds  * @sb:		associated superblock
2301da177e4SLinus Torvalds  *
2311da177e4SLinus Torvalds  * Unlocks the filesystem and marks it writeable again after freeze_bdev().
2321da177e4SLinus Torvalds  */
2331da177e4SLinus Torvalds void thaw_bdev(struct block_device *bdev, struct super_block *sb)
2341da177e4SLinus Torvalds {
2351da177e4SLinus Torvalds 	if (sb) {
2361da177e4SLinus Torvalds 		BUG_ON(sb->s_bdev != bdev);
2371da177e4SLinus Torvalds 
2381da177e4SLinus Torvalds 		if (sb->s_op->unlockfs)
2391da177e4SLinus Torvalds 			sb->s_op->unlockfs(sb);
2401da177e4SLinus Torvalds 		sb->s_frozen = SB_UNFROZEN;
241d59dd462Sakpm@osdl.org 		smp_wmb();
2421da177e4SLinus Torvalds 		wake_up(&sb->s_wait_unfrozen);
2431da177e4SLinus Torvalds 		drop_super(sb);
2441da177e4SLinus Torvalds 	}
2451da177e4SLinus Torvalds 
246f73ca1b7SDavid Chinner 	up(&bdev->bd_mount_sem);
2471da177e4SLinus Torvalds }
2481da177e4SLinus Torvalds EXPORT_SYMBOL(thaw_bdev);
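/*
 * A hedged usage sketch of the freeze/thaw pairing above (device-mapper's
 * suspend path does something of this shape; the snapshot step is only a
 * placeholder):
 *
 *	struct super_block *sb;
 *
 *	sb = freeze_bdev(bdev);		// block new writes, sync the fs
 *	... take the snapshot while the filesystem is quiescent ...
 *	thaw_bdev(bdev, sb);		// releases s_umount and bd_mount_sem
 */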
2491da177e4SLinus Torvalds 
2501da177e4SLinus Torvalds /*
2511da177e4SLinus Torvalds  * Various filesystems appear to want __find_get_block to be non-blocking.
2521da177e4SLinus Torvalds  * But it's the page lock which protects the buffers.  To get around this,
2531da177e4SLinus Torvalds  * we get exclusion from try_to_free_buffers with the blockdev mapping's
2541da177e4SLinus Torvalds  * private_lock.
2551da177e4SLinus Torvalds  *
2561da177e4SLinus Torvalds  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
2571da177e4SLinus Torvalds  * may be quite high.  This code could TryLock the page, and if that
2581da177e4SLinus Torvalds  * succeeds, there is no need to take private_lock. (But if
2591da177e4SLinus Torvalds  * private_lock is contended then so is mapping->tree_lock).
2601da177e4SLinus Torvalds  */
2611da177e4SLinus Torvalds static struct buffer_head *
262385fd4c5SCoywolf Qi Hunt __find_get_block_slow(struct block_device *bdev, sector_t block)
2631da177e4SLinus Torvalds {
2641da177e4SLinus Torvalds 	struct inode *bd_inode = bdev->bd_inode;
2651da177e4SLinus Torvalds 	struct address_space *bd_mapping = bd_inode->i_mapping;
2661da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
2671da177e4SLinus Torvalds 	pgoff_t index;
2681da177e4SLinus Torvalds 	struct buffer_head *bh;
2691da177e4SLinus Torvalds 	struct buffer_head *head;
2701da177e4SLinus Torvalds 	struct page *page;
2711da177e4SLinus Torvalds 	int all_mapped = 1;
2721da177e4SLinus Torvalds 
2731da177e4SLinus Torvalds 	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
2741da177e4SLinus Torvalds 	page = find_get_page(bd_mapping, index);
2751da177e4SLinus Torvalds 	if (!page)
2761da177e4SLinus Torvalds 		goto out;
2771da177e4SLinus Torvalds 
2781da177e4SLinus Torvalds 	spin_lock(&bd_mapping->private_lock);
2791da177e4SLinus Torvalds 	if (!page_has_buffers(page))
2801da177e4SLinus Torvalds 		goto out_unlock;
2811da177e4SLinus Torvalds 	head = page_buffers(page);
2821da177e4SLinus Torvalds 	bh = head;
2831da177e4SLinus Torvalds 	do {
2841da177e4SLinus Torvalds 		if (bh->b_blocknr == block) {
2851da177e4SLinus Torvalds 			ret = bh;
2861da177e4SLinus Torvalds 			get_bh(bh);
2871da177e4SLinus Torvalds 			goto out_unlock;
2881da177e4SLinus Torvalds 		}
2891da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
2901da177e4SLinus Torvalds 			all_mapped = 0;
2911da177e4SLinus Torvalds 		bh = bh->b_this_page;
2921da177e4SLinus Torvalds 	} while (bh != head);
2931da177e4SLinus Torvalds 
2941da177e4SLinus Torvalds 	/* we might be here because some of the buffers on this page are
2951da177e4SLinus Torvalds 	 * not mapped.  This is due to various races between
2961da177e4SLinus Torvalds 	 * file I/O on the block device and getblk.  It gets dealt with
2971da177e4SLinus Torvalds 	 * elsewhere, so don't complain if we had some unmapped buffers
2981da177e4SLinus Torvalds 	 */
2991da177e4SLinus Torvalds 	if (all_mapped) {
3001da177e4SLinus Torvalds 		printk("__find_get_block_slow() failed. "
3011da177e4SLinus Torvalds 			"block=%llu, b_blocknr=%llu\n",
302205f87f6SBadari Pulavarty 			(unsigned long long)block,
303205f87f6SBadari Pulavarty 			(unsigned long long)bh->b_blocknr);
304205f87f6SBadari Pulavarty 		printk("b_state=0x%08lx, b_size=%zu\n",
305205f87f6SBadari Pulavarty 			bh->b_state, bh->b_size);
3061da177e4SLinus Torvalds 		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
3071da177e4SLinus Torvalds 	}
3081da177e4SLinus Torvalds out_unlock:
3091da177e4SLinus Torvalds 	spin_unlock(&bd_mapping->private_lock);
3101da177e4SLinus Torvalds 	page_cache_release(page);
3111da177e4SLinus Torvalds out:
3121da177e4SLinus Torvalds 	return ret;
3131da177e4SLinus Torvalds }
3141da177e4SLinus Torvalds 
3151da177e4SLinus Torvalds /* If invalidate_buffers() will trash dirty buffers, it means some kind
3161da177e4SLinus Torvalds    of fs corruption is going on. Trashing dirty data always implies losing
3171da177e4SLinus Torvalds    information that was supposed to be just stored on the physical layer
3181da177e4SLinus Torvalds    by the user.
3191da177e4SLinus Torvalds 
3201da177e4SLinus Torvalds    Thus invalidate_buffers in general usage is not allowed to trash
3211da177e4SLinus Torvalds    dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
3221da177e4SLinus Torvalds    be preserved.  These buffers are simply skipped.
3231da177e4SLinus Torvalds 
3241da177e4SLinus Torvalds    We also skip buffers which are still in use.  For example this can
3251da177e4SLinus Torvalds    happen if a userspace program is reading the block device.
3261da177e4SLinus Torvalds 
3271da177e4SLinus Torvalds    NOTE: In the case where the user removed a removable-media disk while
3281da177e4SLinus Torvalds    there was still dirty data not synced to disk (due to a bug in the device
3291da177e4SLinus Torvalds    driver or to an error of the user), not destroying the dirty buffers could
3301da177e4SLinus Torvalds    also generate corruption on the next media inserted, thus a parameter is
3311da177e4SLinus Torvalds    necessary to handle this case in the safest way possible (trying
3321da177e4SLinus Torvalds    not to corrupt the newly inserted disk with data belonging to
3331da177e4SLinus Torvalds    the old, now corrupted, disk). Also for the ramdisk the natural thing
3341da177e4SLinus Torvalds    to do in order to release the ramdisk memory is to destroy dirty buffers.
3351da177e4SLinus Torvalds 
3361da177e4SLinus Torvalds    These are two special cases. Normal usage implies that the device driver
3371da177e4SLinus Torvalds    issues a sync on the device (without waiting for I/O completion) and
3381da177e4SLinus Torvalds    then an invalidate_buffers call that doesn't trash dirty buffers.
3391da177e4SLinus Torvalds 
3401da177e4SLinus Torvalds    For handling cache coherency with the blkdev pagecache the 'update' case
3411da177e4SLinus Torvalds    has been introduced. It is needed to re-read from disk any pinned
3421da177e4SLinus Torvalds    buffer. NOTE: re-reading from disk is destructive so we can do it only
3431da177e4SLinus Torvalds    when we assume nobody is changing the buffercache under our I/O and when
3441da177e4SLinus Torvalds    we think the disk contains more recent information than the buffercache.
3451da177e4SLinus Torvalds    The update == 1 pass marks the buffers we need to update, the update == 2
3461da177e4SLinus Torvalds    pass does the actual I/O. */
347f98393a6SPeter Zijlstra void invalidate_bdev(struct block_device *bdev)
3481da177e4SLinus Torvalds {
3490e1dfc66SAndrew Morton 	struct address_space *mapping = bdev->bd_inode->i_mapping;
3500e1dfc66SAndrew Morton 
3510e1dfc66SAndrew Morton 	if (mapping->nrpages == 0)
3520e1dfc66SAndrew Morton 		return;
3530e1dfc66SAndrew Morton 
3541da177e4SLinus Torvalds 	invalidate_bh_lrus();
355fc0ecff6SAndrew Morton 	invalidate_mapping_pages(mapping, 0, -1);
3561da177e4SLinus Torvalds }
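/*
 * A hedged sketch of the usual calling pattern (the BLKFLSBUF ioctl does
 * roughly this): flush dirty data to disk first, then drop the now-clean
 * pagecache for the device:
 *
 *	fsync_bdev(bdev);
 *	invalidate_bdev(bdev);
 */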
3571da177e4SLinus Torvalds 
3581da177e4SLinus Torvalds /*
3591da177e4SLinus Torvalds  * Kick pdflush then try to free up some ZONE_NORMAL memory.
3601da177e4SLinus Torvalds  */
3611da177e4SLinus Torvalds static void free_more_memory(void)
3621da177e4SLinus Torvalds {
3631da177e4SLinus Torvalds 	struct zone **zones;
3641da177e4SLinus Torvalds 	pg_data_t *pgdat;
3651da177e4SLinus Torvalds 
366687a21ceSPekka J Enberg 	wakeup_pdflush(1024);
3671da177e4SLinus Torvalds 	yield();
3681da177e4SLinus Torvalds 
369ec936fc5SKAMEZAWA Hiroyuki 	for_each_online_pgdat(pgdat) {
370af4ca457SAl Viro 		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
3711da177e4SLinus Torvalds 		if (*zones)
3725ad333ebSAndy Whitcroft 			try_to_free_pages(zones, 0, GFP_NOFS);
3731da177e4SLinus Torvalds 	}
3741da177e4SLinus Torvalds }
3751da177e4SLinus Torvalds 
3761da177e4SLinus Torvalds /*
3771da177e4SLinus Torvalds  * I/O completion handler for block_read_full_page() - pages
3781da177e4SLinus Torvalds  * which come unlocked at the end of I/O.
3791da177e4SLinus Torvalds  */
3801da177e4SLinus Torvalds static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
3811da177e4SLinus Torvalds {
3821da177e4SLinus Torvalds 	unsigned long flags;
383a3972203SNick Piggin 	struct buffer_head *first;
3841da177e4SLinus Torvalds 	struct buffer_head *tmp;
3851da177e4SLinus Torvalds 	struct page *page;
3861da177e4SLinus Torvalds 	int page_uptodate = 1;
3871da177e4SLinus Torvalds 
3881da177e4SLinus Torvalds 	BUG_ON(!buffer_async_read(bh));
3891da177e4SLinus Torvalds 
3901da177e4SLinus Torvalds 	page = bh->b_page;
3911da177e4SLinus Torvalds 	if (uptodate) {
3921da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
3931da177e4SLinus Torvalds 	} else {
3941da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
3951da177e4SLinus Torvalds 		if (printk_ratelimit())
3961da177e4SLinus Torvalds 			buffer_io_error(bh);
3971da177e4SLinus Torvalds 		SetPageError(page);
3981da177e4SLinus Torvalds 	}
3991da177e4SLinus Torvalds 
4001da177e4SLinus Torvalds 	/*
4011da177e4SLinus Torvalds 	 * Be _very_ careful from here on. Bad things can happen if
4021da177e4SLinus Torvalds 	 * two buffer heads end IO at almost the same time and both
4031da177e4SLinus Torvalds 	 * decide that the page is now completely done.
4041da177e4SLinus Torvalds 	 */
405a3972203SNick Piggin 	first = page_buffers(page);
406a3972203SNick Piggin 	local_irq_save(flags);
407a3972203SNick Piggin 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
4081da177e4SLinus Torvalds 	clear_buffer_async_read(bh);
4091da177e4SLinus Torvalds 	unlock_buffer(bh);
4101da177e4SLinus Torvalds 	tmp = bh;
4111da177e4SLinus Torvalds 	do {
4121da177e4SLinus Torvalds 		if (!buffer_uptodate(tmp))
4131da177e4SLinus Torvalds 			page_uptodate = 0;
4141da177e4SLinus Torvalds 		if (buffer_async_read(tmp)) {
4151da177e4SLinus Torvalds 			BUG_ON(!buffer_locked(tmp));
4161da177e4SLinus Torvalds 			goto still_busy;
4171da177e4SLinus Torvalds 		}
4181da177e4SLinus Torvalds 		tmp = tmp->b_this_page;
4191da177e4SLinus Torvalds 	} while (tmp != bh);
420a3972203SNick Piggin 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
421a3972203SNick Piggin 	local_irq_restore(flags);
4221da177e4SLinus Torvalds 
4231da177e4SLinus Torvalds 	/*
4241da177e4SLinus Torvalds 	 * If none of the buffers had errors and they are all
4251da177e4SLinus Torvalds 	 * uptodate then we can set the page uptodate.
4261da177e4SLinus Torvalds 	 */
4271da177e4SLinus Torvalds 	if (page_uptodate && !PageError(page))
4281da177e4SLinus Torvalds 		SetPageUptodate(page);
4291da177e4SLinus Torvalds 	unlock_page(page);
4301da177e4SLinus Torvalds 	return;
4311da177e4SLinus Torvalds 
4321da177e4SLinus Torvalds still_busy:
433a3972203SNick Piggin 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
434a3972203SNick Piggin 	local_irq_restore(flags);
4351da177e4SLinus Torvalds 	return;
4361da177e4SLinus Torvalds }
4371da177e4SLinus Torvalds 
4381da177e4SLinus Torvalds /*
4391da177e4SLinus Torvalds  * Completion handler for block_write_full_page() - pages which are unlocked
4401da177e4SLinus Torvalds  * during I/O, and which have PageWriteback cleared upon I/O completion.
4411da177e4SLinus Torvalds  */
442b6cd0b77SAdrian Bunk static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
4431da177e4SLinus Torvalds {
4441da177e4SLinus Torvalds 	char b[BDEVNAME_SIZE];
4451da177e4SLinus Torvalds 	unsigned long flags;
446a3972203SNick Piggin 	struct buffer_head *first;
4471da177e4SLinus Torvalds 	struct buffer_head *tmp;
4481da177e4SLinus Torvalds 	struct page *page;
4491da177e4SLinus Torvalds 
4501da177e4SLinus Torvalds 	BUG_ON(!buffer_async_write(bh));
4511da177e4SLinus Torvalds 
4521da177e4SLinus Torvalds 	page = bh->b_page;
4531da177e4SLinus Torvalds 	if (uptodate) {
4541da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
4551da177e4SLinus Torvalds 	} else {
4561da177e4SLinus Torvalds 		if (printk_ratelimit()) {
4571da177e4SLinus Torvalds 			buffer_io_error(bh);
4581da177e4SLinus Torvalds 			printk(KERN_WARNING "lost page write due to "
4591da177e4SLinus Torvalds 					"I/O error on %s\n",
4601da177e4SLinus Torvalds 			       bdevname(bh->b_bdev, b));
4611da177e4SLinus Torvalds 		}
4621da177e4SLinus Torvalds 		set_bit(AS_EIO, &page->mapping->flags);
46358ff407bSJan Kara 		set_buffer_write_io_error(bh);
4641da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
4651da177e4SLinus Torvalds 		SetPageError(page);
4661da177e4SLinus Torvalds 	}
4671da177e4SLinus Torvalds 
468a3972203SNick Piggin 	first = page_buffers(page);
469a3972203SNick Piggin 	local_irq_save(flags);
470a3972203SNick Piggin 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
471a3972203SNick Piggin 
4721da177e4SLinus Torvalds 	clear_buffer_async_write(bh);
4731da177e4SLinus Torvalds 	unlock_buffer(bh);
4741da177e4SLinus Torvalds 	tmp = bh->b_this_page;
4751da177e4SLinus Torvalds 	while (tmp != bh) {
4761da177e4SLinus Torvalds 		if (buffer_async_write(tmp)) {
4771da177e4SLinus Torvalds 			BUG_ON(!buffer_locked(tmp));
4781da177e4SLinus Torvalds 			goto still_busy;
4791da177e4SLinus Torvalds 		}
4801da177e4SLinus Torvalds 		tmp = tmp->b_this_page;
4811da177e4SLinus Torvalds 	}
482a3972203SNick Piggin 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
483a3972203SNick Piggin 	local_irq_restore(flags);
4841da177e4SLinus Torvalds 	end_page_writeback(page);
4851da177e4SLinus Torvalds 	return;
4861da177e4SLinus Torvalds 
4871da177e4SLinus Torvalds still_busy:
488a3972203SNick Piggin 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
489a3972203SNick Piggin 	local_irq_restore(flags);
4901da177e4SLinus Torvalds 	return;
4911da177e4SLinus Torvalds }
4921da177e4SLinus Torvalds 
4931da177e4SLinus Torvalds /*
4941da177e4SLinus Torvalds  * If a page's buffers are under async read-in (end_buffer_async_read
4951da177e4SLinus Torvalds  * completion) then there is a possibility that another thread of
4961da177e4SLinus Torvalds  * control could lock one of the buffers after it has completed
4971da177e4SLinus Torvalds  * but while some of the other buffers have not completed.  This
4981da177e4SLinus Torvalds  * locked buffer would confuse end_buffer_async_read() into not unlocking
4991da177e4SLinus Torvalds  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
5001da177e4SLinus Torvalds  * that this buffer is not under async I/O.
5011da177e4SLinus Torvalds  *
5021da177e4SLinus Torvalds  * The page comes unlocked when it has no locked buffer_async buffers
5031da177e4SLinus Torvalds  * left.
5041da177e4SLinus Torvalds  *
5051da177e4SLinus Torvalds  * PageLocked prevents anyone starting new async I/O reads any of
5061da177e4SLinus Torvalds  * the buffers.
5071da177e4SLinus Torvalds  *
5081da177e4SLinus Torvalds  * PageWriteback is used to prevent simultaneous writeout of the same
5091da177e4SLinus Torvalds  * page.
5101da177e4SLinus Torvalds  *
5111da177e4SLinus Torvalds  * PageLocked prevents anyone from starting writeback of a page which is
5121da177e4SLinus Torvalds  * under read I/O (PageWriteback is only ever set against a locked page).
5131da177e4SLinus Torvalds  */
5141da177e4SLinus Torvalds static void mark_buffer_async_read(struct buffer_head *bh)
5151da177e4SLinus Torvalds {
5161da177e4SLinus Torvalds 	bh->b_end_io = end_buffer_async_read;
5171da177e4SLinus Torvalds 	set_buffer_async_read(bh);
5181da177e4SLinus Torvalds }
5191da177e4SLinus Torvalds 
5201da177e4SLinus Torvalds void mark_buffer_async_write(struct buffer_head *bh)
5211da177e4SLinus Torvalds {
5221da177e4SLinus Torvalds 	bh->b_end_io = end_buffer_async_write;
5231da177e4SLinus Torvalds 	set_buffer_async_write(bh);
5241da177e4SLinus Torvalds }
5251da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_async_write);
5261da177e4SLinus Torvalds 
5271da177e4SLinus Torvalds 
5281da177e4SLinus Torvalds /*
5291da177e4SLinus Torvalds  * fs/buffer.c contains helper functions for buffer-backed address space's
5301da177e4SLinus Torvalds  * fsync functions.  A common requirement for buffer-based filesystems is
5311da177e4SLinus Torvalds  * that certain data from the backing blockdev needs to be written out for
5321da177e4SLinus Torvalds  * a successful fsync().  For example, ext2 indirect blocks need to be
5331da177e4SLinus Torvalds  * written back and waited upon before fsync() returns.
5341da177e4SLinus Torvalds  *
5351da177e4SLinus Torvalds  * The functions mark_buffer_dirty_inode(), sync_mapping_buffers(),
5361da177e4SLinus Torvalds  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
5371da177e4SLinus Torvalds  * management of a list of dependent buffers at ->i_mapping->private_list.
5381da177e4SLinus Torvalds  *
5391da177e4SLinus Torvalds  * Locking is a little subtle: try_to_free_buffers() will remove buffers
5401da177e4SLinus Torvalds  * from their controlling inode's queue when they are being freed.  But
5411da177e4SLinus Torvalds  * try_to_free_buffers() will be operating against the *blockdev* mapping
5421da177e4SLinus Torvalds  * at the time, not against the S_ISREG file which depends on those buffers.
5431da177e4SLinus Torvalds  * So the locking for private_list is via the private_lock in the address_space
5441da177e4SLinus Torvalds  * which backs the buffers.  Which is different from the address_space
5451da177e4SLinus Torvalds  * against which the buffers are listed.  So for a particular address_space,
5461da177e4SLinus Torvalds  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
5471da177e4SLinus Torvalds  * mapping->private_list will always be protected by the backing blockdev's
5481da177e4SLinus Torvalds  * ->private_lock.
5491da177e4SLinus Torvalds  *
5501da177e4SLinus Torvalds  * Which introduces a requirement: all buffers on an address_space's
5511da177e4SLinus Torvalds  * ->private_list must be from the same address_space: the blockdev's.
5521da177e4SLinus Torvalds  *
5531da177e4SLinus Torvalds  * address_spaces which do not place buffers at ->private_list via these
5541da177e4SLinus Torvalds  * utility functions are free to use private_lock and private_list for
5551da177e4SLinus Torvalds  * whatever they want.  The only requirement is that list_empty(private_list)
5561da177e4SLinus Torvalds  * be true at clear_inode() time.
5571da177e4SLinus Torvalds  *
5581da177e4SLinus Torvalds  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
5591da177e4SLinus Torvalds  * filesystems should do that.  invalidate_inode_buffers() should just go
5601da177e4SLinus Torvalds  * BUG_ON(!list_empty).
5611da177e4SLinus Torvalds  *
5621da177e4SLinus Torvalds  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
5631da177e4SLinus Torvalds  * take an address_space, not an inode.  And it should be called
5641da177e4SLinus Torvalds  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
5651da177e4SLinus Torvalds  * queued up.
5661da177e4SLinus Torvalds  *
5671da177e4SLinus Torvalds  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
5681da177e4SLinus Torvalds  * list if it is already on a list.  Because if the buffer is on a list,
5691da177e4SLinus Torvalds  * it *must* already be on the right one.  If not, the filesystem is being
5701da177e4SLinus Torvalds  * silly.  This will save a ton of locking.  But first we have to ensure
5711da177e4SLinus Torvalds  * that buffers are taken *off* the old inode's list when they are freed
5721da177e4SLinus Torvalds  * (presumably in truncate).  That requires careful auditing of all
5731da177e4SLinus Torvalds  * filesystems (do it inside bforget()).  It could also be done by bringing
5741da177e4SLinus Torvalds  * b_inode back.
5751da177e4SLinus Torvalds  */
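/*
 * A minimal sketch of how a buffer-backed filesystem ties into the helpers
 * described above (ext2's fsync path is broadly of this shape; "indirect_bh"
 * is just an illustrative name):
 *
 *	// while updating an S_ISREG inode's metadata:
 *	mark_buffer_dirty_inode(indirect_bh, inode);
 *
 *	// later, in the filesystem's fsync path:
 *	err = sync_mapping_buffers(inode->i_mapping);
 *
 * sync_mapping_buffers() then writes out and waits upon everything queued on
 * inode->i_mapping->private_list via fsync_buffers_list().
 */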
5761da177e4SLinus Torvalds 
5771da177e4SLinus Torvalds /*
5781da177e4SLinus Torvalds  * The buffer's backing address_space's private_lock must be held
5791da177e4SLinus Torvalds  */
5801da177e4SLinus Torvalds static inline void __remove_assoc_queue(struct buffer_head *bh)
5811da177e4SLinus Torvalds {
5821da177e4SLinus Torvalds 	list_del_init(&bh->b_assoc_buffers);
58358ff407bSJan Kara 	WARN_ON(!bh->b_assoc_map);
58458ff407bSJan Kara 	if (buffer_write_io_error(bh))
58558ff407bSJan Kara 		set_bit(AS_EIO, &bh->b_assoc_map->flags);
58658ff407bSJan Kara 	bh->b_assoc_map = NULL;
5871da177e4SLinus Torvalds }
5881da177e4SLinus Torvalds 
5891da177e4SLinus Torvalds int inode_has_buffers(struct inode *inode)
5901da177e4SLinus Torvalds {
5911da177e4SLinus Torvalds 	return !list_empty(&inode->i_data.private_list);
5921da177e4SLinus Torvalds }
5931da177e4SLinus Torvalds 
5941da177e4SLinus Torvalds /*
5951da177e4SLinus Torvalds  * osync is designed to support O_SYNC io.  It waits synchronously for
5961da177e4SLinus Torvalds  * all already-submitted IO to complete, but does not queue any new
5971da177e4SLinus Torvalds  * writes to the disk.
5981da177e4SLinus Torvalds  *
5991da177e4SLinus Torvalds  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
6001da177e4SLinus Torvalds  * you dirty the buffers, and then use osync_inode_buffers to wait for
6011da177e4SLinus Torvalds  * completion.  Any other dirty buffers which are not yet queued for
6021da177e4SLinus Torvalds  * write will not be flushed to disk by the osync.
6031da177e4SLinus Torvalds  */
6041da177e4SLinus Torvalds static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
6051da177e4SLinus Torvalds {
6061da177e4SLinus Torvalds 	struct buffer_head *bh;
6071da177e4SLinus Torvalds 	struct list_head *p;
6081da177e4SLinus Torvalds 	int err = 0;
6091da177e4SLinus Torvalds 
6101da177e4SLinus Torvalds 	spin_lock(lock);
6111da177e4SLinus Torvalds repeat:
6121da177e4SLinus Torvalds 	list_for_each_prev(p, list) {
6131da177e4SLinus Torvalds 		bh = BH_ENTRY(p);
6141da177e4SLinus Torvalds 		if (buffer_locked(bh)) {
6151da177e4SLinus Torvalds 			get_bh(bh);
6161da177e4SLinus Torvalds 			spin_unlock(lock);
6171da177e4SLinus Torvalds 			wait_on_buffer(bh);
6181da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
6191da177e4SLinus Torvalds 				err = -EIO;
6201da177e4SLinus Torvalds 			brelse(bh);
6211da177e4SLinus Torvalds 			spin_lock(lock);
6221da177e4SLinus Torvalds 			goto repeat;
6231da177e4SLinus Torvalds 		}
6241da177e4SLinus Torvalds 	}
6251da177e4SLinus Torvalds 	spin_unlock(lock);
6261da177e4SLinus Torvalds 	return err;
6271da177e4SLinus Torvalds }
6281da177e4SLinus Torvalds 
6291da177e4SLinus Torvalds /**
6301da177e4SLinus Torvalds  * sync_mapping_buffers - write out and wait upon a mapping's "associated"
6311da177e4SLinus Torvalds  *                        buffers
63267be2dd1SMartin Waitz  * @mapping: the mapping which wants those buffers written
6331da177e4SLinus Torvalds  *
6341da177e4SLinus Torvalds  * Starts I/O against the buffers at mapping->private_list, and waits upon
6351da177e4SLinus Torvalds  * that I/O.
6361da177e4SLinus Torvalds  *
63767be2dd1SMartin Waitz  * Basically, this is a convenience function for fsync().
63867be2dd1SMartin Waitz  * @mapping is a file or directory which needs those buffers to be written for
63967be2dd1SMartin Waitz  * a successful fsync().
6401da177e4SLinus Torvalds  */
6411da177e4SLinus Torvalds int sync_mapping_buffers(struct address_space *mapping)
6421da177e4SLinus Torvalds {
6431da177e4SLinus Torvalds 	struct address_space *buffer_mapping = mapping->assoc_mapping;
6441da177e4SLinus Torvalds 
6451da177e4SLinus Torvalds 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
6461da177e4SLinus Torvalds 		return 0;
6471da177e4SLinus Torvalds 
6481da177e4SLinus Torvalds 	return fsync_buffers_list(&buffer_mapping->private_lock,
6491da177e4SLinus Torvalds 					&mapping->private_list);
6501da177e4SLinus Torvalds }
6511da177e4SLinus Torvalds EXPORT_SYMBOL(sync_mapping_buffers);
6521da177e4SLinus Torvalds 
6531da177e4SLinus Torvalds /*
6541da177e4SLinus Torvalds  * Called when we've recently written block `bblock', and it is known that
6551da177e4SLinus Torvalds  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
6561da177e4SLinus Torvalds  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
6571da177e4SLinus Torvalds  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
6581da177e4SLinus Torvalds  */
6591da177e4SLinus Torvalds void write_boundary_block(struct block_device *bdev,
6601da177e4SLinus Torvalds 			sector_t bblock, unsigned blocksize)
6611da177e4SLinus Torvalds {
6621da177e4SLinus Torvalds 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
6631da177e4SLinus Torvalds 	if (bh) {
6641da177e4SLinus Torvalds 		if (buffer_dirty(bh))
6651da177e4SLinus Torvalds 			ll_rw_block(WRITE, 1, &bh);
6661da177e4SLinus Torvalds 		put_bh(bh);
6671da177e4SLinus Torvalds 	}
6681da177e4SLinus Torvalds }
6691da177e4SLinus Torvalds 
6701da177e4SLinus Torvalds void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
6711da177e4SLinus Torvalds {
6721da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
6731da177e4SLinus Torvalds 	struct address_space *buffer_mapping = bh->b_page->mapping;
6741da177e4SLinus Torvalds 
6751da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
6761da177e4SLinus Torvalds 	if (!mapping->assoc_mapping) {
6771da177e4SLinus Torvalds 		mapping->assoc_mapping = buffer_mapping;
6781da177e4SLinus Torvalds 	} else {
679e827f923SEric Sesterhenn 		BUG_ON(mapping->assoc_mapping != buffer_mapping);
6801da177e4SLinus Torvalds 	}
6811da177e4SLinus Torvalds 	if (list_empty(&bh->b_assoc_buffers)) {
6821da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
6831da177e4SLinus Torvalds 		list_move_tail(&bh->b_assoc_buffers,
6841da177e4SLinus Torvalds 				&mapping->private_list);
68558ff407bSJan Kara 		bh->b_assoc_map = mapping;
6861da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
6871da177e4SLinus Torvalds 	}
6881da177e4SLinus Torvalds }
6891da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_dirty_inode);
6901da177e4SLinus Torvalds 
6911da177e4SLinus Torvalds /*
692787d2214SNick Piggin  * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
693787d2214SNick Piggin  * dirty.
694787d2214SNick Piggin  *
695787d2214SNick Piggin  * If warn is true, then emit a warning if the page is not uptodate and has
696787d2214SNick Piggin  * not been truncated.
697787d2214SNick Piggin  */
698787d2214SNick Piggin static int __set_page_dirty(struct page *page,
699787d2214SNick Piggin 		struct address_space *mapping, int warn)
700787d2214SNick Piggin {
701787d2214SNick Piggin 	if (unlikely(!mapping))
702787d2214SNick Piggin 		return !TestSetPageDirty(page);
703787d2214SNick Piggin 
704787d2214SNick Piggin 	if (TestSetPageDirty(page))
705787d2214SNick Piggin 		return 0;
706787d2214SNick Piggin 
707787d2214SNick Piggin 	write_lock_irq(&mapping->tree_lock);
708787d2214SNick Piggin 	if (page->mapping) {	/* Race with truncate? */
709787d2214SNick Piggin 		WARN_ON_ONCE(warn && !PageUptodate(page));
710787d2214SNick Piggin 
711787d2214SNick Piggin 		if (mapping_cap_account_dirty(mapping)) {
712787d2214SNick Piggin 			__inc_zone_page_state(page, NR_FILE_DIRTY);
713c9e51e41SPeter Zijlstra 			__inc_bdi_stat(mapping->backing_dev_info,
714c9e51e41SPeter Zijlstra 					BDI_RECLAIMABLE);
715787d2214SNick Piggin 			task_io_account_write(PAGE_CACHE_SIZE);
716787d2214SNick Piggin 		}
717787d2214SNick Piggin 		radix_tree_tag_set(&mapping->page_tree,
718787d2214SNick Piggin 				page_index(page), PAGECACHE_TAG_DIRTY);
719787d2214SNick Piggin 	}
720787d2214SNick Piggin 	write_unlock_irq(&mapping->tree_lock);
721787d2214SNick Piggin 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
722787d2214SNick Piggin 
723787d2214SNick Piggin 	return 1;
724787d2214SNick Piggin }
725787d2214SNick Piggin 
726787d2214SNick Piggin /*
7271da177e4SLinus Torvalds  * Add a page to the dirty page list.
7281da177e4SLinus Torvalds  *
7291da177e4SLinus Torvalds  * It is a sad fact of life that this function is called from several places
7301da177e4SLinus Torvalds  * deeply under spinlocking.  It may not sleep.
7311da177e4SLinus Torvalds  *
7321da177e4SLinus Torvalds  * If the page has buffers, the uptodate buffers are set dirty, to preserve
7331da177e4SLinus Torvalds  * dirty-state coherency between the page and the buffers.  If the page does
7341da177e4SLinus Torvalds  * not have buffers then when they are later attached they will all be set
7351da177e4SLinus Torvalds  * dirty.
7361da177e4SLinus Torvalds  *
7371da177e4SLinus Torvalds  * The buffers are dirtied before the page is dirtied.  There's a small race
7381da177e4SLinus Torvalds  * window in which a writepage caller may see the page cleanness but not the
7391da177e4SLinus Torvalds  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
7401da177e4SLinus Torvalds  * before the buffers, a concurrent writepage caller could clear the page dirty
7411da177e4SLinus Torvalds  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
7421da177e4SLinus Torvalds  * page on the dirty page list.
7431da177e4SLinus Torvalds  *
7441da177e4SLinus Torvalds  * We use private_lock to lock against try_to_free_buffers while using the
7451da177e4SLinus Torvalds  * page's buffer list.  Also use this to protect against clean buffers being
7461da177e4SLinus Torvalds  * added to the page after it was set dirty.
7471da177e4SLinus Torvalds  *
7481da177e4SLinus Torvalds  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
7491da177e4SLinus Torvalds  * address_space though.
7501da177e4SLinus Torvalds  */
7511da177e4SLinus Torvalds int __set_page_dirty_buffers(struct page *page)
7521da177e4SLinus Torvalds {
753787d2214SNick Piggin 	struct address_space *mapping = page_mapping(page);
754ebf7a227SNick Piggin 
755ebf7a227SNick Piggin 	if (unlikely(!mapping))
756ebf7a227SNick Piggin 		return !TestSetPageDirty(page);
7571da177e4SLinus Torvalds 
7581da177e4SLinus Torvalds 	spin_lock(&mapping->private_lock);
7591da177e4SLinus Torvalds 	if (page_has_buffers(page)) {
7601da177e4SLinus Torvalds 		struct buffer_head *head = page_buffers(page);
7611da177e4SLinus Torvalds 		struct buffer_head *bh = head;
7621da177e4SLinus Torvalds 
7631da177e4SLinus Torvalds 		do {
7641da177e4SLinus Torvalds 			set_buffer_dirty(bh);
7651da177e4SLinus Torvalds 			bh = bh->b_this_page;
7661da177e4SLinus Torvalds 		} while (bh != head);
7671da177e4SLinus Torvalds 	}
7681da177e4SLinus Torvalds 	spin_unlock(&mapping->private_lock);
7691da177e4SLinus Torvalds 
770787d2214SNick Piggin 	return __set_page_dirty(page, mapping, 1);
7711da177e4SLinus Torvalds }
7721da177e4SLinus Torvalds EXPORT_SYMBOL(__set_page_dirty_buffers);
7731da177e4SLinus Torvalds 
7741da177e4SLinus Torvalds /*
7751da177e4SLinus Torvalds  * Write out and wait upon a list of buffers.
7761da177e4SLinus Torvalds  *
7771da177e4SLinus Torvalds  * We have conflicting pressures: we want to make sure that all
7781da177e4SLinus Torvalds  * initially dirty buffers get waited on, but that any subsequently
7791da177e4SLinus Torvalds  * dirtied buffers don't.  After all, we don't want fsync to last
7801da177e4SLinus Torvalds  * forever if somebody is actively writing to the file.
7811da177e4SLinus Torvalds  *
7821da177e4SLinus Torvalds  * Do this in two main stages: first we copy dirty buffers to a
7831da177e4SLinus Torvalds  * temporary inode list, queueing the writes as we go.  Then we clean
7841da177e4SLinus Torvalds  * up, waiting for those writes to complete.
7851da177e4SLinus Torvalds  *
7861da177e4SLinus Torvalds  * During this second stage, any subsequent updates to the file may end
7871da177e4SLinus Torvalds  * up refiling the buffer on the original inode's dirty list again, so
7881da177e4SLinus Torvalds  * there is a chance we will end up with a buffer queued for write but
7891da177e4SLinus Torvalds  * not yet completed on that list.  So, as a final cleanup we go through
7901da177e4SLinus Torvalds  * the osync code to catch these locked, dirty buffers without requeuing
7911da177e4SLinus Torvalds  * any newly dirty buffers for write.
7921da177e4SLinus Torvalds  */
7931da177e4SLinus Torvalds static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
7941da177e4SLinus Torvalds {
7951da177e4SLinus Torvalds 	struct buffer_head *bh;
7961da177e4SLinus Torvalds 	struct list_head tmp;
7971da177e4SLinus Torvalds 	int err = 0, err2;
7981da177e4SLinus Torvalds 
7991da177e4SLinus Torvalds 	INIT_LIST_HEAD(&tmp);
8001da177e4SLinus Torvalds 
8011da177e4SLinus Torvalds 	spin_lock(lock);
8021da177e4SLinus Torvalds 	while (!list_empty(list)) {
8031da177e4SLinus Torvalds 		bh = BH_ENTRY(list->next);
80458ff407bSJan Kara 		__remove_assoc_queue(bh);
8051da177e4SLinus Torvalds 		if (buffer_dirty(bh) || buffer_locked(bh)) {
8061da177e4SLinus Torvalds 			list_add(&bh->b_assoc_buffers, &tmp);
8071da177e4SLinus Torvalds 			if (buffer_dirty(bh)) {
8081da177e4SLinus Torvalds 				get_bh(bh);
8091da177e4SLinus Torvalds 				spin_unlock(lock);
8101da177e4SLinus Torvalds 				/*
8111da177e4SLinus Torvalds 				 * Ensure any pending I/O completes so that
8121da177e4SLinus Torvalds 				 * ll_rw_block() actually writes the current
8131da177e4SLinus Torvalds 				 * contents - it is a noop if I/O is still in
8141da177e4SLinus Torvalds 				 * flight on potentially older contents.
8151da177e4SLinus Torvalds 				 */
816a7662236SJan Kara 				ll_rw_block(SWRITE, 1, &bh);
8171da177e4SLinus Torvalds 				brelse(bh);
8181da177e4SLinus Torvalds 				spin_lock(lock);
8191da177e4SLinus Torvalds 			}
8201da177e4SLinus Torvalds 		}
8211da177e4SLinus Torvalds 	}
8221da177e4SLinus Torvalds 
8231da177e4SLinus Torvalds 	while (!list_empty(&tmp)) {
8241da177e4SLinus Torvalds 		bh = BH_ENTRY(tmp.prev);
82558ff407bSJan Kara 		list_del_init(&bh->b_assoc_buffers);
8261da177e4SLinus Torvalds 		get_bh(bh);
8271da177e4SLinus Torvalds 		spin_unlock(lock);
8281da177e4SLinus Torvalds 		wait_on_buffer(bh);
8291da177e4SLinus Torvalds 		if (!buffer_uptodate(bh))
8301da177e4SLinus Torvalds 			err = -EIO;
8311da177e4SLinus Torvalds 		brelse(bh);
8321da177e4SLinus Torvalds 		spin_lock(lock);
8331da177e4SLinus Torvalds 	}
8341da177e4SLinus Torvalds 
8351da177e4SLinus Torvalds 	spin_unlock(lock);
8361da177e4SLinus Torvalds 	err2 = osync_buffers_list(lock, list);
8371da177e4SLinus Torvalds 	if (err)
8381da177e4SLinus Torvalds 		return err;
8391da177e4SLinus Torvalds 	else
8401da177e4SLinus Torvalds 		return err2;
8411da177e4SLinus Torvalds }
8421da177e4SLinus Torvalds 
8431da177e4SLinus Torvalds /*
8441da177e4SLinus Torvalds  * Invalidate any and all dirty buffers on a given inode.  We are
8451da177e4SLinus Torvalds  * probably unmounting the fs, but that doesn't mean we have already
8461da177e4SLinus Torvalds  * done a sync().  Just drop the buffers from the inode list.
8471da177e4SLinus Torvalds  *
8481da177e4SLinus Torvalds  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
8491da177e4SLinus Torvalds  * assumes that all the buffers are against the blockdev.  Not true
8501da177e4SLinus Torvalds  * for reiserfs.
8511da177e4SLinus Torvalds  */
8521da177e4SLinus Torvalds void invalidate_inode_buffers(struct inode *inode)
8531da177e4SLinus Torvalds {
8541da177e4SLinus Torvalds 	if (inode_has_buffers(inode)) {
8551da177e4SLinus Torvalds 		struct address_space *mapping = &inode->i_data;
8561da177e4SLinus Torvalds 		struct list_head *list = &mapping->private_list;
8571da177e4SLinus Torvalds 		struct address_space *buffer_mapping = mapping->assoc_mapping;
8581da177e4SLinus Torvalds 
8591da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
8601da177e4SLinus Torvalds 		while (!list_empty(list))
8611da177e4SLinus Torvalds 			__remove_assoc_queue(BH_ENTRY(list->next));
8621da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
8631da177e4SLinus Torvalds 	}
8641da177e4SLinus Torvalds }
8651da177e4SLinus Torvalds 
8661da177e4SLinus Torvalds /*
8671da177e4SLinus Torvalds  * Remove any clean buffers from the inode's buffer list.  This is called
8681da177e4SLinus Torvalds  * when we're trying to free the inode itself.  Those buffers can pin it.
8691da177e4SLinus Torvalds  *
8701da177e4SLinus Torvalds  * Returns true if all buffers were removed.
8711da177e4SLinus Torvalds  */
8721da177e4SLinus Torvalds int remove_inode_buffers(struct inode *inode)
8731da177e4SLinus Torvalds {
8741da177e4SLinus Torvalds 	int ret = 1;
8751da177e4SLinus Torvalds 
8761da177e4SLinus Torvalds 	if (inode_has_buffers(inode)) {
8771da177e4SLinus Torvalds 		struct address_space *mapping = &inode->i_data;
8781da177e4SLinus Torvalds 		struct list_head *list = &mapping->private_list;
8791da177e4SLinus Torvalds 		struct address_space *buffer_mapping = mapping->assoc_mapping;
8801da177e4SLinus Torvalds 
8811da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
8821da177e4SLinus Torvalds 		while (!list_empty(list)) {
8831da177e4SLinus Torvalds 			struct buffer_head *bh = BH_ENTRY(list->next);
8841da177e4SLinus Torvalds 			if (buffer_dirty(bh)) {
8851da177e4SLinus Torvalds 				ret = 0;
8861da177e4SLinus Torvalds 				break;
8871da177e4SLinus Torvalds 			}
8881da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
8891da177e4SLinus Torvalds 		}
8901da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
8911da177e4SLinus Torvalds 	}
8921da177e4SLinus Torvalds 	return ret;
8931da177e4SLinus Torvalds }
8941da177e4SLinus Torvalds 
8951da177e4SLinus Torvalds /*
8961da177e4SLinus Torvalds  * Create the appropriate buffers when given a page for data area and
8971da177e4SLinus Torvalds  * the size of each buffer.  Use the bh->b_this_page linked list to
8981da177e4SLinus Torvalds  * follow the buffers created.  Return NULL if unable to create more
8991da177e4SLinus Torvalds  * buffers.
9001da177e4SLinus Torvalds  *
9011da177e4SLinus Torvalds  * The retry flag is used to differentiate async IO (paging, swapping),
9021da177e4SLinus Torvalds  * which may not fail, from ordinary buffer allocations, which may.
9031da177e4SLinus Torvalds  */
9041da177e4SLinus Torvalds struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
9051da177e4SLinus Torvalds 		int retry)
9061da177e4SLinus Torvalds {
9071da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
9081da177e4SLinus Torvalds 	long offset;
9091da177e4SLinus Torvalds 
9101da177e4SLinus Torvalds try_again:
9111da177e4SLinus Torvalds 	head = NULL;
9121da177e4SLinus Torvalds 	offset = PAGE_SIZE;
9131da177e4SLinus Torvalds 	while ((offset -= size) >= 0) {
9141da177e4SLinus Torvalds 		bh = alloc_buffer_head(GFP_NOFS);
9151da177e4SLinus Torvalds 		if (!bh)
9161da177e4SLinus Torvalds 			goto no_grow;
9171da177e4SLinus Torvalds 
9181da177e4SLinus Torvalds 		bh->b_bdev = NULL;
9191da177e4SLinus Torvalds 		bh->b_this_page = head;
9201da177e4SLinus Torvalds 		bh->b_blocknr = -1;
9211da177e4SLinus Torvalds 		head = bh;
9221da177e4SLinus Torvalds 
9231da177e4SLinus Torvalds 		bh->b_state = 0;
9241da177e4SLinus Torvalds 		atomic_set(&bh->b_count, 0);
925fc5cd582SChris Mason 		bh->b_private = NULL;
9261da177e4SLinus Torvalds 		bh->b_size = size;
9271da177e4SLinus Torvalds 
9281da177e4SLinus Torvalds 		/* Link the buffer to its page */
9291da177e4SLinus Torvalds 		set_bh_page(bh, page, offset);
9301da177e4SLinus Torvalds 
93101ffe339SNathan Scott 		init_buffer(bh, NULL, NULL);
9321da177e4SLinus Torvalds 	}
9331da177e4SLinus Torvalds 	return head;
9341da177e4SLinus Torvalds /*
9351da177e4SLinus Torvalds  * In case anything failed, we just free everything we got.
9361da177e4SLinus Torvalds  */
9371da177e4SLinus Torvalds no_grow:
9381da177e4SLinus Torvalds 	if (head) {
9391da177e4SLinus Torvalds 		do {
9401da177e4SLinus Torvalds 			bh = head;
9411da177e4SLinus Torvalds 			head = head->b_this_page;
9421da177e4SLinus Torvalds 			free_buffer_head(bh);
9431da177e4SLinus Torvalds 		} while (head);
9441da177e4SLinus Torvalds 	}
9451da177e4SLinus Torvalds 
9461da177e4SLinus Torvalds 	/*
9471da177e4SLinus Torvalds 	 * Return failure for non-async IO requests.  Async IO requests
9481da177e4SLinus Torvalds 	 * are not allowed to fail, so we have to wait until buffer heads
9491da177e4SLinus Torvalds 	 * become available.  But we don't want tasks sleeping with
9501da177e4SLinus Torvalds 	 * partially complete buffers, so all were released above.
9511da177e4SLinus Torvalds 	 */
9521da177e4SLinus Torvalds 	if (!retry)
9531da177e4SLinus Torvalds 		return NULL;
9541da177e4SLinus Torvalds 
9551da177e4SLinus Torvalds 	/* We're _really_ low on memory. Now we just
9561da177e4SLinus Torvalds 	 * wait for old buffer heads to become free due to
9571da177e4SLinus Torvalds 	 * finishing IO.  Since this is an async request and
9581da177e4SLinus Torvalds 	 * the reserve list is empty, we're sure there are
9591da177e4SLinus Torvalds 	 * async buffer heads in use.
9601da177e4SLinus Torvalds 	 */
9611da177e4SLinus Torvalds 	free_more_memory();
9621da177e4SLinus Torvalds 	goto try_again;
9631da177e4SLinus Torvalds }
9641da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(alloc_page_buffers);
9651da177e4SLinus Torvalds 
9661da177e4SLinus Torvalds static inline void
9671da177e4SLinus Torvalds link_dev_buffers(struct page *page, struct buffer_head *head)
9681da177e4SLinus Torvalds {
9691da177e4SLinus Torvalds 	struct buffer_head *bh, *tail;
9701da177e4SLinus Torvalds 
9711da177e4SLinus Torvalds 	bh = head;
9721da177e4SLinus Torvalds 	do {
9731da177e4SLinus Torvalds 		tail = bh;
9741da177e4SLinus Torvalds 		bh = bh->b_this_page;
9751da177e4SLinus Torvalds 	} while (bh);
9761da177e4SLinus Torvalds 	tail->b_this_page = head;
9771da177e4SLinus Torvalds 	attach_page_buffers(page, head);
9781da177e4SLinus Torvalds }
9791da177e4SLinus Torvalds 
9801da177e4SLinus Torvalds /*
9811da177e4SLinus Torvalds  * Initialise the state of a blockdev page's buffers.
9821da177e4SLinus Torvalds  */
9831da177e4SLinus Torvalds static void
9841da177e4SLinus Torvalds init_page_buffers(struct page *page, struct block_device *bdev,
9851da177e4SLinus Torvalds 			sector_t block, int size)
9861da177e4SLinus Torvalds {
9871da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);
9881da177e4SLinus Torvalds 	struct buffer_head *bh = head;
9891da177e4SLinus Torvalds 	int uptodate = PageUptodate(page);
9901da177e4SLinus Torvalds 
9911da177e4SLinus Torvalds 	do {
9921da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
9931da177e4SLinus Torvalds 			init_buffer(bh, NULL, NULL);
9941da177e4SLinus Torvalds 			bh->b_bdev = bdev;
9951da177e4SLinus Torvalds 			bh->b_blocknr = block;
9961da177e4SLinus Torvalds 			if (uptodate)
9971da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
9981da177e4SLinus Torvalds 			set_buffer_mapped(bh);
9991da177e4SLinus Torvalds 		}
10001da177e4SLinus Torvalds 		block++;
10011da177e4SLinus Torvalds 		bh = bh->b_this_page;
10021da177e4SLinus Torvalds 	} while (bh != head);
10031da177e4SLinus Torvalds }
10041da177e4SLinus Torvalds 
10051da177e4SLinus Torvalds /*
10061da177e4SLinus Torvalds  * Create the page-cache page that contains the requested block.
10071da177e4SLinus Torvalds  *
10081da177e4SLinus Torvalds  * This is used purely for blockdev mappings.
10091da177e4SLinus Torvalds  */
10101da177e4SLinus Torvalds static struct page *
10111da177e4SLinus Torvalds grow_dev_page(struct block_device *bdev, sector_t block,
10121da177e4SLinus Torvalds 		pgoff_t index, int size)
10131da177e4SLinus Torvalds {
10141da177e4SLinus Torvalds 	struct inode *inode = bdev->bd_inode;
10151da177e4SLinus Torvalds 	struct page *page;
10161da177e4SLinus Torvalds 	struct buffer_head *bh;
10171da177e4SLinus Torvalds 
1018ea125892SChristoph Lameter 	page = find_or_create_page(inode->i_mapping, index,
1019769848c0SMel Gorman 		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
10201da177e4SLinus Torvalds 	if (!page)
10211da177e4SLinus Torvalds 		return NULL;
10221da177e4SLinus Torvalds 
1023e827f923SEric Sesterhenn 	BUG_ON(!PageLocked(page));
10241da177e4SLinus Torvalds 
10251da177e4SLinus Torvalds 	if (page_has_buffers(page)) {
10261da177e4SLinus Torvalds 		bh = page_buffers(page);
10271da177e4SLinus Torvalds 		if (bh->b_size == size) {
10281da177e4SLinus Torvalds 			init_page_buffers(page, bdev, block, size);
10291da177e4SLinus Torvalds 			return page;
10301da177e4SLinus Torvalds 		}
10311da177e4SLinus Torvalds 		if (!try_to_free_buffers(page))
10321da177e4SLinus Torvalds 			goto failed;
10331da177e4SLinus Torvalds 	}
10341da177e4SLinus Torvalds 
10351da177e4SLinus Torvalds 	/*
10361da177e4SLinus Torvalds 	 * Allocate some buffers for this page
10371da177e4SLinus Torvalds 	 */
10381da177e4SLinus Torvalds 	bh = alloc_page_buffers(page, size, 0);
10391da177e4SLinus Torvalds 	if (!bh)
10401da177e4SLinus Torvalds 		goto failed;
10411da177e4SLinus Torvalds 
10421da177e4SLinus Torvalds 	/*
10431da177e4SLinus Torvalds 	 * Link the page to the buffers and initialise them.  Take the
10441da177e4SLinus Torvalds 	 * lock to be atomic wrt __find_get_block(), which does not
10451da177e4SLinus Torvalds 	 * run under the page lock.
10461da177e4SLinus Torvalds 	 */
10471da177e4SLinus Torvalds 	spin_lock(&inode->i_mapping->private_lock);
10481da177e4SLinus Torvalds 	link_dev_buffers(page, bh);
10491da177e4SLinus Torvalds 	init_page_buffers(page, bdev, block, size);
10501da177e4SLinus Torvalds 	spin_unlock(&inode->i_mapping->private_lock);
10511da177e4SLinus Torvalds 	return page;
10521da177e4SLinus Torvalds 
10531da177e4SLinus Torvalds failed:
10541da177e4SLinus Torvalds 	BUG();
10551da177e4SLinus Torvalds 	unlock_page(page);
10561da177e4SLinus Torvalds 	page_cache_release(page);
10571da177e4SLinus Torvalds 	return NULL;
10581da177e4SLinus Torvalds }
10591da177e4SLinus Torvalds 
10601da177e4SLinus Torvalds /*
10611da177e4SLinus Torvalds  * Create buffers for the specified block device block's page.  If
10621da177e4SLinus Torvalds  * that page was dirty, the buffers are set dirty also.
10631da177e4SLinus Torvalds  */
1064858119e1SArjan van de Ven static int
10651da177e4SLinus Torvalds grow_buffers(struct block_device *bdev, sector_t block, int size)
10661da177e4SLinus Torvalds {
10671da177e4SLinus Torvalds 	struct page *page;
10681da177e4SLinus Torvalds 	pgoff_t index;
10691da177e4SLinus Torvalds 	int sizebits;
10701da177e4SLinus Torvalds 
10711da177e4SLinus Torvalds 	sizebits = -1;
10721da177e4SLinus Torvalds 	do {
10731da177e4SLinus Torvalds 		sizebits++;
10741da177e4SLinus Torvalds 	} while ((size << sizebits) < PAGE_SIZE);
10751da177e4SLinus Torvalds 
10761da177e4SLinus Torvalds 	index = block >> sizebits;
10771da177e4SLinus Torvalds 
1078e5657933SAndrew Morton 	/*
1079e5657933SAndrew Morton 	 * Check for a block which wants to lie outside our maximum possible
1080e5657933SAndrew Morton 	 * pagecache index.  (this comparison is done using sector_t types).
1081e5657933SAndrew Morton 	 */
1082e5657933SAndrew Morton 	if (unlikely(index != block >> sizebits)) {
1083e5657933SAndrew Morton 		char b[BDEVNAME_SIZE];
1084e5657933SAndrew Morton 
1085e5657933SAndrew Morton 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1086e5657933SAndrew Morton 			"device %s\n",
1087e5657933SAndrew Morton 			__FUNCTION__, (unsigned long long)block,
1088e5657933SAndrew Morton 			bdevname(bdev, b));
1089e5657933SAndrew Morton 		return -EIO;
1090e5657933SAndrew Morton 	}
1091e5657933SAndrew Morton 	block = index << sizebits;
10921da177e4SLinus Torvalds 	/* Create a page with the proper size buffers.. */
10931da177e4SLinus Torvalds 	page = grow_dev_page(bdev, block, index, size);
10941da177e4SLinus Torvalds 	if (!page)
10951da177e4SLinus Torvalds 		return 0;
10961da177e4SLinus Torvalds 	unlock_page(page);
10971da177e4SLinus Torvalds 	page_cache_release(page);
10981da177e4SLinus Torvalds 	return 1;
10991da177e4SLinus Torvalds }
11001da177e4SLinus Torvalds 
110175c96f85SAdrian Bunk static struct buffer_head *
11021da177e4SLinus Torvalds __getblk_slow(struct block_device *bdev, sector_t block, int size)
11031da177e4SLinus Torvalds {
11041da177e4SLinus Torvalds 	/* Size must be a multiple of the hard sector size */
11051da177e4SLinus Torvalds 	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
11061da177e4SLinus Torvalds 			(size < 512 || size > PAGE_SIZE))) {
11071da177e4SLinus Torvalds 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
11081da177e4SLinus Torvalds 					size);
11091da177e4SLinus Torvalds 		printk(KERN_ERR "hardsect size: %d\n",
11101da177e4SLinus Torvalds 					bdev_hardsect_size(bdev));
11111da177e4SLinus Torvalds 
11121da177e4SLinus Torvalds 		dump_stack();
11131da177e4SLinus Torvalds 		return NULL;
11141da177e4SLinus Torvalds 	}
11151da177e4SLinus Torvalds 
11161da177e4SLinus Torvalds 	for (;;) {
11171da177e4SLinus Torvalds 		struct buffer_head * bh;
1118e5657933SAndrew Morton 		int ret;
11191da177e4SLinus Torvalds 
11201da177e4SLinus Torvalds 		bh = __find_get_block(bdev, block, size);
11211da177e4SLinus Torvalds 		if (bh)
11221da177e4SLinus Torvalds 			return bh;
11231da177e4SLinus Torvalds 
1124e5657933SAndrew Morton 		ret = grow_buffers(bdev, block, size);
1125e5657933SAndrew Morton 		if (ret < 0)
1126e5657933SAndrew Morton 			return NULL;
1127e5657933SAndrew Morton 		if (ret == 0)
11281da177e4SLinus Torvalds 			free_more_memory();
11291da177e4SLinus Torvalds 	}
11301da177e4SLinus Torvalds }
11311da177e4SLinus Torvalds 
11321da177e4SLinus Torvalds /*
11331da177e4SLinus Torvalds  * The relationship between dirty buffers and dirty pages:
11341da177e4SLinus Torvalds  *
11351da177e4SLinus Torvalds  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
11361da177e4SLinus Torvalds  * the page is tagged dirty in its radix tree.
11371da177e4SLinus Torvalds  *
11381da177e4SLinus Torvalds  * At all times, the dirtiness of the buffers represents the dirtiness of
11391da177e4SLinus Torvalds  * subsections of the page.  If the page has buffers, the page dirty bit is
11401da177e4SLinus Torvalds  * merely a hint about the true dirty state.
11411da177e4SLinus Torvalds  *
11421da177e4SLinus Torvalds  * When a page is set dirty in its entirety, all its buffers are marked dirty
11431da177e4SLinus Torvalds  * (if the page has buffers).
11441da177e4SLinus Torvalds  *
11451da177e4SLinus Torvalds  * When a buffer is marked dirty, its page is dirtied, but the page's other
11461da177e4SLinus Torvalds  * buffers are not.
11471da177e4SLinus Torvalds  *
11481da177e4SLinus Torvalds  * Also.  When blockdev buffers are explicitly read with bread(), they
11491da177e4SLinus Torvalds  * individually become uptodate.  But their backing page remains not
11501da177e4SLinus Torvalds  * uptodate - even if all of its buffers are uptodate.  A subsequent
11511da177e4SLinus Torvalds  * block_read_full_page() against that page will discover all the uptodate
11521da177e4SLinus Torvalds  * buffers, will set the page uptodate and will perform no I/O.
11531da177e4SLinus Torvalds  */
11541da177e4SLinus Torvalds 
11551da177e4SLinus Torvalds /**
11561da177e4SLinus Torvalds  * mark_buffer_dirty - mark a buffer_head as needing writeout
115767be2dd1SMartin Waitz  * @bh: the buffer_head to mark dirty
11581da177e4SLinus Torvalds  *
11591da177e4SLinus Torvalds  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
11601da177e4SLinus Torvalds  * backing page dirty, then tag the page as dirty in its address_space's radix
11611da177e4SLinus Torvalds  * tree and then attach the address_space's inode to its superblock's dirty
11621da177e4SLinus Torvalds  * inode list.
11631da177e4SLinus Torvalds  *
11641da177e4SLinus Torvalds  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
11651da177e4SLinus Torvalds  * mapping->tree_lock and the global inode_lock.
11661da177e4SLinus Torvalds  */
1167*fc9b52cdSHarvey Harrison void mark_buffer_dirty(struct buffer_head *bh)
11681da177e4SLinus Torvalds {
1169787d2214SNick Piggin 	WARN_ON_ONCE(!buffer_uptodate(bh));
11701da177e4SLinus Torvalds 	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1171787d2214SNick Piggin 		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
11721da177e4SLinus Torvalds }
11731da177e4SLinus Torvalds 
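/*
 * A minimal usage sketch of mark_buffer_dirty(): a hypothetical helper doing
 * a read-modify-write of a single block.  The caller reads the block, patches
 * bytes in b_data, marks the buffer dirty so writeback will pick it up, and
 * drops its reference.  All names and the size/offset handling here are
 * assumptions for illustration only.
 */
static int example_update_block(struct block_device *bdev, sector_t blocknr,
				unsigned size, unsigned offset,
				const void *data, unsigned len)
{
	struct buffer_head *bh;

	bh = __bread(bdev, blocknr, size);
	if (!bh)
		return -EIO;
	memcpy(bh->b_data + offset, data, len);	/* caller ensures offset + len <= size */
	mark_buffer_dirty(bh);
	brelse(bh);
	return 0;
}
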
11741da177e4SLinus Torvalds /*
11751da177e4SLinus Torvalds  * Decrement a buffer_head's reference count.  If all buffers against a page
11761da177e4SLinus Torvalds  * have zero reference count, are clean and unlocked, and if the page is clean
11771da177e4SLinus Torvalds  * and unlocked then try_to_free_buffers() may strip the buffers from the page
11781da177e4SLinus Torvalds  * in preparation for freeing it (sometimes, rarely, buffers are removed from
11791da177e4SLinus Torvalds  * a page but it ends up not being freed, and buffers may later be reattached).
11801da177e4SLinus Torvalds  */
11811da177e4SLinus Torvalds void __brelse(struct buffer_head * buf)
11821da177e4SLinus Torvalds {
11831da177e4SLinus Torvalds 	if (atomic_read(&buf->b_count)) {
11841da177e4SLinus Torvalds 		put_bh(buf);
11851da177e4SLinus Torvalds 		return;
11861da177e4SLinus Torvalds 	}
11871da177e4SLinus Torvalds 	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
11881da177e4SLinus Torvalds 	WARN_ON(1);
11891da177e4SLinus Torvalds }
11901da177e4SLinus Torvalds 
11911da177e4SLinus Torvalds /*
11921da177e4SLinus Torvalds  * bforget() is like brelse(), except it discards any
11931da177e4SLinus Torvalds  * potentially dirty data.
11941da177e4SLinus Torvalds  */
11951da177e4SLinus Torvalds void __bforget(struct buffer_head *bh)
11961da177e4SLinus Torvalds {
11971da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
11981da177e4SLinus Torvalds 	if (!list_empty(&bh->b_assoc_buffers)) {
11991da177e4SLinus Torvalds 		struct address_space *buffer_mapping = bh->b_page->mapping;
12001da177e4SLinus Torvalds 
12011da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
12021da177e4SLinus Torvalds 		list_del_init(&bh->b_assoc_buffers);
120358ff407bSJan Kara 		bh->b_assoc_map = NULL;
12041da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
12051da177e4SLinus Torvalds 	}
12061da177e4SLinus Torvalds 	__brelse(bh);
12071da177e4SLinus Torvalds }
12081da177e4SLinus Torvalds 
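/*
 * A small sketch of when bforget()/__bforget() is preferred over brelse():
 * the (hypothetical) caller has decided the block is being freed anyway, so
 * any dirty data it holds can simply be thrown away instead of written back.
 */
static void example_discard_block(struct buffer_head *bh)
{
	/*
	 * The block this buffer maps is being deallocated, so its dirty
	 * contents are now meaningless - drop them rather than letting
	 * writeback push stale data to disk.
	 */
	bforget(bh);
}
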
12091da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh)
12101da177e4SLinus Torvalds {
12111da177e4SLinus Torvalds 	lock_buffer(bh);
12121da177e4SLinus Torvalds 	if (buffer_uptodate(bh)) {
12131da177e4SLinus Torvalds 		unlock_buffer(bh);
12141da177e4SLinus Torvalds 		return bh;
12151da177e4SLinus Torvalds 	} else {
12161da177e4SLinus Torvalds 		get_bh(bh);
12171da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_read_sync;
12181da177e4SLinus Torvalds 		submit_bh(READ, bh);
12191da177e4SLinus Torvalds 		wait_on_buffer(bh);
12201da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
12211da177e4SLinus Torvalds 			return bh;
12221da177e4SLinus Torvalds 	}
12231da177e4SLinus Torvalds 	brelse(bh);
12241da177e4SLinus Torvalds 	return NULL;
12251da177e4SLinus Torvalds }
12261da177e4SLinus Torvalds 
12271da177e4SLinus Torvalds /*
12281da177e4SLinus Torvalds  * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
12291da177e4SLinus Torvalds  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
12301da177e4SLinus Torvalds  * refcount elevated by one when they're in an LRU.  A buffer can only appear
12311da177e4SLinus Torvalds  * once in a particular CPU's LRU.  A single buffer can be present in multiple
12321da177e4SLinus Torvalds  * CPUs' LRUs at the same time.
12331da177e4SLinus Torvalds  *
12341da177e4SLinus Torvalds  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
12351da177e4SLinus Torvalds  * sb_find_get_block().
12361da177e4SLinus Torvalds  *
12371da177e4SLinus Torvalds  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
12381da177e4SLinus Torvalds  * a local interrupt disable for that.
12391da177e4SLinus Torvalds  */
12401da177e4SLinus Torvalds 
12411da177e4SLinus Torvalds #define BH_LRU_SIZE	8
12421da177e4SLinus Torvalds 
12431da177e4SLinus Torvalds struct bh_lru {
12441da177e4SLinus Torvalds 	struct buffer_head *bhs[BH_LRU_SIZE];
12451da177e4SLinus Torvalds };
12461da177e4SLinus Torvalds 
12471da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
12481da177e4SLinus Torvalds 
12491da177e4SLinus Torvalds #ifdef CONFIG_SMP
12501da177e4SLinus Torvalds #define bh_lru_lock()	local_irq_disable()
12511da177e4SLinus Torvalds #define bh_lru_unlock()	local_irq_enable()
12521da177e4SLinus Torvalds #else
12531da177e4SLinus Torvalds #define bh_lru_lock()	preempt_disable()
12541da177e4SLinus Torvalds #define bh_lru_unlock()	preempt_enable()
12551da177e4SLinus Torvalds #endif
12561da177e4SLinus Torvalds 
12571da177e4SLinus Torvalds static inline void check_irqs_on(void)
12581da177e4SLinus Torvalds {
12591da177e4SLinus Torvalds #ifdef irqs_disabled
12601da177e4SLinus Torvalds 	BUG_ON(irqs_disabled());
12611da177e4SLinus Torvalds #endif
12621da177e4SLinus Torvalds }
12631da177e4SLinus Torvalds 
12641da177e4SLinus Torvalds /*
12651da177e4SLinus Torvalds  * The LRU management algorithm is dopey-but-simple.  Sorry.
12661da177e4SLinus Torvalds  */
12671da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh)
12681da177e4SLinus Torvalds {
12691da177e4SLinus Torvalds 	struct buffer_head *evictee = NULL;
12701da177e4SLinus Torvalds 	struct bh_lru *lru;
12711da177e4SLinus Torvalds 
12721da177e4SLinus Torvalds 	check_irqs_on();
12731da177e4SLinus Torvalds 	bh_lru_lock();
12741da177e4SLinus Torvalds 	lru = &__get_cpu_var(bh_lrus);
12751da177e4SLinus Torvalds 	if (lru->bhs[0] != bh) {
12761da177e4SLinus Torvalds 		struct buffer_head *bhs[BH_LRU_SIZE];
12771da177e4SLinus Torvalds 		int in;
12781da177e4SLinus Torvalds 		int out = 0;
12791da177e4SLinus Torvalds 
12801da177e4SLinus Torvalds 		get_bh(bh);
12811da177e4SLinus Torvalds 		bhs[out++] = bh;
12821da177e4SLinus Torvalds 		for (in = 0; in < BH_LRU_SIZE; in++) {
12831da177e4SLinus Torvalds 			struct buffer_head *bh2 = lru->bhs[in];
12841da177e4SLinus Torvalds 
12851da177e4SLinus Torvalds 			if (bh2 == bh) {
12861da177e4SLinus Torvalds 				__brelse(bh2);
12871da177e4SLinus Torvalds 			} else {
12881da177e4SLinus Torvalds 				if (out >= BH_LRU_SIZE) {
12891da177e4SLinus Torvalds 					BUG_ON(evictee != NULL);
12901da177e4SLinus Torvalds 					evictee = bh2;
12911da177e4SLinus Torvalds 				} else {
12921da177e4SLinus Torvalds 					bhs[out++] = bh2;
12931da177e4SLinus Torvalds 				}
12941da177e4SLinus Torvalds 			}
12951da177e4SLinus Torvalds 		}
12961da177e4SLinus Torvalds 		while (out < BH_LRU_SIZE)
12971da177e4SLinus Torvalds 			bhs[out++] = NULL;
12981da177e4SLinus Torvalds 		memcpy(lru->bhs, bhs, sizeof(bhs));
12991da177e4SLinus Torvalds 	}
13001da177e4SLinus Torvalds 	bh_lru_unlock();
13011da177e4SLinus Torvalds 
13021da177e4SLinus Torvalds 	if (evictee)
13031da177e4SLinus Torvalds 		__brelse(evictee);
13041da177e4SLinus Torvalds }
13051da177e4SLinus Torvalds 
13061da177e4SLinus Torvalds /*
13071da177e4SLinus Torvalds  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
13081da177e4SLinus Torvalds  */
1309858119e1SArjan van de Ven static struct buffer_head *
13103991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
13111da177e4SLinus Torvalds {
13121da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
13131da177e4SLinus Torvalds 	struct bh_lru *lru;
13143991d3bdSTomasz Kvarsin 	unsigned int i;
13151da177e4SLinus Torvalds 
13161da177e4SLinus Torvalds 	check_irqs_on();
13171da177e4SLinus Torvalds 	bh_lru_lock();
13181da177e4SLinus Torvalds 	lru = &__get_cpu_var(bh_lrus);
13191da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
13201da177e4SLinus Torvalds 		struct buffer_head *bh = lru->bhs[i];
13211da177e4SLinus Torvalds 
13221da177e4SLinus Torvalds 		if (bh && bh->b_bdev == bdev &&
13231da177e4SLinus Torvalds 				bh->b_blocknr == block && bh->b_size == size) {
13241da177e4SLinus Torvalds 			if (i) {
13251da177e4SLinus Torvalds 				while (i) {
13261da177e4SLinus Torvalds 					lru->bhs[i] = lru->bhs[i - 1];
13271da177e4SLinus Torvalds 					i--;
13281da177e4SLinus Torvalds 				}
13291da177e4SLinus Torvalds 				lru->bhs[0] = bh;
13301da177e4SLinus Torvalds 			}
13311da177e4SLinus Torvalds 			get_bh(bh);
13321da177e4SLinus Torvalds 			ret = bh;
13331da177e4SLinus Torvalds 			break;
13341da177e4SLinus Torvalds 		}
13351da177e4SLinus Torvalds 	}
13361da177e4SLinus Torvalds 	bh_lru_unlock();
13371da177e4SLinus Torvalds 	return ret;
13381da177e4SLinus Torvalds }
13391da177e4SLinus Torvalds 
13401da177e4SLinus Torvalds /*
13411da177e4SLinus Torvalds  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
13421da177e4SLinus Torvalds  * it in the LRU and mark it as accessed.  If it is not present then return
13431da177e4SLinus Torvalds  * NULL
13441da177e4SLinus Torvalds  * NULL.
13451da177e4SLinus Torvalds struct buffer_head *
13463991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
13471da177e4SLinus Torvalds {
13481da177e4SLinus Torvalds 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
13491da177e4SLinus Torvalds 
13501da177e4SLinus Torvalds 	if (bh == NULL) {
1351385fd4c5SCoywolf Qi Hunt 		bh = __find_get_block_slow(bdev, block);
13521da177e4SLinus Torvalds 		if (bh)
13531da177e4SLinus Torvalds 			bh_lru_install(bh);
13541da177e4SLinus Torvalds 	}
13551da177e4SLinus Torvalds 	if (bh)
13561da177e4SLinus Torvalds 		touch_buffer(bh);
13571da177e4SLinus Torvalds 	return bh;
13581da177e4SLinus Torvalds }
13591da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block);
13601da177e4SLinus Torvalds 
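/*
 * A sketch of using __find_get_block() as a cheap, non-allocating peek:
 * check whether a block is already cached and uptodate without creating
 * buffers or issuing any I/O.  The helper name is hypothetical.
 */
static int example_block_is_cached(struct block_device *bdev, sector_t blocknr,
				   unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, blocknr, size);
	int cached = 0;

	if (bh) {
		cached = buffer_uptodate(bh);
		brelse(bh);		/* drop the reference taken by the lookup */
	}
	return cached;
}
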
13611da177e4SLinus Torvalds /*
13621da177e4SLinus Torvalds  * __getblk will locate (and, if necessary, create) the buffer_head
13631da177e4SLinus Torvalds  * which corresponds to the passed block_device, block and size. The
13641da177e4SLinus Torvalds  * returned buffer has its reference count incremented.
13651da177e4SLinus Torvalds  *
13661da177e4SLinus Torvalds  * __getblk() cannot fail - it just keeps trying.  If you pass it an
13671da177e4SLinus Torvalds  * illegal block number, __getblk() will happily return a buffer_head
13681da177e4SLinus Torvalds  * which represents the non-existent block.  Very weird.
13691da177e4SLinus Torvalds  *
13701da177e4SLinus Torvalds  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
13711da177e4SLinus Torvalds  * attempt is failing.  FIXME, perhaps?
13721da177e4SLinus Torvalds  */
13731da177e4SLinus Torvalds struct buffer_head *
13743991d3bdSTomasz Kvarsin __getblk(struct block_device *bdev, sector_t block, unsigned size)
13751da177e4SLinus Torvalds {
13761da177e4SLinus Torvalds 	struct buffer_head *bh = __find_get_block(bdev, block, size);
13771da177e4SLinus Torvalds 
13781da177e4SLinus Torvalds 	might_sleep();
13791da177e4SLinus Torvalds 	if (bh == NULL)
13801da177e4SLinus Torvalds 		bh = __getblk_slow(bdev, block, size);
13811da177e4SLinus Torvalds 	return bh;
13821da177e4SLinus Torvalds }
13831da177e4SLinus Torvalds EXPORT_SYMBOL(__getblk);
13841da177e4SLinus Torvalds 
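/*
 * A sketch of the usual pattern for a block that will be completely
 * overwritten: __getblk() hands back the buffer_head without reading the
 * old contents, the caller fills it, marks it uptodate and dirty, and
 * releases it.  The helper and its arguments are assumptions for the
 * example only.
 */
static void example_overwrite_block(struct block_device *bdev, sector_t blocknr,
				    const void *data, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, blocknr, size);

	if (!bh)		/* only happens for an invalid size */
		return;
	lock_buffer(bh);
	memcpy(bh->b_data, data, size);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);
	brelse(bh);
}
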
13851da177e4SLinus Torvalds /*
13861da177e4SLinus Torvalds  * Do async read-ahead on a buffer..
13871da177e4SLinus Torvalds  */
13883991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
13891da177e4SLinus Torvalds {
13901da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
1391a3e713b5SAndrew Morton 	if (likely(bh)) {
13921da177e4SLinus Torvalds 		ll_rw_block(READA, 1, &bh);
13931da177e4SLinus Torvalds 		brelse(bh);
13941da177e4SLinus Torvalds 	}
1395a3e713b5SAndrew Morton }
13961da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead);
13971da177e4SLinus Torvalds 
13981da177e4SLinus Torvalds /**
13991da177e4SLinus Torvalds  *  __bread() - reads a specified block and returns the bh
140067be2dd1SMartin Waitz  *  @bdev: the block_device to read from
14011da177e4SLinus Torvalds  *  @block: number of block
14021da177e4SLinus Torvalds  *  @size: size (in bytes) to read
14031da177e4SLinus Torvalds  *
14041da177e4SLinus Torvalds  *  Reads a specified block, and returns the buffer head that contains it.
14051da177e4SLinus Torvalds  *  It returns NULL if the block was unreadable.
14061da177e4SLinus Torvalds  */
14071da177e4SLinus Torvalds struct buffer_head *
14083991d3bdSTomasz Kvarsin __bread(struct block_device *bdev, sector_t block, unsigned size)
14091da177e4SLinus Torvalds {
14101da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
14111da177e4SLinus Torvalds 
1412a3e713b5SAndrew Morton 	if (likely(bh) && !buffer_uptodate(bh))
14131da177e4SLinus Torvalds 		bh = __bread_slow(bh);
14141da177e4SLinus Torvalds 	return bh;
14151da177e4SLinus Torvalds }
14161da177e4SLinus Torvalds EXPORT_SYMBOL(__bread);
14171da177e4SLinus Torvalds 
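/*
 * A sketch of the common __bread() pattern: read one block synchronously,
 * inspect it, and release it.  The magic-number check and the helper name
 * are made up purely for illustration.
 */
static int example_check_magic(struct block_device *bdev, sector_t blocknr,
			       unsigned size, u32 magic)
{
	struct buffer_head *bh = __bread(bdev, blocknr, size);
	int ok;

	if (!bh)
		return -EIO;		/* the block was unreadable */
	ok = memcmp(bh->b_data, &magic, sizeof(magic)) == 0;
	brelse(bh);
	return ok;
}
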
14181da177e4SLinus Torvalds /*
14191da177e4SLinus Torvalds  * invalidate_bh_lrus() is called rarely - but not only at unmount.
14201da177e4SLinus Torvalds  * This doesn't race because it runs in each cpu either in irq
14211da177e4SLinus Torvalds  * or with preempt disabled.
14221da177e4SLinus Torvalds  */
14231da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg)
14241da177e4SLinus Torvalds {
14251da177e4SLinus Torvalds 	struct bh_lru *b = &get_cpu_var(bh_lrus);
14261da177e4SLinus Torvalds 	int i;
14271da177e4SLinus Torvalds 
14281da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
14291da177e4SLinus Torvalds 		brelse(b->bhs[i]);
14301da177e4SLinus Torvalds 		b->bhs[i] = NULL;
14311da177e4SLinus Torvalds 	}
14321da177e4SLinus Torvalds 	put_cpu_var(bh_lrus);
14331da177e4SLinus Torvalds }
14341da177e4SLinus Torvalds 
1435f9a14399SPeter Zijlstra void invalidate_bh_lrus(void)
14361da177e4SLinus Torvalds {
14371da177e4SLinus Torvalds 	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
14381da177e4SLinus Torvalds }
14399db5579bSNick Piggin EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
14401da177e4SLinus Torvalds 
14411da177e4SLinus Torvalds void set_bh_page(struct buffer_head *bh,
14421da177e4SLinus Torvalds 		struct page *page, unsigned long offset)
14431da177e4SLinus Torvalds {
14441da177e4SLinus Torvalds 	bh->b_page = page;
1445e827f923SEric Sesterhenn 	BUG_ON(offset >= PAGE_SIZE);
14461da177e4SLinus Torvalds 	if (PageHighMem(page))
14471da177e4SLinus Torvalds 		/*
14481da177e4SLinus Torvalds 		 * This catches illegal uses and preserves the offset:
14491da177e4SLinus Torvalds 		 */
14501da177e4SLinus Torvalds 		bh->b_data = (char *)(0 + offset);
14511da177e4SLinus Torvalds 	else
14521da177e4SLinus Torvalds 		bh->b_data = page_address(page) + offset;
14531da177e4SLinus Torvalds }
14541da177e4SLinus Torvalds EXPORT_SYMBOL(set_bh_page);
14551da177e4SLinus Torvalds 
14561da177e4SLinus Torvalds /*
14571da177e4SLinus Torvalds  * Called when truncating a buffer on a page completely.
14581da177e4SLinus Torvalds  */
1459858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh)
14601da177e4SLinus Torvalds {
14611da177e4SLinus Torvalds 	lock_buffer(bh);
14621da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
14631da177e4SLinus Torvalds 	bh->b_bdev = NULL;
14641da177e4SLinus Torvalds 	clear_buffer_mapped(bh);
14651da177e4SLinus Torvalds 	clear_buffer_req(bh);
14661da177e4SLinus Torvalds 	clear_buffer_new(bh);
14671da177e4SLinus Torvalds 	clear_buffer_delay(bh);
146833a266ddSDavid Chinner 	clear_buffer_unwritten(bh);
14691da177e4SLinus Torvalds 	unlock_buffer(bh);
14701da177e4SLinus Torvalds }
14711da177e4SLinus Torvalds 
14721da177e4SLinus Torvalds /**
14731da177e4SLinus Torvalds  * block_invalidatepage - invalidate part or all of a buffer-backed page
14741da177e4SLinus Torvalds  *
14751da177e4SLinus Torvalds  * @page: the page which is affected
14761da177e4SLinus Torvalds  * @offset: the index of the truncation point
14771da177e4SLinus Torvalds  *
14781da177e4SLinus Torvalds  * block_invalidatepage() is called when all or part of the page has become
14791da177e4SLinus Torvalds  * invalidatedby a truncate operation.
14801da177e4SLinus Torvalds  * invalidated by a truncate operation.
14811da177e4SLinus Torvalds  * block_invalidatepage() does not have to release all buffers, but it must
14821da177e4SLinus Torvalds  * ensure that no dirty buffer is left outside @offset and that no I/O
14831da177e4SLinus Torvalds  * is underway against any of the blocks which are outside the truncation
14841da177e4SLinus Torvalds  * point, because the caller is about to free (and possibly reuse) those
14851da177e4SLinus Torvalds  * blocks on-disk.
14861da177e4SLinus Torvalds  */
14872ff28e22SNeilBrown void block_invalidatepage(struct page *page, unsigned long offset)
14881da177e4SLinus Torvalds {
14891da177e4SLinus Torvalds 	struct buffer_head *head, *bh, *next;
14901da177e4SLinus Torvalds 	unsigned int curr_off = 0;
14911da177e4SLinus Torvalds 
14921da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
14931da177e4SLinus Torvalds 	if (!page_has_buffers(page))
14941da177e4SLinus Torvalds 		goto out;
14951da177e4SLinus Torvalds 
14961da177e4SLinus Torvalds 	head = page_buffers(page);
14971da177e4SLinus Torvalds 	bh = head;
14981da177e4SLinus Torvalds 	do {
14991da177e4SLinus Torvalds 		unsigned int next_off = curr_off + bh->b_size;
15001da177e4SLinus Torvalds 		next = bh->b_this_page;
15011da177e4SLinus Torvalds 
15021da177e4SLinus Torvalds 		/*
15031da177e4SLinus Torvalds 		 * is this block fully invalidated?
15041da177e4SLinus Torvalds 		 */
15051da177e4SLinus Torvalds 		if (offset <= curr_off)
15061da177e4SLinus Torvalds 			discard_buffer(bh);
15071da177e4SLinus Torvalds 		curr_off = next_off;
15081da177e4SLinus Torvalds 		bh = next;
15091da177e4SLinus Torvalds 	} while (bh != head);
15101da177e4SLinus Torvalds 
15111da177e4SLinus Torvalds 	/*
15121da177e4SLinus Torvalds 	 * We release buffers only if the entire page is being invalidated.
15131da177e4SLinus Torvalds 	 * The get_block cached value has been unconditionally invalidated,
15141da177e4SLinus Torvalds 	 * so real IO is not possible anymore.
15151da177e4SLinus Torvalds 	 */
15161da177e4SLinus Torvalds 	if (offset == 0)
15172ff28e22SNeilBrown 		try_to_release_page(page, 0);
15181da177e4SLinus Torvalds out:
15192ff28e22SNeilBrown 	return;
15201da177e4SLinus Torvalds }
15211da177e4SLinus Torvalds EXPORT_SYMBOL(block_invalidatepage);
15221da177e4SLinus Torvalds 
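/*
 * A sketch of how a buffer-backed filesystem might hook this up: an
 * ->invalidatepage address_space operation (the name is hypothetical)
 * that does any filesystem-private cleanup and then delegates the buffer
 * work to block_invalidatepage().
 */
static void example_invalidatepage(struct page *page, unsigned long offset)
{
	/* filesystem-private bookkeeping would go here */
	block_invalidatepage(page, offset);
}
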
15231da177e4SLinus Torvalds /*
15241da177e4SLinus Torvalds  * We attach and possibly dirty the buffers atomically wrt
15251da177e4SLinus Torvalds  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
15261da177e4SLinus Torvalds  * is already excluded via the page lock.
15271da177e4SLinus Torvalds  */
15281da177e4SLinus Torvalds void create_empty_buffers(struct page *page,
15291da177e4SLinus Torvalds 			unsigned long blocksize, unsigned long b_state)
15301da177e4SLinus Torvalds {
15311da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *tail;
15321da177e4SLinus Torvalds 
15331da177e4SLinus Torvalds 	head = alloc_page_buffers(page, blocksize, 1);
15341da177e4SLinus Torvalds 	bh = head;
15351da177e4SLinus Torvalds 	do {
15361da177e4SLinus Torvalds 		bh->b_state |= b_state;
15371da177e4SLinus Torvalds 		tail = bh;
15381da177e4SLinus Torvalds 		bh = bh->b_this_page;
15391da177e4SLinus Torvalds 	} while (bh);
15401da177e4SLinus Torvalds 	tail->b_this_page = head;
15411da177e4SLinus Torvalds 
15421da177e4SLinus Torvalds 	spin_lock(&page->mapping->private_lock);
15431da177e4SLinus Torvalds 	if (PageUptodate(page) || PageDirty(page)) {
15441da177e4SLinus Torvalds 		bh = head;
15451da177e4SLinus Torvalds 		do {
15461da177e4SLinus Torvalds 			if (PageDirty(page))
15471da177e4SLinus Torvalds 				set_buffer_dirty(bh);
15481da177e4SLinus Torvalds 			if (PageUptodate(page))
15491da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
15501da177e4SLinus Torvalds 			bh = bh->b_this_page;
15511da177e4SLinus Torvalds 		} while (bh != head);
15521da177e4SLinus Torvalds 	}
15531da177e4SLinus Torvalds 	attach_page_buffers(page, head);
15541da177e4SLinus Torvalds 	spin_unlock(&page->mapping->private_lock);
15551da177e4SLinus Torvalds }
15561da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers);
15571da177e4SLinus Torvalds 
15581da177e4SLinus Torvalds /*
15591da177e4SLinus Torvalds  * We are taking a block for data and we don't want any output from any
15601da177e4SLinus Torvalds  * buffer-cache aliases starting from the return of this function and
15611da177e4SLinus Torvalds  * until the moment when something explicitly marks the buffer
15621da177e4SLinus Torvalds  * dirty (hopefully that will not happen until we free that block ;-)
15631da177e4SLinus Torvalds  * We don't even need to mark it not-uptodate - nobody can expect
15641da177e4SLinus Torvalds  * anything from a newly allocated buffer anyway. We used to use
15651da177e4SLinus Torvalds  * unmap_buffer() for such invalidation, but that was wrong. We definitely
15661da177e4SLinus Torvalds  * don't want to mark the alias unmapped, for example - it would confuse
15671da177e4SLinus Torvalds  * anyone who might pick it with bread() afterwards...
15681da177e4SLinus Torvalds  *
15691da177e4SLinus Torvalds  * Also..  Note that bforget() doesn't lock the buffer.  So there can
15701da177e4SLinus Torvalds  * be writeout I/O going on against recently-freed buffers.  We don't
15711da177e4SLinus Torvalds  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
15721da177e4SLinus Torvalds  * only if we really need to.  That happens here.
15731da177e4SLinus Torvalds  */
15741da177e4SLinus Torvalds void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
15751da177e4SLinus Torvalds {
15761da177e4SLinus Torvalds 	struct buffer_head *old_bh;
15771da177e4SLinus Torvalds 
15781da177e4SLinus Torvalds 	might_sleep();
15791da177e4SLinus Torvalds 
1580385fd4c5SCoywolf Qi Hunt 	old_bh = __find_get_block_slow(bdev, block);
15811da177e4SLinus Torvalds 	if (old_bh) {
15821da177e4SLinus Torvalds 		clear_buffer_dirty(old_bh);
15831da177e4SLinus Torvalds 		wait_on_buffer(old_bh);
15841da177e4SLinus Torvalds 		clear_buffer_req(old_bh);
15851da177e4SLinus Torvalds 		__brelse(old_bh);
15861da177e4SLinus Torvalds 	}
15871da177e4SLinus Torvalds }
15881da177e4SLinus Torvalds EXPORT_SYMBOL(unmap_underlying_metadata);
15891da177e4SLinus Torvalds 
15901da177e4SLinus Torvalds /*
15911da177e4SLinus Torvalds  * NOTE! All mapped/uptodate combinations are valid:
15921da177e4SLinus Torvalds  *
15931da177e4SLinus Torvalds  *	Mapped	Uptodate	Meaning
15941da177e4SLinus Torvalds  *
15951da177e4SLinus Torvalds  *	No	No		"unknown" - must do get_block()
15961da177e4SLinus Torvalds  *	No	Yes		"hole" - zero-filled
15971da177e4SLinus Torvalds  *	Yes	No		"allocated" - allocated on disk, not read in
15981da177e4SLinus Torvalds  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
15991da177e4SLinus Torvalds  *
16001da177e4SLinus Torvalds  * "Dirty" is valid only with the last case (mapped+uptodate).
16011da177e4SLinus Torvalds  */
16021da177e4SLinus Torvalds 
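/*
 * A toy get_block illustrating the state table above, for a hypothetical
 * filesystem whose file data lives in one contiguous on-disk extent:
 * blocks inside the extent come back mapped, blocks beyond it are holes
 * (left unmapped, so reads see zeroes), and block allocation is not
 * supported in this sketch.  The extent start and length are made-up
 * constants.
 */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	sector_t first_block = 100;	/* assumed start of the data extent */
	sector_t nr_blocks = 8;		/* assumed extent length, in blocks */

	if (iblock >= nr_blocks)
		return create ? -EIO : 0;	/* no allocation here; reads see a hole */

	map_bh(bh_result, inode->i_sb, first_block + iblock);
	return 0;
}
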
16031da177e4SLinus Torvalds /*
16041da177e4SLinus Torvalds  * While block_write_full_page is writing back the dirty buffers under
16051da177e4SLinus Torvalds  * the page lock, whoever dirtied the buffers may decide to clean them
16061da177e4SLinus Torvalds  * again at any time.  We handle that by only looking at the buffer
16071da177e4SLinus Torvalds  * state inside lock_buffer().
16081da177e4SLinus Torvalds  *
16091da177e4SLinus Torvalds  * If block_write_full_page() is called for regular writeback
16101da177e4SLinus Torvalds  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
16111da177e4SLinus Torvalds  * locked buffer.   This only can happen if someone has written the buffer
16121da177e4SLinus Torvalds  * directly, with submit_bh().  At the address_space level PageWriteback
16131da177e4SLinus Torvalds  * prevents this contention from occurring.
16141da177e4SLinus Torvalds  */
16151da177e4SLinus Torvalds static int __block_write_full_page(struct inode *inode, struct page *page,
16161da177e4SLinus Torvalds 			get_block_t *get_block, struct writeback_control *wbc)
16171da177e4SLinus Torvalds {
16181da177e4SLinus Torvalds 	int err;
16191da177e4SLinus Torvalds 	sector_t block;
16201da177e4SLinus Torvalds 	sector_t last_block;
1621f0fbd5fcSAndrew Morton 	struct buffer_head *bh, *head;
1622b0cf2321SBadari Pulavarty 	const unsigned blocksize = 1 << inode->i_blkbits;
16231da177e4SLinus Torvalds 	int nr_underway = 0;
16241da177e4SLinus Torvalds 
16251da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
16261da177e4SLinus Torvalds 
16271da177e4SLinus Torvalds 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
16281da177e4SLinus Torvalds 
16291da177e4SLinus Torvalds 	if (!page_has_buffers(page)) {
1630b0cf2321SBadari Pulavarty 		create_empty_buffers(page, blocksize,
16311da177e4SLinus Torvalds 					(1 << BH_Dirty)|(1 << BH_Uptodate));
16321da177e4SLinus Torvalds 	}
16331da177e4SLinus Torvalds 
16341da177e4SLinus Torvalds 	/*
16351da177e4SLinus Torvalds 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
16361da177e4SLinus Torvalds 	 * here, and the (potentially unmapped) buffers may become dirty at
16371da177e4SLinus Torvalds 	 * any time.  If a buffer becomes dirty here after we've inspected it
16381da177e4SLinus Torvalds 	 * then we just miss that fact, and the page stays dirty.
16391da177e4SLinus Torvalds 	 *
16401da177e4SLinus Torvalds 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
16411da177e4SLinus Torvalds 	 * handle that here by just cleaning them.
16421da177e4SLinus Torvalds 	 */
16431da177e4SLinus Torvalds 
164454b21a79SAndrew Morton 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
16451da177e4SLinus Torvalds 	head = page_buffers(page);
16461da177e4SLinus Torvalds 	bh = head;
16471da177e4SLinus Torvalds 
16481da177e4SLinus Torvalds 	/*
16491da177e4SLinus Torvalds 	 * Get all the dirty buffers mapped to disk addresses and
16501da177e4SLinus Torvalds 	 * handle any aliases from the underlying blockdev's mapping.
16511da177e4SLinus Torvalds 	 */
16521da177e4SLinus Torvalds 	do {
16531da177e4SLinus Torvalds 		if (block > last_block) {
16541da177e4SLinus Torvalds 			/*
16551da177e4SLinus Torvalds 			 * mapped buffers outside i_size will occur, because
16561da177e4SLinus Torvalds 			 * this page can be outside i_size when there is a
16571da177e4SLinus Torvalds 			 * truncate in progress.
16581da177e4SLinus Torvalds 			 */
16591da177e4SLinus Torvalds 			/*
16601da177e4SLinus Torvalds 			 * The buffer was zeroed by block_write_full_page()
16611da177e4SLinus Torvalds 			 */
16621da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
16631da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
16641da177e4SLinus Torvalds 		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1665b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
16661da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
16671da177e4SLinus Torvalds 			if (err)
16681da177e4SLinus Torvalds 				goto recover;
16691da177e4SLinus Torvalds 			if (buffer_new(bh)) {
16701da177e4SLinus Torvalds 				/* blockdev mappings never come here */
16711da177e4SLinus Torvalds 				clear_buffer_new(bh);
16721da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
16731da177e4SLinus Torvalds 							bh->b_blocknr);
16741da177e4SLinus Torvalds 			}
16751da177e4SLinus Torvalds 		}
16761da177e4SLinus Torvalds 		bh = bh->b_this_page;
16771da177e4SLinus Torvalds 		block++;
16781da177e4SLinus Torvalds 	} while (bh != head);
16791da177e4SLinus Torvalds 
16801da177e4SLinus Torvalds 	do {
16811da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
16821da177e4SLinus Torvalds 			continue;
16831da177e4SLinus Torvalds 		/*
16841da177e4SLinus Torvalds 		 * If it's a fully non-blocking write attempt and we cannot
16851da177e4SLinus Torvalds 		 * lock the buffer then redirty the page.  Note that this can
16861da177e4SLinus Torvalds 		 * potentially cause a busy-wait loop from pdflush and kswapd
16871da177e4SLinus Torvalds 		 * activity, but those code paths have their own higher-level
16881da177e4SLinus Torvalds 		 * throttling.
16891da177e4SLinus Torvalds 		 */
16901da177e4SLinus Torvalds 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
16911da177e4SLinus Torvalds 			lock_buffer(bh);
16921da177e4SLinus Torvalds 		} else if (test_set_buffer_locked(bh)) {
16931da177e4SLinus Torvalds 			redirty_page_for_writepage(wbc, page);
16941da177e4SLinus Torvalds 			continue;
16951da177e4SLinus Torvalds 		}
16961da177e4SLinus Torvalds 		if (test_clear_buffer_dirty(bh)) {
16971da177e4SLinus Torvalds 			mark_buffer_async_write(bh);
16981da177e4SLinus Torvalds 		} else {
16991da177e4SLinus Torvalds 			unlock_buffer(bh);
17001da177e4SLinus Torvalds 		}
17011da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
17021da177e4SLinus Torvalds 
17031da177e4SLinus Torvalds 	/*
17041da177e4SLinus Torvalds 	 * The page and its buffers are protected by PageWriteback(), so we can
17051da177e4SLinus Torvalds 	 * drop the bh refcounts early.
17061da177e4SLinus Torvalds 	 */
17071da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
17081da177e4SLinus Torvalds 	set_page_writeback(page);
17091da177e4SLinus Torvalds 
17101da177e4SLinus Torvalds 	do {
17111da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
17121da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
17131da177e4SLinus Torvalds 			submit_bh(WRITE, bh);
17141da177e4SLinus Torvalds 			nr_underway++;
1715ad576e63SNick Piggin 		}
17161da177e4SLinus Torvalds 		bh = next;
17171da177e4SLinus Torvalds 	} while (bh != head);
171805937baaSAndrew Morton 	unlock_page(page);
17191da177e4SLinus Torvalds 
17201da177e4SLinus Torvalds 	err = 0;
17211da177e4SLinus Torvalds done:
17221da177e4SLinus Torvalds 	if (nr_underway == 0) {
17231da177e4SLinus Torvalds 		/*
17241da177e4SLinus Torvalds 		 * The page was marked dirty, but the buffers were
17251da177e4SLinus Torvalds 		 * clean.  Someone wrote them back by hand with
17261da177e4SLinus Torvalds 		 * ll_rw_block/submit_bh.  A rare case.
17271da177e4SLinus Torvalds 		 */
17281da177e4SLinus Torvalds 		end_page_writeback(page);
17293d67f2d7SNick Piggin 
17301da177e4SLinus Torvalds 		/*
17311da177e4SLinus Torvalds 		 * The page and buffer_heads can be released at any time from
17321da177e4SLinus Torvalds 		 * here on.
17331da177e4SLinus Torvalds 		 */
17341da177e4SLinus Torvalds 	}
17351da177e4SLinus Torvalds 	return err;
17361da177e4SLinus Torvalds 
17371da177e4SLinus Torvalds recover:
17381da177e4SLinus Torvalds 	/*
17391da177e4SLinus Torvalds 	 * ENOSPC, or some other error.  We may already have added some
17401da177e4SLinus Torvalds 	 * blocks to the file, so we need to write these out to avoid
17411da177e4SLinus Torvalds 	 * exposing stale data.
17421da177e4SLinus Torvalds 	 * The page is currently locked and not marked for writeback
17431da177e4SLinus Torvalds 	 */
17441da177e4SLinus Torvalds 	bh = head;
17451da177e4SLinus Torvalds 	/* Recovery: lock and submit the mapped buffers */
17461da177e4SLinus Torvalds 	do {
17471da177e4SLinus Torvalds 		if (buffer_mapped(bh) && buffer_dirty(bh)) {
17481da177e4SLinus Torvalds 			lock_buffer(bh);
17491da177e4SLinus Torvalds 			mark_buffer_async_write(bh);
17501da177e4SLinus Torvalds 		} else {
17511da177e4SLinus Torvalds 			/*
17521da177e4SLinus Torvalds 			 * The buffer may have been set dirty during
17531da177e4SLinus Torvalds 			 * attachment to a dirty page.
17541da177e4SLinus Torvalds 			 */
17551da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
17561da177e4SLinus Torvalds 		}
17571da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
17581da177e4SLinus Torvalds 	SetPageError(page);
17591da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
17607e4c3690SAndrew Morton 	mapping_set_error(page->mapping, err);
17611da177e4SLinus Torvalds 	set_page_writeback(page);
17621da177e4SLinus Torvalds 	do {
17631da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
17641da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
17651da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
17661da177e4SLinus Torvalds 			submit_bh(WRITE, bh);
17671da177e4SLinus Torvalds 			nr_underway++;
1768ad576e63SNick Piggin 		}
17691da177e4SLinus Torvalds 		bh = next;
17701da177e4SLinus Torvalds 	} while (bh != head);
1771ffda9d30SNick Piggin 	unlock_page(page);
17721da177e4SLinus Torvalds 	goto done;
17731da177e4SLinus Torvalds }
17741da177e4SLinus Torvalds 
1775afddba49SNick Piggin /*
1776afddba49SNick Piggin  * If a page has any new buffers, zero them out here, and mark them uptodate
1777afddba49SNick Piggin  * and dirty so they'll be written out (in order to prevent uninitialised
1778afddba49SNick Piggin  * block data from leaking). And clear the new bit.
1779afddba49SNick Piggin  */
1780afddba49SNick Piggin void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1781afddba49SNick Piggin {
1782afddba49SNick Piggin 	unsigned int block_start, block_end;
1783afddba49SNick Piggin 	struct buffer_head *head, *bh;
1784afddba49SNick Piggin 
1785afddba49SNick Piggin 	BUG_ON(!PageLocked(page));
1786afddba49SNick Piggin 	if (!page_has_buffers(page))
1787afddba49SNick Piggin 		return;
1788afddba49SNick Piggin 
1789afddba49SNick Piggin 	bh = head = page_buffers(page);
1790afddba49SNick Piggin 	block_start = 0;
1791afddba49SNick Piggin 	do {
1792afddba49SNick Piggin 		block_end = block_start + bh->b_size;
1793afddba49SNick Piggin 
1794afddba49SNick Piggin 		if (buffer_new(bh)) {
1795afddba49SNick Piggin 			if (block_end > from && block_start < to) {
1796afddba49SNick Piggin 				if (!PageUptodate(page)) {
1797afddba49SNick Piggin 					unsigned start, size;
1798afddba49SNick Piggin 
1799afddba49SNick Piggin 					start = max(from, block_start);
1800afddba49SNick Piggin 					size = min(to, block_end) - start;
1801afddba49SNick Piggin 
1802eebd2aa3SChristoph Lameter 					zero_user(page, start, size);
1803afddba49SNick Piggin 					set_buffer_uptodate(bh);
1804afddba49SNick Piggin 				}
1805afddba49SNick Piggin 
1806afddba49SNick Piggin 				clear_buffer_new(bh);
1807afddba49SNick Piggin 				mark_buffer_dirty(bh);
1808afddba49SNick Piggin 			}
1809afddba49SNick Piggin 		}
1810afddba49SNick Piggin 
1811afddba49SNick Piggin 		block_start = block_end;
1812afddba49SNick Piggin 		bh = bh->b_this_page;
1813afddba49SNick Piggin 	} while (bh != head);
1814afddba49SNick Piggin }
1815afddba49SNick Piggin EXPORT_SYMBOL(page_zero_new_buffers);
1816afddba49SNick Piggin 
18171da177e4SLinus Torvalds static int __block_prepare_write(struct inode *inode, struct page *page,
18181da177e4SLinus Torvalds 		unsigned from, unsigned to, get_block_t *get_block)
18191da177e4SLinus Torvalds {
18201da177e4SLinus Torvalds 	unsigned block_start, block_end;
18211da177e4SLinus Torvalds 	sector_t block;
18221da177e4SLinus Torvalds 	int err = 0;
18231da177e4SLinus Torvalds 	unsigned blocksize, bbits;
18241da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
18251da177e4SLinus Torvalds 
18261da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
18271da177e4SLinus Torvalds 	BUG_ON(from > PAGE_CACHE_SIZE);
18281da177e4SLinus Torvalds 	BUG_ON(to > PAGE_CACHE_SIZE);
18291da177e4SLinus Torvalds 	BUG_ON(from > to);
18301da177e4SLinus Torvalds 
18311da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
18321da177e4SLinus Torvalds 	if (!page_has_buffers(page))
18331da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
18341da177e4SLinus Torvalds 	head = page_buffers(page);
18351da177e4SLinus Torvalds 
18361da177e4SLinus Torvalds 	bbits = inode->i_blkbits;
18371da177e4SLinus Torvalds 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
18381da177e4SLinus Torvalds 
18391da177e4SLinus Torvalds 	for(bh = head, block_start = 0; bh != head || !block_start;
18401da177e4SLinus Torvalds 	    block++, block_start=block_end, bh = bh->b_this_page) {
18411da177e4SLinus Torvalds 		block_end = block_start + blocksize;
18421da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
18431da177e4SLinus Torvalds 			if (PageUptodate(page)) {
18441da177e4SLinus Torvalds 				if (!buffer_uptodate(bh))
18451da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
18461da177e4SLinus Torvalds 			}
18471da177e4SLinus Torvalds 			continue;
18481da177e4SLinus Torvalds 		}
18491da177e4SLinus Torvalds 		if (buffer_new(bh))
18501da177e4SLinus Torvalds 			clear_buffer_new(bh);
18511da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
1852b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
18531da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
18541da177e4SLinus Torvalds 			if (err)
1855f3ddbdc6SNick Piggin 				break;
18561da177e4SLinus Torvalds 			if (buffer_new(bh)) {
18571da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
18581da177e4SLinus Torvalds 							bh->b_blocknr);
18591da177e4SLinus Torvalds 				if (PageUptodate(page)) {
1860637aff46SNick Piggin 					clear_buffer_new(bh);
18611da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
1862637aff46SNick Piggin 					mark_buffer_dirty(bh);
18631da177e4SLinus Torvalds 					continue;
18641da177e4SLinus Torvalds 				}
1865eebd2aa3SChristoph Lameter 				if (block_end > to || block_start < from)
1866eebd2aa3SChristoph Lameter 					zero_user_segments(page,
1867eebd2aa3SChristoph Lameter 						to, block_end,
1868eebd2aa3SChristoph Lameter 						block_start, from);
18691da177e4SLinus Torvalds 				continue;
18701da177e4SLinus Torvalds 			}
18711da177e4SLinus Torvalds 		}
18721da177e4SLinus Torvalds 		if (PageUptodate(page)) {
18731da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
18741da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
18751da177e4SLinus Torvalds 			continue;
18761da177e4SLinus Torvalds 		}
18771da177e4SLinus Torvalds 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
187833a266ddSDavid Chinner 		    !buffer_unwritten(bh) &&
18791da177e4SLinus Torvalds 		     (block_start < from || block_end > to)) {
18801da177e4SLinus Torvalds 			ll_rw_block(READ, 1, &bh);
18811da177e4SLinus Torvalds 			*wait_bh++=bh;
18821da177e4SLinus Torvalds 		}
18831da177e4SLinus Torvalds 	}
18841da177e4SLinus Torvalds 	/*
18851da177e4SLinus Torvalds 	 * If we issued read requests - let them complete.
18861da177e4SLinus Torvalds 	 */
18871da177e4SLinus Torvalds 	while(wait_bh > wait) {
18881da177e4SLinus Torvalds 		wait_on_buffer(*--wait_bh);
18891da177e4SLinus Torvalds 		if (!buffer_uptodate(*wait_bh))
1890f3ddbdc6SNick Piggin 			err = -EIO;
18911da177e4SLinus Torvalds 	}
1892afddba49SNick Piggin 	if (unlikely(err))
1893afddba49SNick Piggin 		page_zero_new_buffers(page, from, to);
18941da177e4SLinus Torvalds 	return err;
18951da177e4SLinus Torvalds }
18961da177e4SLinus Torvalds 
18971da177e4SLinus Torvalds static int __block_commit_write(struct inode *inode, struct page *page,
18981da177e4SLinus Torvalds 		unsigned from, unsigned to)
18991da177e4SLinus Torvalds {
19001da177e4SLinus Torvalds 	unsigned block_start, block_end;
19011da177e4SLinus Torvalds 	int partial = 0;
19021da177e4SLinus Torvalds 	unsigned blocksize;
19031da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
19041da177e4SLinus Torvalds 
19051da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
19061da177e4SLinus Torvalds 
19071da177e4SLinus Torvalds 	for(bh = head = page_buffers(page), block_start = 0;
19081da177e4SLinus Torvalds 	    bh != head || !block_start;
19091da177e4SLinus Torvalds 	    block_start=block_end, bh = bh->b_this_page) {
19101da177e4SLinus Torvalds 		block_end = block_start + blocksize;
19111da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
19121da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
19131da177e4SLinus Torvalds 				partial = 1;
19141da177e4SLinus Torvalds 		} else {
19151da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
19161da177e4SLinus Torvalds 			mark_buffer_dirty(bh);
19171da177e4SLinus Torvalds 		}
1918afddba49SNick Piggin 		clear_buffer_new(bh);
19191da177e4SLinus Torvalds 	}
19201da177e4SLinus Torvalds 
19211da177e4SLinus Torvalds 	/*
19221da177e4SLinus Torvalds 	 * If this is a partial write which happened to make all buffers
19231da177e4SLinus Torvalds 	 * uptodate then we can optimize away a bogus readpage() for
19241da177e4SLinus Torvalds 	 * the next read(). Here we 'discover' whether the page went
19251da177e4SLinus Torvalds 	 * uptodate as a result of this (potentially partial) write.
19261da177e4SLinus Torvalds 	 */
19271da177e4SLinus Torvalds 	if (!partial)
19281da177e4SLinus Torvalds 		SetPageUptodate(page);
19291da177e4SLinus Torvalds 	return 0;
19301da177e4SLinus Torvalds }
19311da177e4SLinus Torvalds 
19321da177e4SLinus Torvalds /*
1933afddba49SNick Piggin  * block_write_begin takes care of the basic task of block allocation and
1934afddba49SNick Piggin  * bringing partial write blocks uptodate first.
1935afddba49SNick Piggin  *
1936afddba49SNick Piggin  * If *pagep is not NULL, then block_write_begin uses the locked page
1937afddba49SNick Piggin  * at *pagep rather than allocating its own. In this case, the page will
1938afddba49SNick Piggin  * not be unlocked or deallocated on failure.
1939afddba49SNick Piggin  */
1940afddba49SNick Piggin int block_write_begin(struct file *file, struct address_space *mapping,
1941afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
1942afddba49SNick Piggin 			struct page **pagep, void **fsdata,
1943afddba49SNick Piggin 			get_block_t *get_block)
1944afddba49SNick Piggin {
1945afddba49SNick Piggin 	struct inode *inode = mapping->host;
1946afddba49SNick Piggin 	int status = 0;
1947afddba49SNick Piggin 	struct page *page;
1948afddba49SNick Piggin 	pgoff_t index;
1949afddba49SNick Piggin 	unsigned start, end;
1950afddba49SNick Piggin 	int ownpage = 0;
1951afddba49SNick Piggin 
1952afddba49SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
1953afddba49SNick Piggin 	start = pos & (PAGE_CACHE_SIZE - 1);
1954afddba49SNick Piggin 	end = start + len;
1955afddba49SNick Piggin 
1956afddba49SNick Piggin 	page = *pagep;
1957afddba49SNick Piggin 	if (page == NULL) {
1958afddba49SNick Piggin 		ownpage = 1;
1959afddba49SNick Piggin 		page = __grab_cache_page(mapping, index);
1960afddba49SNick Piggin 		if (!page) {
1961afddba49SNick Piggin 			status = -ENOMEM;
1962afddba49SNick Piggin 			goto out;
1963afddba49SNick Piggin 		}
1964afddba49SNick Piggin 		*pagep = page;
1965afddba49SNick Piggin 	} else
1966afddba49SNick Piggin 		BUG_ON(!PageLocked(page));
1967afddba49SNick Piggin 
1968afddba49SNick Piggin 	status = __block_prepare_write(inode, page, start, end, get_block);
1969afddba49SNick Piggin 	if (unlikely(status)) {
1970afddba49SNick Piggin 		ClearPageUptodate(page);
1971afddba49SNick Piggin 
1972afddba49SNick Piggin 		if (ownpage) {
1973afddba49SNick Piggin 			unlock_page(page);
1974afddba49SNick Piggin 			page_cache_release(page);
1975afddba49SNick Piggin 			*pagep = NULL;
1976afddba49SNick Piggin 
1977afddba49SNick Piggin 			/*
1978afddba49SNick Piggin 			 * prepare_write() may have instantiated a few blocks
1979afddba49SNick Piggin 			 * outside i_size.  Trim these off again. Don't need
1980afddba49SNick Piggin 			 * i_size_read because we hold i_mutex.
1981afddba49SNick Piggin 			 */
1982afddba49SNick Piggin 			if (pos + len > inode->i_size)
1983afddba49SNick Piggin 				vmtruncate(inode, inode->i_size);
1984afddba49SNick Piggin 		}
1985afddba49SNick Piggin 		goto out;
1986afddba49SNick Piggin 	}
1987afddba49SNick Piggin 
1988afddba49SNick Piggin out:
1989afddba49SNick Piggin 	return status;
1990afddba49SNick Piggin }
1991afddba49SNick Piggin EXPORT_SYMBOL(block_write_begin);
1992afddba49SNick Piggin 
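/*
 * A sketch of a filesystem's ->write_begin address_space operation: it
 * simply delegates to block_write_begin() with the filesystem's own
 * get_block routine (the example_get_block sketch above stands in for it;
 * both names are hypothetical).
 */
static int example_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;	/* let block_write_begin allocate and lock the page */
	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
					example_get_block);
}
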
1993afddba49SNick Piggin int block_write_end(struct file *file, struct address_space *mapping,
1994afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
1995afddba49SNick Piggin 			struct page *page, void *fsdata)
1996afddba49SNick Piggin {
1997afddba49SNick Piggin 	struct inode *inode = mapping->host;
1998afddba49SNick Piggin 	unsigned start;
1999afddba49SNick Piggin 
2000afddba49SNick Piggin 	start = pos & (PAGE_CACHE_SIZE - 1);
2001afddba49SNick Piggin 
2002afddba49SNick Piggin 	if (unlikely(copied < len)) {
2003afddba49SNick Piggin 		/*
2004afddba49SNick Piggin 		 * The buffers that were written will now be uptodate, so we
2005afddba49SNick Piggin 		 * don't have to worry about a readpage reading them and
2006afddba49SNick Piggin 		 * overwriting a partial write. However if we have encountered
2007afddba49SNick Piggin 		 * a short write and only partially written into a buffer, it
2008afddba49SNick Piggin 		 * will not be marked uptodate, so a readpage might come in and
2009afddba49SNick Piggin 		 * destroy our partial write.
2010afddba49SNick Piggin 		 *
2011afddba49SNick Piggin 		 * Do the simplest thing, and just treat any short write to a
2012afddba49SNick Piggin 		 * non uptodate page as a zero-length write, and force the
2013afddba49SNick Piggin 		 * caller to redo the whole thing.
2014afddba49SNick Piggin 		 */
2015afddba49SNick Piggin 		if (!PageUptodate(page))
2016afddba49SNick Piggin 			copied = 0;
2017afddba49SNick Piggin 
2018afddba49SNick Piggin 		page_zero_new_buffers(page, start+copied, start+len);
2019afddba49SNick Piggin 	}
2020afddba49SNick Piggin 	flush_dcache_page(page);
2021afddba49SNick Piggin 
2022afddba49SNick Piggin 	/* This could be a short (even 0-length) commit */
2023afddba49SNick Piggin 	__block_commit_write(inode, page, start, start+copied);
2024afddba49SNick Piggin 
2025afddba49SNick Piggin 	return copied;
2026afddba49SNick Piggin }
2027afddba49SNick Piggin EXPORT_SYMBOL(block_write_end);
2028afddba49SNick Piggin 
2029afddba49SNick Piggin int generic_write_end(struct file *file, struct address_space *mapping,
2030afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2031afddba49SNick Piggin 			struct page *page, void *fsdata)
2032afddba49SNick Piggin {
2033afddba49SNick Piggin 	struct inode *inode = mapping->host;
2034afddba49SNick Piggin 
2035afddba49SNick Piggin 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2036afddba49SNick Piggin 
2037afddba49SNick Piggin 	/*
2038afddba49SNick Piggin 	 * No need to use i_size_read() here, the i_size
2039afddba49SNick Piggin 	 * cannot change under us because we hold i_mutex.
2040afddba49SNick Piggin 	 *
2041afddba49SNick Piggin 	 * But it's important to update i_size while still holding page lock:
2042afddba49SNick Piggin 	 * page writeout could otherwise come in and zero beyond i_size.
2043afddba49SNick Piggin 	 */
2044afddba49SNick Piggin 	if (pos+copied > inode->i_size) {
2045afddba49SNick Piggin 		i_size_write(inode, pos+copied);
2046afddba49SNick Piggin 		mark_inode_dirty(inode);
2047afddba49SNick Piggin 	}
2048afddba49SNick Piggin 
2049afddba49SNick Piggin 	unlock_page(page);
2050afddba49SNick Piggin 	page_cache_release(page);
2051afddba49SNick Piggin 
2052afddba49SNick Piggin 	return copied;
2053afddba49SNick Piggin }
2054afddba49SNick Piggin EXPORT_SYMBOL(generic_write_end);
2055afddba49SNick Piggin 
2056afddba49SNick Piggin /*
20571da177e4SLinus Torvalds  * Generic "read page" function for block devices that have the normal
20581da177e4SLinus Torvalds  * get_block functionality. This is most of the block device filesystems.
20591da177e4SLinus Torvalds  * Reads the page asynchronously --- the unlock_buffer() and
20601da177e4SLinus Torvalds  * set/clear_buffer_uptodate() functions propagate buffer state into the
20611da177e4SLinus Torvalds  * page struct once IO has completed.
20621da177e4SLinus Torvalds  */
20631da177e4SLinus Torvalds int block_read_full_page(struct page *page, get_block_t *get_block)
20641da177e4SLinus Torvalds {
20651da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
20661da177e4SLinus Torvalds 	sector_t iblock, lblock;
20671da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
20681da177e4SLinus Torvalds 	unsigned int blocksize;
20691da177e4SLinus Torvalds 	int nr, i;
20701da177e4SLinus Torvalds 	int fully_mapped = 1;
20711da177e4SLinus Torvalds 
2072cd7619d6SMatt Mackall 	BUG_ON(!PageLocked(page));
20731da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
20741da177e4SLinus Torvalds 	if (!page_has_buffers(page))
20751da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
20761da177e4SLinus Torvalds 	head = page_buffers(page);
20771da177e4SLinus Torvalds 
20781da177e4SLinus Torvalds 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
20791da177e4SLinus Torvalds 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
20801da177e4SLinus Torvalds 	bh = head;
20811da177e4SLinus Torvalds 	nr = 0;
20821da177e4SLinus Torvalds 	i = 0;
20831da177e4SLinus Torvalds 
20841da177e4SLinus Torvalds 	do {
20851da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
20861da177e4SLinus Torvalds 			continue;
20871da177e4SLinus Torvalds 
20881da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
2089c64610baSAndrew Morton 			int err = 0;
2090c64610baSAndrew Morton 
20911da177e4SLinus Torvalds 			fully_mapped = 0;
20921da177e4SLinus Torvalds 			if (iblock < lblock) {
2093b0cf2321SBadari Pulavarty 				WARN_ON(bh->b_size != blocksize);
2094c64610baSAndrew Morton 				err = get_block(inode, iblock, bh, 0);
2095c64610baSAndrew Morton 				if (err)
20961da177e4SLinus Torvalds 					SetPageError(page);
20971da177e4SLinus Torvalds 			}
20981da177e4SLinus Torvalds 			if (!buffer_mapped(bh)) {
2099eebd2aa3SChristoph Lameter 				zero_user(page, i * blocksize, blocksize);
2100c64610baSAndrew Morton 				if (!err)
21011da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
21021da177e4SLinus Torvalds 				continue;
21031da177e4SLinus Torvalds 			}
21041da177e4SLinus Torvalds 			/*
21051da177e4SLinus Torvalds 			 * get_block() might have updated the buffer
21061da177e4SLinus Torvalds 			 * synchronously
21071da177e4SLinus Torvalds 			 */
21081da177e4SLinus Torvalds 			if (buffer_uptodate(bh))
21091da177e4SLinus Torvalds 				continue;
21101da177e4SLinus Torvalds 		}
21111da177e4SLinus Torvalds 		arr[nr++] = bh;
21121da177e4SLinus Torvalds 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
21131da177e4SLinus Torvalds 
21141da177e4SLinus Torvalds 	if (fully_mapped)
21151da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
21161da177e4SLinus Torvalds 
21171da177e4SLinus Torvalds 	if (!nr) {
21181da177e4SLinus Torvalds 		/*
21191da177e4SLinus Torvalds 		 * All buffers are uptodate - we can set the page uptodate
21201da177e4SLinus Torvalds 		 * as well. But not if get_block() returned an error.
21211da177e4SLinus Torvalds 		 */
21221da177e4SLinus Torvalds 		if (!PageError(page))
21231da177e4SLinus Torvalds 			SetPageUptodate(page);
21241da177e4SLinus Torvalds 		unlock_page(page);
21251da177e4SLinus Torvalds 		return 0;
21261da177e4SLinus Torvalds 	}
21271da177e4SLinus Torvalds 
21281da177e4SLinus Torvalds 	/* Stage two: lock the buffers */
21291da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
21301da177e4SLinus Torvalds 		bh = arr[i];
21311da177e4SLinus Torvalds 		lock_buffer(bh);
21321da177e4SLinus Torvalds 		mark_buffer_async_read(bh);
21331da177e4SLinus Torvalds 	}
21341da177e4SLinus Torvalds 
21351da177e4SLinus Torvalds 	/*
21361da177e4SLinus Torvalds 	 * Stage 3: start the IO.  Check for uptodateness
21371da177e4SLinus Torvalds 	 * inside the buffer lock in case another process reading
21381da177e4SLinus Torvalds 	 * the underlying blockdev brought it uptodate (the sct fix).
21391da177e4SLinus Torvalds 	 */
21401da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
21411da177e4SLinus Torvalds 		bh = arr[i];
21421da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
21431da177e4SLinus Torvalds 			end_buffer_async_read(bh, 1);
21441da177e4SLinus Torvalds 		else
21451da177e4SLinus Torvalds 			submit_bh(READ, bh);
21461da177e4SLinus Torvalds 	}
21471da177e4SLinus Torvalds 	return 0;
21481da177e4SLinus Torvalds }
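
/*
 * Illustrative sketch (not part of this file): block_read_full_page() is
 * normally exposed as a filesystem's ->readpage operation, with the
 * filesystem supplying its own (here hypothetical) get_block routine.
 */
#if 0	/* illustrative example, not part of the build */
static int foo_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, foo_get_block);
}
#endif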
21491da177e4SLinus Torvalds 
21501da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding
215189e10787SNick Piggin  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
21521da177e4SLinus Torvalds  * deal with the hole.
21531da177e4SLinus Torvalds  */
215489e10787SNick Piggin int generic_cont_expand_simple(struct inode *inode, loff_t size)
21551da177e4SLinus Torvalds {
21561da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
21571da177e4SLinus Torvalds 	struct page *page;
215889e10787SNick Piggin 	void *fsdata;
215905eb0b51SOGAWA Hirofumi 	unsigned long limit;
21601da177e4SLinus Torvalds 	int err;
21611da177e4SLinus Torvalds 
21621da177e4SLinus Torvalds 	err = -EFBIG;
21631da177e4SLinus Torvalds 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
21641da177e4SLinus Torvalds 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
21651da177e4SLinus Torvalds 		send_sig(SIGXFSZ, current, 0);
21661da177e4SLinus Torvalds 		goto out;
21671da177e4SLinus Torvalds 	}
21681da177e4SLinus Torvalds 	if (size > inode->i_sb->s_maxbytes)
21691da177e4SLinus Torvalds 		goto out;
21701da177e4SLinus Torvalds 
217189e10787SNick Piggin 	err = pagecache_write_begin(NULL, mapping, size, 0,
217289e10787SNick Piggin 				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
217389e10787SNick Piggin 				&page, &fsdata);
217489e10787SNick Piggin 	if (err)
217505eb0b51SOGAWA Hirofumi 		goto out;
217605eb0b51SOGAWA Hirofumi 
217789e10787SNick Piggin 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
217889e10787SNick Piggin 	BUG_ON(err > 0);
217905eb0b51SOGAWA Hirofumi 
218005eb0b51SOGAWA Hirofumi out:
218105eb0b51SOGAWA Hirofumi 	return err;
218205eb0b51SOGAWA Hirofumi }
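
/*
 * Illustrative sketch (not part of this file): a filesystem that cannot
 * represent holes might call generic_cont_expand_simple() from its
 * ->setattr when a truncate grows the file, so the new tail gets allocated
 * and zeroed through the pagecache.  The "foo_" names are hypothetical.
 */
#if 0	/* illustrative example, not part of the build */
static int foo_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
		err = generic_cont_expand_simple(inode, attr->ia_size);
		if (err)
			return err;
	}
	return inode_setattr(inode, attr);
}
#endif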
218305eb0b51SOGAWA Hirofumi 
218489e10787SNick Piggin int cont_expand_zero(struct file *file, struct address_space *mapping,
218589e10787SNick Piggin 			loff_t pos, loff_t *bytes)
218605eb0b51SOGAWA Hirofumi {
218789e10787SNick Piggin 	struct inode *inode = mapping->host;
218889e10787SNick Piggin 	unsigned blocksize = 1 << inode->i_blkbits;
218989e10787SNick Piggin 	struct page *page;
219089e10787SNick Piggin 	void *fsdata;
219189e10787SNick Piggin 	pgoff_t index, curidx;
219289e10787SNick Piggin 	loff_t curpos;
219389e10787SNick Piggin 	unsigned zerofrom, offset, len;
219489e10787SNick Piggin 	int err = 0;
219505eb0b51SOGAWA Hirofumi 
219689e10787SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
219789e10787SNick Piggin 	offset = pos & ~PAGE_CACHE_MASK;
219889e10787SNick Piggin 
219989e10787SNick Piggin 	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
220089e10787SNick Piggin 		zerofrom = curpos & ~PAGE_CACHE_MASK;
220189e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
220289e10787SNick Piggin 			*bytes |= (blocksize-1);
220389e10787SNick Piggin 			(*bytes)++;
220489e10787SNick Piggin 		}
220589e10787SNick Piggin 		len = PAGE_CACHE_SIZE - zerofrom;
220689e10787SNick Piggin 
220789e10787SNick Piggin 		err = pagecache_write_begin(file, mapping, curpos, len,
220889e10787SNick Piggin 						AOP_FLAG_UNINTERRUPTIBLE,
220989e10787SNick Piggin 						&page, &fsdata);
221089e10787SNick Piggin 		if (err)
221189e10787SNick Piggin 			goto out;
2212eebd2aa3SChristoph Lameter 		zero_user(page, zerofrom, len);
221389e10787SNick Piggin 		err = pagecache_write_end(file, mapping, curpos, len, len,
221489e10787SNick Piggin 						page, fsdata);
221589e10787SNick Piggin 		if (err < 0)
221689e10787SNick Piggin 			goto out;
221789e10787SNick Piggin 		BUG_ON(err != len);
221889e10787SNick Piggin 		err = 0;
221989e10787SNick Piggin 	}
222089e10787SNick Piggin 
222189e10787SNick Piggin 	/* page covers the boundary, find the boundary offset */
222289e10787SNick Piggin 	if (index == curidx) {
222389e10787SNick Piggin 		zerofrom = curpos & ~PAGE_CACHE_MASK;
222489e10787SNick Piggin 		/* if we will expand the thing last block will be filled */
222589e10787SNick Piggin 		/* if we are expanding the file, the last block will be filled */
222689e10787SNick Piggin 			goto out;
222789e10787SNick Piggin 		}
222889e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
222989e10787SNick Piggin 			*bytes |= (blocksize-1);
223089e10787SNick Piggin 			(*bytes)++;
223189e10787SNick Piggin 		}
223289e10787SNick Piggin 		len = offset - zerofrom;
223389e10787SNick Piggin 
223489e10787SNick Piggin 		err = pagecache_write_begin(file, mapping, curpos, len,
223589e10787SNick Piggin 						AOP_FLAG_UNINTERRUPTIBLE,
223689e10787SNick Piggin 						&page, &fsdata);
223789e10787SNick Piggin 		if (err)
223889e10787SNick Piggin 			goto out;
2239eebd2aa3SChristoph Lameter 		zero_user(page, zerofrom, len);
224089e10787SNick Piggin 		err = pagecache_write_end(file, mapping, curpos, len, len,
224189e10787SNick Piggin 						page, fsdata);
224289e10787SNick Piggin 		if (err < 0)
224389e10787SNick Piggin 			goto out;
224489e10787SNick Piggin 		BUG_ON(err != len);
224589e10787SNick Piggin 		err = 0;
224689e10787SNick Piggin 	}
224789e10787SNick Piggin out:
224889e10787SNick Piggin 	return err;
22491da177e4SLinus Torvalds }
22501da177e4SLinus Torvalds 
22511da177e4SLinus Torvalds /*
22521da177e4SLinus Torvalds  * For moronic filesystems that do not allow holes in files.
22531da177e4SLinus Torvalds  * We may have to extend the file.
22541da177e4SLinus Torvalds  */
225589e10787SNick Piggin int cont_write_begin(struct file *file, struct address_space *mapping,
225689e10787SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
225789e10787SNick Piggin 			struct page **pagep, void **fsdata,
225889e10787SNick Piggin 			get_block_t *get_block, loff_t *bytes)
22591da177e4SLinus Torvalds {
22601da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
22611da177e4SLinus Torvalds 	unsigned blocksize = 1 << inode->i_blkbits;
226289e10787SNick Piggin 	unsigned zerofrom;
226389e10787SNick Piggin 	int err;
22641da177e4SLinus Torvalds 
226589e10787SNick Piggin 	err = cont_expand_zero(file, mapping, pos, bytes);
226689e10787SNick Piggin 	if (err)
22671da177e4SLinus Torvalds 		goto out;
22681da177e4SLinus Torvalds 
22691da177e4SLinus Torvalds 	zerofrom = *bytes & ~PAGE_CACHE_MASK;
227089e10787SNick Piggin 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
22711da177e4SLinus Torvalds 		*bytes |= (blocksize-1);
22721da177e4SLinus Torvalds 		(*bytes)++;
22731da177e4SLinus Torvalds 	}
22741da177e4SLinus Torvalds 
227589e10787SNick Piggin 	*pagep = NULL;
227689e10787SNick Piggin 	err = block_write_begin(file, mapping, pos, len,
227789e10787SNick Piggin 				flags, pagep, fsdata, get_block);
22781da177e4SLinus Torvalds out:
227989e10787SNick Piggin 	return err;
22801da177e4SLinus Torvalds }
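
/*
 * Illustrative sketch (not part of this file): callers of cont_write_begin()
 * keep a per-inode byte count of the space already allocated and zeroed on
 * disk and pass its address as @bytes.  The wrapper below is hypothetical;
 * foo_i() and its i_disksize field stand in for that per-filesystem state.
 */
#if 0	/* illustrative example, not part of the build */
static int foo_cont_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				foo_get_block,
				&foo_i(mapping->host)->i_disksize);
}
#endif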
22811da177e4SLinus Torvalds 
22821da177e4SLinus Torvalds int block_prepare_write(struct page *page, unsigned from, unsigned to,
22831da177e4SLinus Torvalds 			get_block_t *get_block)
22841da177e4SLinus Torvalds {
22851da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
22861da177e4SLinus Torvalds 	int err = __block_prepare_write(inode, page, from, to, get_block);
22871da177e4SLinus Torvalds 	if (err)
22881da177e4SLinus Torvalds 		ClearPageUptodate(page);
22891da177e4SLinus Torvalds 	return err;
22901da177e4SLinus Torvalds }
22911da177e4SLinus Torvalds 
22921da177e4SLinus Torvalds int block_commit_write(struct page *page, unsigned from, unsigned to)
22931da177e4SLinus Torvalds {
22941da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
22951da177e4SLinus Torvalds 	__block_commit_write(inode,page,from,to);
22961da177e4SLinus Torvalds 	return 0;
22971da177e4SLinus Torvalds }
22981da177e4SLinus Torvalds 
22991da177e4SLinus Torvalds int generic_commit_write(struct file *file, struct page *page,
23001da177e4SLinus Torvalds 		unsigned from, unsigned to)
23011da177e4SLinus Torvalds {
23021da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
23031da177e4SLinus Torvalds 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
23041da177e4SLinus Torvalds 	__block_commit_write(inode,page,from,to);
23051da177e4SLinus Torvalds 	/*
23061da177e4SLinus Torvalds 	 * No need to use i_size_read() here, the i_size
23071b1dcc1bSJes Sorensen 	 * cannot change under us because we hold i_mutex.
23081da177e4SLinus Torvalds 	 */
23091da177e4SLinus Torvalds 	if (pos > inode->i_size) {
23101da177e4SLinus Torvalds 		i_size_write(inode, pos);
23111da177e4SLinus Torvalds 		mark_inode_dirty(inode);
23121da177e4SLinus Torvalds 	}
23131da177e4SLinus Torvalds 	return 0;
23141da177e4SLinus Torvalds }
23151da177e4SLinus Torvalds 
231654171690SDavid Chinner /*
231754171690SDavid Chinner  * block_page_mkwrite() is not allowed to change the file size as it gets
231854171690SDavid Chinner  * called from a page fault handler when a page is first dirtied. Hence we must
231954171690SDavid Chinner  * be careful to check for EOF conditions here. We set the page up correctly
232054171690SDavid Chinner  * for a written page which means we get ENOSPC checking when writing into
232154171690SDavid Chinner  * holes and correct delalloc and unwritten extent mapping on filesystems that
232254171690SDavid Chinner  * support these features.
232354171690SDavid Chinner  *
232454171690SDavid Chinner  * We are not allowed to take the i_mutex here so we have to play games to
232554171690SDavid Chinner  * protect against truncate races as the page could now be beyond EOF.  Because
232654171690SDavid Chinner  * vmtruncate() writes the inode size before removing pages, once we have the
232754171690SDavid Chinner  * page lock we can determine safely if the page is beyond EOF. If it is not
232854171690SDavid Chinner  * beyond EOF, then the page is guaranteed safe against truncation until we
232954171690SDavid Chinner  * unlock the page.
233054171690SDavid Chinner  */
233154171690SDavid Chinner int
233254171690SDavid Chinner block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
233354171690SDavid Chinner 		   get_block_t get_block)
233454171690SDavid Chinner {
233554171690SDavid Chinner 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
233654171690SDavid Chinner 	unsigned long end;
233754171690SDavid Chinner 	loff_t size;
233854171690SDavid Chinner 	int ret = -EINVAL;
233954171690SDavid Chinner 
234054171690SDavid Chinner 	lock_page(page);
234154171690SDavid Chinner 	size = i_size_read(inode);
234254171690SDavid Chinner 	if ((page->mapping != inode->i_mapping) ||
234318336338SNick Piggin 	    (page_offset(page) > size)) {
234454171690SDavid Chinner 		/* page got truncated out from underneath us */
234554171690SDavid Chinner 		goto out_unlock;
234654171690SDavid Chinner 	}
234754171690SDavid Chinner 
234854171690SDavid Chinner 	/* page is wholly or partially inside EOF */
234954171690SDavid Chinner 	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
235054171690SDavid Chinner 		end = size & ~PAGE_CACHE_MASK;
235154171690SDavid Chinner 	else
235254171690SDavid Chinner 		end = PAGE_CACHE_SIZE;
235354171690SDavid Chinner 
235454171690SDavid Chinner 	ret = block_prepare_write(page, 0, end, get_block);
235554171690SDavid Chinner 	if (!ret)
235654171690SDavid Chinner 		ret = block_commit_write(page, 0, end);
235754171690SDavid Chinner 
235854171690SDavid Chinner out_unlock:
235954171690SDavid Chinner 	unlock_page(page);
236054171690SDavid Chinner 	return ret;
236154171690SDavid Chinner }
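
/*
 * Illustrative sketch (not part of this file): block_page_mkwrite() is meant
 * to back a filesystem's ->page_mkwrite vm operation, so writable mappings
 * get block allocation and ENOSPC checking at fault time.  "foo_get_block"
 * is hypothetical.
 */
#if 0	/* illustrative example, not part of the build */
static int foo_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	return block_page_mkwrite(vma, page, foo_get_block);
}

static struct vm_operations_struct foo_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= foo_page_mkwrite,
};
#endif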
23621da177e4SLinus Torvalds 
23631da177e4SLinus Torvalds /*
236403158cd7SNick Piggin  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
23651da177e4SLinus Torvalds  * immediately, while under the page lock.  So it needs a special end_io
23661da177e4SLinus Torvalds  * handler which does not touch the bh after unlocking it.
23671da177e4SLinus Torvalds  */
23681da177e4SLinus Torvalds static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
23691da177e4SLinus Torvalds {
237068671f35SDmitry Monakhov 	__end_buffer_read_notouch(bh, uptodate);
23711da177e4SLinus Torvalds }
23721da177e4SLinus Torvalds 
23731da177e4SLinus Torvalds /*
237403158cd7SNick Piggin  * Attach the singly-linked list of buffers created by nobh_write_begin, to
237503158cd7SNick Piggin  * Attach the singly-linked list of buffers created by nobh_write_begin to
237603158cd7SNick Piggin  * the page (converting it to a circular linked list and taking care of page
237703158cd7SNick Piggin  */
237803158cd7SNick Piggin static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
237903158cd7SNick Piggin {
238003158cd7SNick Piggin 	struct buffer_head *bh;
238103158cd7SNick Piggin 
238203158cd7SNick Piggin 	BUG_ON(!PageLocked(page));
238303158cd7SNick Piggin 
238403158cd7SNick Piggin 	spin_lock(&page->mapping->private_lock);
238503158cd7SNick Piggin 	bh = head;
238603158cd7SNick Piggin 	do {
238703158cd7SNick Piggin 		if (PageDirty(page))
238803158cd7SNick Piggin 			set_buffer_dirty(bh);
238903158cd7SNick Piggin 		if (!bh->b_this_page)
239003158cd7SNick Piggin 			bh->b_this_page = head;
239103158cd7SNick Piggin 		bh = bh->b_this_page;
239203158cd7SNick Piggin 	} while (bh != head);
239303158cd7SNick Piggin 	attach_page_buffers(page, head);
239403158cd7SNick Piggin 	spin_unlock(&page->mapping->private_lock);
239503158cd7SNick Piggin }
239603158cd7SNick Piggin 
239703158cd7SNick Piggin /*
23981da177e4SLinus Torvalds  * On entry, no part of the page is uptodate.
23991da177e4SLinus Torvalds  * On exit, the page is fully uptodate in the areas outside (from,to)
24001da177e4SLinus Torvalds  */
240103158cd7SNick Piggin int nobh_write_begin(struct file *file, struct address_space *mapping,
240203158cd7SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
240303158cd7SNick Piggin 			struct page **pagep, void **fsdata,
24041da177e4SLinus Torvalds 			get_block_t *get_block)
24051da177e4SLinus Torvalds {
240603158cd7SNick Piggin 	struct inode *inode = mapping->host;
24071da177e4SLinus Torvalds 	const unsigned blkbits = inode->i_blkbits;
24081da177e4SLinus Torvalds 	const unsigned blocksize = 1 << blkbits;
2409a4b0672dSNick Piggin 	struct buffer_head *head, *bh;
241003158cd7SNick Piggin 	struct page *page;
241103158cd7SNick Piggin 	pgoff_t index;
241203158cd7SNick Piggin 	unsigned from, to;
24131da177e4SLinus Torvalds 	unsigned block_in_page;
2414a4b0672dSNick Piggin 	unsigned block_start, block_end;
24151da177e4SLinus Torvalds 	sector_t block_in_file;
24161da177e4SLinus Torvalds 	int nr_reads = 0;
24171da177e4SLinus Torvalds 	int ret = 0;
24181da177e4SLinus Torvalds 	int is_mapped_to_disk = 1;
24191da177e4SLinus Torvalds 
242003158cd7SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
242103158cd7SNick Piggin 	from = pos & (PAGE_CACHE_SIZE - 1);
242203158cd7SNick Piggin 	to = from + len;
242303158cd7SNick Piggin 
242403158cd7SNick Piggin 	page = __grab_cache_page(mapping, index);
242503158cd7SNick Piggin 	if (!page)
242603158cd7SNick Piggin 		return -ENOMEM;
242703158cd7SNick Piggin 	*pagep = page;
242803158cd7SNick Piggin 	*fsdata = NULL;
242903158cd7SNick Piggin 
243003158cd7SNick Piggin 	if (page_has_buffers(page)) {
243103158cd7SNick Piggin 		unlock_page(page);
243203158cd7SNick Piggin 		page_cache_release(page);
243303158cd7SNick Piggin 		*pagep = NULL;
243403158cd7SNick Piggin 		return block_write_begin(file, mapping, pos, len, flags, pagep,
243503158cd7SNick Piggin 					fsdata, get_block);
243603158cd7SNick Piggin 	}
2437a4b0672dSNick Piggin 
24381da177e4SLinus Torvalds 	if (PageMappedToDisk(page))
24391da177e4SLinus Torvalds 		return 0;
24401da177e4SLinus Torvalds 
2441a4b0672dSNick Piggin 	/*
2442a4b0672dSNick Piggin 	 * Allocate buffers so that we can keep track of state, and potentially
2443a4b0672dSNick Piggin 	 * attach them to the page if an error occurs. In the common case of
2444a4b0672dSNick Piggin 	 * no error, they will just be freed again without ever being attached
2445a4b0672dSNick Piggin 	 * to the page (which is all OK, because we're under the page lock).
2446a4b0672dSNick Piggin 	 *
2447a4b0672dSNick Piggin 	 * Be careful: the buffer linked list is a NULL terminated one, rather
2448a4b0672dSNick Piggin 	 * than the circular one we're used to.
2449a4b0672dSNick Piggin 	 */
2450a4b0672dSNick Piggin 	head = alloc_page_buffers(page, blocksize, 0);
245103158cd7SNick Piggin 	if (!head) {
245203158cd7SNick Piggin 		ret = -ENOMEM;
245303158cd7SNick Piggin 		goto out_release;
245403158cd7SNick Piggin 	}
2455a4b0672dSNick Piggin 
24561da177e4SLinus Torvalds 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
24571da177e4SLinus Torvalds 
24581da177e4SLinus Torvalds 	/*
24591da177e4SLinus Torvalds 	 * We loop across all blocks in the page, whether or not they are
24601da177e4SLinus Torvalds 	 * part of the affected region.  This is so we can discover if the
24611da177e4SLinus Torvalds 	 * page is fully mapped-to-disk.
24621da177e4SLinus Torvalds 	 */
2463a4b0672dSNick Piggin 	for (block_start = 0, block_in_page = 0, bh = head;
24641da177e4SLinus Torvalds 		  block_start < PAGE_CACHE_SIZE;
2465a4b0672dSNick Piggin 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
24661da177e4SLinus Torvalds 		int create;
24671da177e4SLinus Torvalds 
2468a4b0672dSNick Piggin 		block_end = block_start + blocksize;
2469a4b0672dSNick Piggin 		bh->b_state = 0;
24701da177e4SLinus Torvalds 		create = 1;
24711da177e4SLinus Torvalds 		if (block_start >= to)
24721da177e4SLinus Torvalds 			create = 0;
24731da177e4SLinus Torvalds 		ret = get_block(inode, block_in_file + block_in_page,
2474a4b0672dSNick Piggin 					bh, create);
24751da177e4SLinus Torvalds 		if (ret)
24761da177e4SLinus Torvalds 			goto failed;
2477a4b0672dSNick Piggin 		if (!buffer_mapped(bh))
24781da177e4SLinus Torvalds 			is_mapped_to_disk = 0;
2479a4b0672dSNick Piggin 		if (buffer_new(bh))
2480a4b0672dSNick Piggin 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2481a4b0672dSNick Piggin 		if (PageUptodate(page)) {
2482a4b0672dSNick Piggin 			set_buffer_uptodate(bh);
24831da177e4SLinus Torvalds 			continue;
2484a4b0672dSNick Piggin 		}
2485a4b0672dSNick Piggin 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2486eebd2aa3SChristoph Lameter 			zero_user_segments(page, block_start, from,
2487eebd2aa3SChristoph Lameter 							to, block_end);
24881da177e4SLinus Torvalds 			continue;
24891da177e4SLinus Torvalds 		}
2490a4b0672dSNick Piggin 		if (buffer_uptodate(bh))
24911da177e4SLinus Torvalds 			continue;	/* reiserfs does this */
24921da177e4SLinus Torvalds 		if (block_start < from || block_end > to) {
2493a4b0672dSNick Piggin 			lock_buffer(bh);
2494a4b0672dSNick Piggin 			bh->b_end_io = end_buffer_read_nobh;
2495a4b0672dSNick Piggin 			submit_bh(READ, bh);
2496a4b0672dSNick Piggin 			nr_reads++;
24971da177e4SLinus Torvalds 		}
24981da177e4SLinus Torvalds 	}
24991da177e4SLinus Torvalds 
25001da177e4SLinus Torvalds 	if (nr_reads) {
25011da177e4SLinus Torvalds 		/*
25021da177e4SLinus Torvalds 		 * The page is locked, so these buffers are protected from
25031da177e4SLinus Torvalds 		 * any VM or truncate activity.  Hence we don't need to care
25041da177e4SLinus Torvalds 		 * for the buffer_head refcounts.
25051da177e4SLinus Torvalds 		 */
2506a4b0672dSNick Piggin 		for (bh = head; bh; bh = bh->b_this_page) {
25071da177e4SLinus Torvalds 			wait_on_buffer(bh);
25081da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
25091da177e4SLinus Torvalds 				ret = -EIO;
25101da177e4SLinus Torvalds 		}
25111da177e4SLinus Torvalds 		if (ret)
25121da177e4SLinus Torvalds 			goto failed;
25131da177e4SLinus Torvalds 	}
25141da177e4SLinus Torvalds 
25151da177e4SLinus Torvalds 	if (is_mapped_to_disk)
25161da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
25171da177e4SLinus Torvalds 
251803158cd7SNick Piggin 	*fsdata = head; /* to be released by nobh_write_end */
2519a4b0672dSNick Piggin 
25201da177e4SLinus Torvalds 	return 0;
25211da177e4SLinus Torvalds 
25221da177e4SLinus Torvalds failed:
252303158cd7SNick Piggin 	BUG_ON(!ret);
25241da177e4SLinus Torvalds 	/*
2525a4b0672dSNick Piggin 	 * Error recovery is a bit difficult. We need to zero out blocks that
2526a4b0672dSNick Piggin 	 * were newly allocated, and dirty them to ensure they get written out.
2527a4b0672dSNick Piggin 	 * Buffers need to be attached to the page at this point, otherwise
2528a4b0672dSNick Piggin 	 * the handling of potential IO errors during writeout would be hard
2529a4b0672dSNick Piggin 	 * (could try doing synchronous writeout, but what if that fails too?)
25301da177e4SLinus Torvalds 	 */
253103158cd7SNick Piggin 	attach_nobh_buffers(page, head);
253203158cd7SNick Piggin 	page_zero_new_buffers(page, from, to);
2533a4b0672dSNick Piggin 
253403158cd7SNick Piggin out_release:
253503158cd7SNick Piggin 	unlock_page(page);
253603158cd7SNick Piggin 	page_cache_release(page);
253703158cd7SNick Piggin 	*pagep = NULL;
2538a4b0672dSNick Piggin 
253903158cd7SNick Piggin 	if (pos + len > inode->i_size)
254003158cd7SNick Piggin 		vmtruncate(inode, inode->i_size);
2541a4b0672dSNick Piggin 
25421da177e4SLinus Torvalds 	return ret;
25431da177e4SLinus Torvalds }
254403158cd7SNick Piggin EXPORT_SYMBOL(nobh_write_begin);
25451da177e4SLinus Torvalds 
254603158cd7SNick Piggin int nobh_write_end(struct file *file, struct address_space *mapping,
254703158cd7SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
254803158cd7SNick Piggin 			struct page *page, void *fsdata)
25491da177e4SLinus Torvalds {
25501da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
2551efdc3131SNick Piggin 	struct buffer_head *head = fsdata;
255203158cd7SNick Piggin 	struct buffer_head *bh;
25531da177e4SLinus Torvalds 
255403158cd7SNick Piggin 	if (!PageMappedToDisk(page)) {
255503158cd7SNick Piggin 		if (unlikely(copied < len) && !page_has_buffers(page))
255603158cd7SNick Piggin 			attach_nobh_buffers(page, head);
2557a4b0672dSNick Piggin 		if (page_has_buffers(page))
255803158cd7SNick Piggin 			return generic_write_end(file, mapping, pos, len,
255903158cd7SNick Piggin 						copied, page, fsdata);
256003158cd7SNick Piggin 	}
2561a4b0672dSNick Piggin 
256222c8ca78SNick Piggin 	SetPageUptodate(page);
25631da177e4SLinus Torvalds 	set_page_dirty(page);
256403158cd7SNick Piggin 	if (pos+copied > inode->i_size) {
256503158cd7SNick Piggin 		i_size_write(inode, pos+copied);
25661da177e4SLinus Torvalds 		mark_inode_dirty(inode);
25671da177e4SLinus Torvalds 	}
256803158cd7SNick Piggin 
256903158cd7SNick Piggin 	unlock_page(page);
257003158cd7SNick Piggin 	page_cache_release(page);
257103158cd7SNick Piggin 
257203158cd7SNick Piggin 	while (head) {
257303158cd7SNick Piggin 		bh = head;
257403158cd7SNick Piggin 		head = head->b_this_page;
257503158cd7SNick Piggin 		free_buffer_head(bh);
25761da177e4SLinus Torvalds 	}
257703158cd7SNick Piggin 
257803158cd7SNick Piggin 	return copied;
257903158cd7SNick Piggin }
258003158cd7SNick Piggin EXPORT_SYMBOL(nobh_write_end);
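
/*
 * Illustrative sketch (not part of this file): the nobh write path is used
 * by wrapping nobh_write_begin() with the filesystem's get_block routine and
 * installing nobh_write_end() as the ->write_end operation.  The "foo_"
 * names are hypothetical.
 */
#if 0	/* illustrative example, not part of the build */
static int foo_nobh_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return nobh_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				foo_get_block);
}

static const struct address_space_operations foo_nobh_aops = {
	.write_begin	= foo_nobh_write_begin,
	.write_end	= nobh_write_end,
	/* other operations omitted */
};
#endif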
25811da177e4SLinus Torvalds 
25821da177e4SLinus Torvalds /*
25831da177e4SLinus Torvalds  * nobh_writepage() - based on block_write_full_page() except
25841da177e4SLinus Torvalds  * that it tries to operate without attaching bufferheads to
25851da177e4SLinus Torvalds  * the page.
25861da177e4SLinus Torvalds  */
25871da177e4SLinus Torvalds int nobh_writepage(struct page *page, get_block_t *get_block,
25881da177e4SLinus Torvalds 			struct writeback_control *wbc)
25891da177e4SLinus Torvalds {
25901da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
25911da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
25921da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
25931da177e4SLinus Torvalds 	unsigned offset;
25941da177e4SLinus Torvalds 	int ret;
25951da177e4SLinus Torvalds 
25961da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
25971da177e4SLinus Torvalds 	if (page->index < end_index)
25981da177e4SLinus Torvalds 		goto out;
25991da177e4SLinus Torvalds 
26001da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
26011da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
26021da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
26031da177e4SLinus Torvalds 		/*
26041da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
26051da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
26061da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
26071da177e4SLinus Torvalds 		 */
26081da177e4SLinus Torvalds #if 0
26091da177e4SLinus Torvalds 		/* Not really sure about this - do we need this? */
26101da177e4SLinus Torvalds 		if (page->mapping->a_ops->invalidatepage)
26111da177e4SLinus Torvalds 			page->mapping->a_ops->invalidatepage(page, offset);
26121da177e4SLinus Torvalds #endif
26131da177e4SLinus Torvalds 		unlock_page(page);
26141da177e4SLinus Torvalds 		return 0; /* don't care */
26151da177e4SLinus Torvalds 	}
26161da177e4SLinus Torvalds 
26171da177e4SLinus Torvalds 	/*
26181da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
26191da177e4SLinus Torvalds 	 * writepage invocation because it may be mmapped.  "A file is mapped
26201da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
26211da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
26221da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
26231da177e4SLinus Torvalds 	 */
2624eebd2aa3SChristoph Lameter 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
26251da177e4SLinus Torvalds out:
26261da177e4SLinus Torvalds 	ret = mpage_writepage(page, get_block, wbc);
26271da177e4SLinus Torvalds 	if (ret == -EAGAIN)
26281da177e4SLinus Torvalds 		ret = __block_write_full_page(inode, page, get_block, wbc);
26291da177e4SLinus Torvalds 	return ret;
26301da177e4SLinus Torvalds }
26311da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_writepage);
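
/*
 * Illustrative sketch (not part of this file): filesystems that use the nobh
 * path expose nobh_writepage() through a thin ->writepage wrapper that
 * supplies their (hypothetical here) get_block routine.
 */
#if 0	/* illustrative example, not part of the build */
static int foo_nobh_writepage(struct page *page, struct writeback_control *wbc)
{
	return nobh_writepage(page, foo_get_block, wbc);
}
#endif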
26321da177e4SLinus Torvalds 
263303158cd7SNick Piggin int nobh_truncate_page(struct address_space *mapping,
263403158cd7SNick Piggin 			loff_t from, get_block_t *get_block)
26351da177e4SLinus Torvalds {
26361da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
26371da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
263803158cd7SNick Piggin 	unsigned blocksize;
263903158cd7SNick Piggin 	sector_t iblock;
264003158cd7SNick Piggin 	unsigned length, pos;
264103158cd7SNick Piggin 	struct inode *inode = mapping->host;
26421da177e4SLinus Torvalds 	struct page *page;
264303158cd7SNick Piggin 	struct buffer_head map_bh;
264403158cd7SNick Piggin 	int err;
26451da177e4SLinus Torvalds 
264603158cd7SNick Piggin 	blocksize = 1 << inode->i_blkbits;
264703158cd7SNick Piggin 	length = offset & (blocksize - 1);
26481da177e4SLinus Torvalds 
264903158cd7SNick Piggin 	/* Block boundary? Nothing to do */
265003158cd7SNick Piggin 	if (!length)
265103158cd7SNick Piggin 		return 0;
265203158cd7SNick Piggin 
265303158cd7SNick Piggin 	length = blocksize - length;
265403158cd7SNick Piggin 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
265503158cd7SNick Piggin 
26561da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
265703158cd7SNick Piggin 	err = -ENOMEM;
26581da177e4SLinus Torvalds 	if (!page)
26591da177e4SLinus Torvalds 		goto out;
26601da177e4SLinus Torvalds 
266103158cd7SNick Piggin 	if (page_has_buffers(page)) {
266203158cd7SNick Piggin has_buffers:
266303158cd7SNick Piggin 		unlock_page(page);
266403158cd7SNick Piggin 		page_cache_release(page);
266503158cd7SNick Piggin 		return block_truncate_page(mapping, from, get_block);
26661da177e4SLinus Torvalds 	}
266703158cd7SNick Piggin 
266803158cd7SNick Piggin 	/* Find the buffer that contains "offset" */
266903158cd7SNick Piggin 	pos = blocksize;
267003158cd7SNick Piggin 	while (offset >= pos) {
267103158cd7SNick Piggin 		iblock++;
267203158cd7SNick Piggin 		pos += blocksize;
267303158cd7SNick Piggin 	}
267403158cd7SNick Piggin 
267503158cd7SNick Piggin 	err = get_block(inode, iblock, &map_bh, 0);
267603158cd7SNick Piggin 	if (err)
267703158cd7SNick Piggin 		goto unlock;
267803158cd7SNick Piggin 	/* unmapped? It's a hole - nothing to do */
267903158cd7SNick Piggin 	if (!buffer_mapped(&map_bh))
268003158cd7SNick Piggin 		goto unlock;
268103158cd7SNick Piggin 
268203158cd7SNick Piggin 	/* Ok, it's mapped. Make sure it's up-to-date */
268303158cd7SNick Piggin 	if (!PageUptodate(page)) {
268403158cd7SNick Piggin 		err = mapping->a_ops->readpage(NULL, page);
268503158cd7SNick Piggin 		if (err) {
268603158cd7SNick Piggin 			page_cache_release(page);
268703158cd7SNick Piggin 			goto out;
268803158cd7SNick Piggin 		}
268903158cd7SNick Piggin 		lock_page(page);
269003158cd7SNick Piggin 		if (!PageUptodate(page)) {
269103158cd7SNick Piggin 			err = -EIO;
269203158cd7SNick Piggin 			goto unlock;
269303158cd7SNick Piggin 		}
269403158cd7SNick Piggin 		if (page_has_buffers(page))
269503158cd7SNick Piggin 			goto has_buffers;
269603158cd7SNick Piggin 	}
2697eebd2aa3SChristoph Lameter 	zero_user(page, offset, length);
269803158cd7SNick Piggin 	set_page_dirty(page);
269903158cd7SNick Piggin 	err = 0;
270003158cd7SNick Piggin 
270103158cd7SNick Piggin unlock:
27021da177e4SLinus Torvalds 	unlock_page(page);
27031da177e4SLinus Torvalds 	page_cache_release(page);
27041da177e4SLinus Torvalds out:
270503158cd7SNick Piggin 	return err;
27061da177e4SLinus Torvalds }
27071da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_truncate_page);
27081da177e4SLinus Torvalds 
27091da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping,
27101da177e4SLinus Torvalds 			loff_t from, get_block_t *get_block)
27111da177e4SLinus Torvalds {
27121da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
27131da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
27141da177e4SLinus Torvalds 	unsigned blocksize;
271554b21a79SAndrew Morton 	sector_t iblock;
27161da177e4SLinus Torvalds 	unsigned length, pos;
27171da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
27181da177e4SLinus Torvalds 	struct page *page;
27191da177e4SLinus Torvalds 	struct buffer_head *bh;
27201da177e4SLinus Torvalds 	int err;
27211da177e4SLinus Torvalds 
27221da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
27231da177e4SLinus Torvalds 	length = offset & (blocksize - 1);
27241da177e4SLinus Torvalds 
27251da177e4SLinus Torvalds 	/* Block boundary? Nothing to do */
27261da177e4SLinus Torvalds 	if (!length)
27271da177e4SLinus Torvalds 		return 0;
27281da177e4SLinus Torvalds 
27291da177e4SLinus Torvalds 	length = blocksize - length;
273054b21a79SAndrew Morton 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
27311da177e4SLinus Torvalds 
27321da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
27331da177e4SLinus Torvalds 	err = -ENOMEM;
27341da177e4SLinus Torvalds 	if (!page)
27351da177e4SLinus Torvalds 		goto out;
27361da177e4SLinus Torvalds 
27371da177e4SLinus Torvalds 	if (!page_has_buffers(page))
27381da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
27391da177e4SLinus Torvalds 
27401da177e4SLinus Torvalds 	/* Find the buffer that contains "offset" */
27411da177e4SLinus Torvalds 	bh = page_buffers(page);
27421da177e4SLinus Torvalds 	pos = blocksize;
27431da177e4SLinus Torvalds 	while (offset >= pos) {
27441da177e4SLinus Torvalds 		bh = bh->b_this_page;
27451da177e4SLinus Torvalds 		iblock++;
27461da177e4SLinus Torvalds 		pos += blocksize;
27471da177e4SLinus Torvalds 	}
27481da177e4SLinus Torvalds 
27491da177e4SLinus Torvalds 	err = 0;
27501da177e4SLinus Torvalds 	if (!buffer_mapped(bh)) {
2751b0cf2321SBadari Pulavarty 		WARN_ON(bh->b_size != blocksize);
27521da177e4SLinus Torvalds 		err = get_block(inode, iblock, bh, 0);
27531da177e4SLinus Torvalds 		if (err)
27541da177e4SLinus Torvalds 			goto unlock;
27551da177e4SLinus Torvalds 		/* unmapped? It's a hole - nothing to do */
27561da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
27571da177e4SLinus Torvalds 			goto unlock;
27581da177e4SLinus Torvalds 	}
27591da177e4SLinus Torvalds 
27601da177e4SLinus Torvalds 	/* Ok, it's mapped. Make sure it's up-to-date */
27611da177e4SLinus Torvalds 	if (PageUptodate(page))
27621da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
27631da177e4SLinus Torvalds 
276433a266ddSDavid Chinner 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
27651da177e4SLinus Torvalds 		err = -EIO;
27661da177e4SLinus Torvalds 		ll_rw_block(READ, 1, &bh);
27671da177e4SLinus Torvalds 		wait_on_buffer(bh);
27681da177e4SLinus Torvalds 		/* Uhhuh. Read error. Complain and punt. */
27691da177e4SLinus Torvalds 		if (!buffer_uptodate(bh))
27701da177e4SLinus Torvalds 			goto unlock;
27711da177e4SLinus Torvalds 	}
27721da177e4SLinus Torvalds 
2773eebd2aa3SChristoph Lameter 	zero_user(page, offset, length);
27741da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
27751da177e4SLinus Torvalds 	err = 0;
27761da177e4SLinus Torvalds 
27771da177e4SLinus Torvalds unlock:
27781da177e4SLinus Torvalds 	unlock_page(page);
27791da177e4SLinus Torvalds 	page_cache_release(page);
27801da177e4SLinus Torvalds out:
27811da177e4SLinus Torvalds 	return err;
27821da177e4SLinus Torvalds }
27831da177e4SLinus Torvalds 
27841da177e4SLinus Torvalds /*
27851da177e4SLinus Torvalds  * The generic ->writepage function for buffer-backed address_spaces
27861da177e4SLinus Torvalds  */
27871da177e4SLinus Torvalds int block_write_full_page(struct page *page, get_block_t *get_block,
27881da177e4SLinus Torvalds 			struct writeback_control *wbc)
27891da177e4SLinus Torvalds {
27901da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
27911da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
27921da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
27931da177e4SLinus Torvalds 	unsigned offset;
27941da177e4SLinus Torvalds 
27951da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
27961da177e4SLinus Torvalds 	if (page->index < end_index)
27971da177e4SLinus Torvalds 		return __block_write_full_page(inode, page, get_block, wbc);
27981da177e4SLinus Torvalds 
27991da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
28001da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
28011da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
28021da177e4SLinus Torvalds 		/*
28031da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
28041da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
28051da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
28061da177e4SLinus Torvalds 		 */
2807aaa4059bSJan Kara 		do_invalidatepage(page, 0);
28081da177e4SLinus Torvalds 		unlock_page(page);
28091da177e4SLinus Torvalds 		return 0; /* don't care */
28101da177e4SLinus Torvalds 	}
28111da177e4SLinus Torvalds 
28121da177e4SLinus Torvalds 	/*
28131da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
28141da177e4SLinus Torvalds 	 * writepage invocation because it may be mmapped.  "A file is mapped
28151da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
28161da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
28171da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
28181da177e4SLinus Torvalds 	 */
2819eebd2aa3SChristoph Lameter 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
28201da177e4SLinus Torvalds 	return __block_write_full_page(inode, page, get_block, wbc);
28211da177e4SLinus Torvalds }
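
/*
 * Illustrative sketch (not part of this file): the usual ->writepage for a
 * buffer-backed filesystem is a thin wrapper around block_write_full_page(),
 * passing the filesystem's (hypothetical here) get_block routine.
 */
#if 0	/* illustrative example, not part of the build */
static int foo_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, foo_get_block, wbc);
}
#endif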
28221da177e4SLinus Torvalds 
28231da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
28241da177e4SLinus Torvalds 			    get_block_t *get_block)
28251da177e4SLinus Torvalds {
28261da177e4SLinus Torvalds 	struct buffer_head tmp;
28271da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
28281da177e4SLinus Torvalds 	tmp.b_state = 0;
28291da177e4SLinus Torvalds 	tmp.b_blocknr = 0;
2830b0cf2321SBadari Pulavarty 	tmp.b_size = 1 << inode->i_blkbits;
28311da177e4SLinus Torvalds 	get_block(inode, block, &tmp, 0);
28321da177e4SLinus Torvalds 	return tmp.b_blocknr;
28331da177e4SLinus Torvalds }
28341da177e4SLinus Torvalds 
28356712ecf8SNeilBrown static void end_bio_bh_io_sync(struct bio *bio, int err)
28361da177e4SLinus Torvalds {
28371da177e4SLinus Torvalds 	struct buffer_head *bh = bio->bi_private;
28381da177e4SLinus Torvalds 
28391da177e4SLinus Torvalds 	if (err == -EOPNOTSUPP) {
28401da177e4SLinus Torvalds 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
28411da177e4SLinus Torvalds 		set_bit(BH_Eopnotsupp, &bh->b_state);
28421da177e4SLinus Torvalds 	}
28431da177e4SLinus Torvalds 
28441da177e4SLinus Torvalds 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
28451da177e4SLinus Torvalds 	bio_put(bio);
28461da177e4SLinus Torvalds }
28471da177e4SLinus Torvalds 
28481da177e4SLinus Torvalds int submit_bh(int rw, struct buffer_head * bh)
28491da177e4SLinus Torvalds {
28501da177e4SLinus Torvalds 	struct bio *bio;
28511da177e4SLinus Torvalds 	int ret = 0;
28521da177e4SLinus Torvalds 
28531da177e4SLinus Torvalds 	BUG_ON(!buffer_locked(bh));
28541da177e4SLinus Torvalds 	BUG_ON(!buffer_mapped(bh));
28551da177e4SLinus Torvalds 	BUG_ON(!bh->b_end_io);
28561da177e4SLinus Torvalds 
28571da177e4SLinus Torvalds 	if (buffer_ordered(bh) && (rw == WRITE))
28581da177e4SLinus Torvalds 		rw = WRITE_BARRIER;
28591da177e4SLinus Torvalds 
28601da177e4SLinus Torvalds 	/*
28611da177e4SLinus Torvalds 	 * Only clear out a write error when rewriting, should this
28621da177e4SLinus Torvalds 	 * include WRITE_SYNC as well?
28631da177e4SLinus Torvalds 	 */
28641da177e4SLinus Torvalds 	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
28651da177e4SLinus Torvalds 		clear_buffer_write_io_error(bh);
28661da177e4SLinus Torvalds 
28671da177e4SLinus Torvalds 	/*
28681da177e4SLinus Torvalds 	 * from here on down, it's all bio -- do the initial mapping,
28691da177e4SLinus Torvalds 	 * submit_bio -> generic_make_request may further map this bio around
28701da177e4SLinus Torvalds 	 */
28711da177e4SLinus Torvalds 	bio = bio_alloc(GFP_NOIO, 1);
28721da177e4SLinus Torvalds 
28731da177e4SLinus Torvalds 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
28741da177e4SLinus Torvalds 	bio->bi_bdev = bh->b_bdev;
28751da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_page = bh->b_page;
28761da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_len = bh->b_size;
28771da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
28781da177e4SLinus Torvalds 
28791da177e4SLinus Torvalds 	bio->bi_vcnt = 1;
28801da177e4SLinus Torvalds 	bio->bi_idx = 0;
28811da177e4SLinus Torvalds 	bio->bi_size = bh->b_size;
28821da177e4SLinus Torvalds 
28831da177e4SLinus Torvalds 	bio->bi_end_io = end_bio_bh_io_sync;
28841da177e4SLinus Torvalds 	bio->bi_private = bh;
28851da177e4SLinus Torvalds 
28861da177e4SLinus Torvalds 	bio_get(bio);
28871da177e4SLinus Torvalds 	submit_bio(rw, bio);
28881da177e4SLinus Torvalds 
28891da177e4SLinus Torvalds 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
28901da177e4SLinus Torvalds 		ret = -EOPNOTSUPP;
28911da177e4SLinus Torvalds 
28921da177e4SLinus Torvalds 	bio_put(bio);
28931da177e4SLinus Torvalds 	return ret;
28941da177e4SLinus Torvalds }
28951da177e4SLinus Torvalds 
28961da177e4SLinus Torvalds /**
28971da177e4SLinus Torvalds  * ll_rw_block: low-level access to block devices (DEPRECATED)
2898a7662236SJan Kara  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
28991da177e4SLinus Torvalds  * @nr: number of &struct buffer_heads in the array
29001da177e4SLinus Torvalds  * @bhs: array of pointers to &struct buffer_head
29011da177e4SLinus Torvalds  *
2902a7662236SJan Kara  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2903a7662236SJan Kara  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2904a7662236SJan Kara  * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
2905a7662236SJan Kara  * are sent to disk. The fourth %READA option is described in the documentation
2906a7662236SJan Kara  * for generic_make_request() which ll_rw_block() calls.
29071da177e4SLinus Torvalds  *
29081da177e4SLinus Torvalds  * This function drops any buffer that it cannot get a lock on (with the
2909a7662236SJan Kara  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2910a7662236SJan Kara  * clean when doing a write request, and any buffer that appears to be
2911a7662236SJan Kara  * up-to-date when doing a read request.  Further, it marks as clean buffers that
2912a7662236SJan Kara  * are processed for writing (the buffer cache won't assume that they are
2913a7662236SJan Kara  * actually clean until the buffer gets unlocked).
29141da177e4SLinus Torvalds  *
29151da177e4SLinus Torvalds  * ll_rw_block sets b_end_io to a simple completion handler that marks
29161da177e4SLinus Torvalds  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
29171da177e4SLinus Torvalds  * any waiters.
29181da177e4SLinus Torvalds  *
29191da177e4SLinus Torvalds  * All of the buffers must be for the same device, and must also be a
29201da177e4SLinus Torvalds  * multiple of the current approved size for the device.
29211da177e4SLinus Torvalds  */
29221da177e4SLinus Torvalds void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
29231da177e4SLinus Torvalds {
29241da177e4SLinus Torvalds 	int i;
29251da177e4SLinus Torvalds 
29261da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
29271da177e4SLinus Torvalds 		struct buffer_head *bh = bhs[i];
29281da177e4SLinus Torvalds 
2929a7662236SJan Kara 		if (rw == SWRITE)
2930a7662236SJan Kara 			lock_buffer(bh);
2931a7662236SJan Kara 		else if (test_set_buffer_locked(bh))
29321da177e4SLinus Torvalds 			continue;
29331da177e4SLinus Torvalds 
2934a7662236SJan Kara 		if (rw == WRITE || rw == SWRITE) {
29351da177e4SLinus Torvalds 			if (test_clear_buffer_dirty(bh)) {
293676c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_write_sync;
2937e60e5c50SOGAWA Hirofumi 				get_bh(bh);
29381da177e4SLinus Torvalds 				submit_bh(WRITE, bh);
29391da177e4SLinus Torvalds 				continue;
29401da177e4SLinus Torvalds 			}
29411da177e4SLinus Torvalds 		} else {
29421da177e4SLinus Torvalds 			if (!buffer_uptodate(bh)) {
294376c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_read_sync;
2944e60e5c50SOGAWA Hirofumi 				get_bh(bh);
29451da177e4SLinus Torvalds 				submit_bh(rw, bh);
29461da177e4SLinus Torvalds 				continue;
29471da177e4SLinus Torvalds 			}
29481da177e4SLinus Torvalds 		}
29491da177e4SLinus Torvalds 		unlock_buffer(bh);
29501da177e4SLinus Torvalds 	}
29511da177e4SLinus Torvalds }
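
/*
 * Illustrative sketch (not part of this file): the classic synchronous
 * metadata read built on ll_rw_block() - get the buffer, submit a READ if it
 * is not already uptodate, then wait and re-check.  "foo_read_block" is a
 * hypothetical helper.
 */
#if 0	/* illustrative example, not part of the build */
static struct buffer_head *foo_read_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_getblk(sb, block);

	if (!bh)
		return NULL;
	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			brelse(bh);		/* read error */
			return NULL;
		}
	}
	return bh;
}
#endif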
29521da177e4SLinus Torvalds 
29531da177e4SLinus Torvalds /*
29541da177e4SLinus Torvalds  * For a data-integrity writeout, we need to wait upon any in-progress I/O
29551da177e4SLinus Torvalds  * and then start new I/O and then wait upon it.  The caller must have a ref on
29561da177e4SLinus Torvalds  * the buffer_head.
29571da177e4SLinus Torvalds  */
29581da177e4SLinus Torvalds int sync_dirty_buffer(struct buffer_head *bh)
29591da177e4SLinus Torvalds {
29601da177e4SLinus Torvalds 	int ret = 0;
29611da177e4SLinus Torvalds 
29621da177e4SLinus Torvalds 	WARN_ON(atomic_read(&bh->b_count) < 1);
29631da177e4SLinus Torvalds 	lock_buffer(bh);
29641da177e4SLinus Torvalds 	if (test_clear_buffer_dirty(bh)) {
29651da177e4SLinus Torvalds 		get_bh(bh);
29661da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_write_sync;
29671da177e4SLinus Torvalds 		ret = submit_bh(WRITE, bh);
29681da177e4SLinus Torvalds 		wait_on_buffer(bh);
29691da177e4SLinus Torvalds 		if (buffer_eopnotsupp(bh)) {
29701da177e4SLinus Torvalds 			clear_buffer_eopnotsupp(bh);
29711da177e4SLinus Torvalds 			ret = -EOPNOTSUPP;
29721da177e4SLinus Torvalds 		}
29731da177e4SLinus Torvalds 		if (!ret && !buffer_uptodate(bh))
29741da177e4SLinus Torvalds 			ret = -EIO;
29751da177e4SLinus Torvalds 	} else {
29761da177e4SLinus Torvalds 		unlock_buffer(bh);
29771da177e4SLinus Torvalds 	}
29781da177e4SLinus Torvalds 	return ret;
29791da177e4SLinus Torvalds }
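
/*
 * Illustrative sketch (not part of this file): forcing an updated metadata
 * buffer to disk before proceeding - copy in the new contents, mark the
 * buffer dirty, then let sync_dirty_buffer() submit the write and wait.
 * "foo_commit_block" is a hypothetical helper.
 */
#if 0	/* illustrative example, not part of the build */
static int foo_commit_block(struct buffer_head *bh, const void *data, size_t len)
{
	memcpy(bh->b_data, data, len);
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);	/* waits; returns 0 or an error such as -EIO */
}
#endif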
29801da177e4SLinus Torvalds 
29811da177e4SLinus Torvalds /*
29821da177e4SLinus Torvalds  * try_to_free_buffers() checks if all the buffers on this particular page
29831da177e4SLinus Torvalds  * are unused, and releases them if so.
29841da177e4SLinus Torvalds  *
29851da177e4SLinus Torvalds  * Exclusion against try_to_free_buffers may be obtained by either
29861da177e4SLinus Torvalds  * locking the page or by holding its mapping's private_lock.
29871da177e4SLinus Torvalds  *
29881da177e4SLinus Torvalds  * If the page is dirty but all the buffers are clean then we need to
29891da177e4SLinus Torvalds  * be sure to mark the page clean as well.  This is because the page
29901da177e4SLinus Torvalds  * may be against a block device, and a later reattachment of buffers
29911da177e4SLinus Torvalds  * to a dirty page will set *all* buffers dirty.  Which would corrupt
29921da177e4SLinus Torvalds  * filesystem data on the same device.
29931da177e4SLinus Torvalds  *
29941da177e4SLinus Torvalds  * The same applies to regular filesystem pages: if all the buffers are
29951da177e4SLinus Torvalds  * clean then we set the page clean and proceed.  To do that, we require
29961da177e4SLinus Torvalds  * total exclusion from __set_page_dirty_buffers().  That is obtained with
29971da177e4SLinus Torvalds  * private_lock.
29981da177e4SLinus Torvalds  *
29991da177e4SLinus Torvalds  * try_to_free_buffers() is non-blocking.
30001da177e4SLinus Torvalds  */
30011da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh)
30021da177e4SLinus Torvalds {
30031da177e4SLinus Torvalds 	return atomic_read(&bh->b_count) |
30041da177e4SLinus Torvalds 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
30051da177e4SLinus Torvalds }
30061da177e4SLinus Torvalds 
30071da177e4SLinus Torvalds static int
30081da177e4SLinus Torvalds drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
30091da177e4SLinus Torvalds {
30101da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);
30111da177e4SLinus Torvalds 	struct buffer_head *bh;
30121da177e4SLinus Torvalds 
30131da177e4SLinus Torvalds 	bh = head;
30141da177e4SLinus Torvalds 	do {
3015de7d5a3bSakpm@osdl.org 		if (buffer_write_io_error(bh) && page->mapping)
30161da177e4SLinus Torvalds 			set_bit(AS_EIO, &page->mapping->flags);
30171da177e4SLinus Torvalds 		if (buffer_busy(bh))
30181da177e4SLinus Torvalds 			goto failed;
30191da177e4SLinus Torvalds 		bh = bh->b_this_page;
30201da177e4SLinus Torvalds 	} while (bh != head);
30211da177e4SLinus Torvalds 
30221da177e4SLinus Torvalds 	do {
30231da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
30241da177e4SLinus Torvalds 
30251da177e4SLinus Torvalds 		if (!list_empty(&bh->b_assoc_buffers))
30261da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
30271da177e4SLinus Torvalds 		bh = next;
30281da177e4SLinus Torvalds 	} while (bh != head);
30291da177e4SLinus Torvalds 	*buffers_to_free = head;
30301da177e4SLinus Torvalds 	__clear_page_buffers(page);
30311da177e4SLinus Torvalds 	return 1;
30321da177e4SLinus Torvalds failed:
30331da177e4SLinus Torvalds 	return 0;
30341da177e4SLinus Torvalds }
30351da177e4SLinus Torvalds 
30361da177e4SLinus Torvalds int try_to_free_buffers(struct page *page)
30371da177e4SLinus Torvalds {
30381da177e4SLinus Torvalds 	struct address_space * const mapping = page->mapping;
30391da177e4SLinus Torvalds 	struct buffer_head *buffers_to_free = NULL;
30401da177e4SLinus Torvalds 	int ret = 0;
30411da177e4SLinus Torvalds 
30421da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
3043ecdfc978SLinus Torvalds 	if (PageWriteback(page))
30441da177e4SLinus Torvalds 		return 0;
30451da177e4SLinus Torvalds 
30461da177e4SLinus Torvalds 	if (mapping == NULL) {		/* can this still happen? */
30471da177e4SLinus Torvalds 		ret = drop_buffers(page, &buffers_to_free);
30481da177e4SLinus Torvalds 		goto out;
30491da177e4SLinus Torvalds 	}
30501da177e4SLinus Torvalds 
30511da177e4SLinus Torvalds 	spin_lock(&mapping->private_lock);
30521da177e4SLinus Torvalds 	ret = drop_buffers(page, &buffers_to_free);
3053ecdfc978SLinus Torvalds 
3054ecdfc978SLinus Torvalds 	/*
3055ecdfc978SLinus Torvalds 	 * If the filesystem writes its buffers by hand (eg ext3)
3056ecdfc978SLinus Torvalds 	 * then we can have clean buffers against a dirty page.  We
3057ecdfc978SLinus Torvalds 	 * clean the page here; otherwise the VM will never notice
3058ecdfc978SLinus Torvalds 	 * that the filesystem did any IO at all.
3059ecdfc978SLinus Torvalds 	 *
3060ecdfc978SLinus Torvalds 	 * Also, during truncate, discard_buffer will have marked all
3061ecdfc978SLinus Torvalds 	 * the page's buffers clean.  We discover that here and clean
3062ecdfc978SLinus Torvalds 	 * the page also.
306387df7241SNick Piggin 	 *
306487df7241SNick Piggin 	 * private_lock must be held over this entire operation in order
306587df7241SNick Piggin 	 * to synchronise against __set_page_dirty_buffers and prevent the
306687df7241SNick Piggin 	 * dirty bit from being lost.
3067ecdfc978SLinus Torvalds 	 */
3068ecdfc978SLinus Torvalds 	if (ret)
3069ecdfc978SLinus Torvalds 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
307087df7241SNick Piggin 	spin_unlock(&mapping->private_lock);
30711da177e4SLinus Torvalds out:
30721da177e4SLinus Torvalds 	if (buffers_to_free) {
30731da177e4SLinus Torvalds 		struct buffer_head *bh = buffers_to_free;
30741da177e4SLinus Torvalds 
30751da177e4SLinus Torvalds 		do {
30761da177e4SLinus Torvalds 			struct buffer_head *next = bh->b_this_page;
30771da177e4SLinus Torvalds 			free_buffer_head(bh);
30781da177e4SLinus Torvalds 			bh = next;
30791da177e4SLinus Torvalds 		} while (bh != buffers_to_free);
30801da177e4SLinus Torvalds 	}
30811da177e4SLinus Torvalds 	return ret;
30821da177e4SLinus Torvalds }
30831da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers);
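
/*
 * Illustrative sketch (not part of this file): a minimal ->releasepage
 * implementation lets the VM reclaim a buffer-backed page by dropping its
 * clean, unused buffer_heads.  Real filesystems usually add their own checks
 * (journalling state, private data) before calling try_to_free_buffers().
 */
#if 0	/* illustrative example, not part of the build */
static int foo_releasepage(struct page *page, gfp_t gfp_mask)
{
	return try_to_free_buffers(page);
}
#endif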
30841da177e4SLinus Torvalds 
30853978d717SNeilBrown void block_sync_page(struct page *page)
30861da177e4SLinus Torvalds {
30871da177e4SLinus Torvalds 	struct address_space *mapping;
30881da177e4SLinus Torvalds 
30891da177e4SLinus Torvalds 	smp_mb();
30901da177e4SLinus Torvalds 	mapping = page_mapping(page);
30911da177e4SLinus Torvalds 	if (mapping)
30921da177e4SLinus Torvalds 		blk_run_backing_dev(mapping->backing_dev_info, page);
30931da177e4SLinus Torvalds }
30941da177e4SLinus Torvalds 
30951da177e4SLinus Torvalds /*
30961da177e4SLinus Torvalds  * There are no bdflush tunables left.  But distributions are
30971da177e4SLinus Torvalds  * still running obsolete flush daemons, so we terminate them here.
30981da177e4SLinus Torvalds  *
30991da177e4SLinus Torvalds  * Use of bdflush() is deprecated and will be removed in a future kernel.
31001da177e4SLinus Torvalds  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
31011da177e4SLinus Torvalds  */
31021da177e4SLinus Torvalds asmlinkage long sys_bdflush(int func, long data)
31031da177e4SLinus Torvalds {
31041da177e4SLinus Torvalds 	static int msg_count;
31051da177e4SLinus Torvalds 
31061da177e4SLinus Torvalds 	if (!capable(CAP_SYS_ADMIN))
31071da177e4SLinus Torvalds 		return -EPERM;
31081da177e4SLinus Torvalds 
31091da177e4SLinus Torvalds 	if (msg_count < 5) {
31101da177e4SLinus Torvalds 		msg_count++;
31111da177e4SLinus Torvalds 		printk(KERN_INFO
31121da177e4SLinus Torvalds 			"warning: process `%s' used the obsolete bdflush"
31131da177e4SLinus Torvalds 			" system call\n", current->comm);
31141da177e4SLinus Torvalds 		printk(KERN_INFO "Fix your initscripts?\n");
31151da177e4SLinus Torvalds 	}
31161da177e4SLinus Torvalds 
31171da177e4SLinus Torvalds 	if (func == 1)
31181da177e4SLinus Torvalds 		do_exit(0);
31191da177e4SLinus Torvalds 	return 0;
31201da177e4SLinus Torvalds }
31211da177e4SLinus Torvalds 
31221da177e4SLinus Torvalds /*
31231da177e4SLinus Torvalds  * Buffer-head allocation
31241da177e4SLinus Torvalds  */
3125e18b890bSChristoph Lameter static struct kmem_cache *bh_cachep;
31261da177e4SLinus Torvalds 
31271da177e4SLinus Torvalds /*
31281da177e4SLinus Torvalds  * Once the number of bh's in the machine exceeds this level, we start
31291da177e4SLinus Torvalds  * stripping them in writeback.
31301da177e4SLinus Torvalds  */
31311da177e4SLinus Torvalds static int max_buffer_heads;
31321da177e4SLinus Torvalds 
31331da177e4SLinus Torvalds int buffer_heads_over_limit;
31341da177e4SLinus Torvalds 
31351da177e4SLinus Torvalds struct bh_accounting {
31361da177e4SLinus Torvalds 	int nr;			/* Number of live bh's */
31371da177e4SLinus Torvalds 	int ratelimit;		/* Limit cacheline bouncing */
31381da177e4SLinus Torvalds };
31391da177e4SLinus Torvalds 
31401da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
31411da177e4SLinus Torvalds 
31421da177e4SLinus Torvalds static void recalc_bh_state(void)
31431da177e4SLinus Torvalds {
31441da177e4SLinus Torvalds 	int i;
31451da177e4SLinus Torvalds 	int tot = 0;
31461da177e4SLinus Torvalds 
31471da177e4SLinus Torvalds 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
31481da177e4SLinus Torvalds 		return;
31491da177e4SLinus Torvalds 	__get_cpu_var(bh_accounting).ratelimit = 0;
31508a143426SEric Dumazet 	for_each_online_cpu(i)
31511da177e4SLinus Torvalds 		tot += per_cpu(bh_accounting, i).nr;
31521da177e4SLinus Torvalds 	buffer_heads_over_limit = (tot > max_buffer_heads);
31531da177e4SLinus Torvalds }
31541da177e4SLinus Torvalds 
3155dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
31561da177e4SLinus Torvalds {
3157b98938c3SChristoph Lameter 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep,
3158e12ba74dSMel Gorman 				set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
31591da177e4SLinus Torvalds 	if (ret) {
3160a35afb83SChristoph Lameter 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3161736c7b80SCoywolf Qi Hunt 		get_cpu_var(bh_accounting).nr++;
31621da177e4SLinus Torvalds 		recalc_bh_state();
3163736c7b80SCoywolf Qi Hunt 		put_cpu_var(bh_accounting);
31641da177e4SLinus Torvalds 	}
31651da177e4SLinus Torvalds 	return ret;
31661da177e4SLinus Torvalds }
31671da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head);
31681da177e4SLinus Torvalds 
31691da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh)
31701da177e4SLinus Torvalds {
31711da177e4SLinus Torvalds 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
31721da177e4SLinus Torvalds 	kmem_cache_free(bh_cachep, bh);
3173736c7b80SCoywolf Qi Hunt 	get_cpu_var(bh_accounting).nr--;
31741da177e4SLinus Torvalds 	recalc_bh_state();
3175736c7b80SCoywolf Qi Hunt 	put_cpu_var(bh_accounting);
31761da177e4SLinus Torvalds }
31771da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head);
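
/*
 * Illustrative sketch (not part of the original file): alloc_buffer_head()
 * and free_buffer_head() are meant to be used as a pair so that the per-CPU
 * accounting above stays balanced.  The helpers below are hypothetical;
 * real in-tree callers include the page-buffer allocation and freeing code
 * elsewhere in this file.
 */
static struct buffer_head *example_get_bh(gfp_t gfp)
{
	struct buffer_head *bh = alloc_buffer_head(gfp);

	if (bh)
		bh->b_size = 512;	/* caller-chosen block size */
	return bh;
}

static void example_put_bh(struct buffer_head *bh)
{
	/* b_assoc_buffers must be empty, or free_buffer_head() will BUG */
	free_buffer_head(bh);
}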
31781da177e4SLinus Torvalds 
31791da177e4SLinus Torvalds static void buffer_exit_cpu(int cpu)
31801da177e4SLinus Torvalds {
31811da177e4SLinus Torvalds 	int i;
31821da177e4SLinus Torvalds 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
31831da177e4SLinus Torvalds 
31841da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
31851da177e4SLinus Torvalds 		brelse(b->bhs[i]);
31861da177e4SLinus Torvalds 		b->bhs[i] = NULL;
31871da177e4SLinus Torvalds 	}
31888a143426SEric Dumazet 	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
31898a143426SEric Dumazet 	per_cpu(bh_accounting, cpu).nr = 0;
31908a143426SEric Dumazet 	put_cpu_var(bh_accounting);
31911da177e4SLinus Torvalds }
31921da177e4SLinus Torvalds 
31931da177e4SLinus Torvalds static int buffer_cpu_notify(struct notifier_block *self,
31941da177e4SLinus Torvalds 			      unsigned long action, void *hcpu)
31951da177e4SLinus Torvalds {
31968bb78442SRafael J. Wysocki 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
31971da177e4SLinus Torvalds 		buffer_exit_cpu((unsigned long)hcpu);
31981da177e4SLinus Torvalds 	return NOTIFY_OK;
31991da177e4SLinus Torvalds }
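
/*
 * Note (added for clarity, not in the original file): on CPU_DEAD the
 * notifier above releases the dead CPU's bh_lru references and folds its
 * bh_accounting count into the CPU running the callback, so the totals
 * summed by recalc_bh_state() stay consistent across hotplug.
 */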
32001da177e4SLinus Torvalds 
3201389d1b08SAneesh Kumar K.V /**
3202389d1b08SAneesh Kumar K.V  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3203389d1b08SAneesh Kumar K.V  * @bh: the buffer_head to test
3204389d1b08SAneesh Kumar K.V  *
3205389d1b08SAneesh Kumar K.V  * Return 1 if the buffer is uptodate (leaving it unlocked); otherwise
3206389d1b08SAneesh Kumar K.V  * return 0 with the buffer locked.
3207389d1b08SAneesh Kumar K.V  */
3208389d1b08SAneesh Kumar K.V int bh_uptodate_or_lock(struct buffer_head *bh)
3209389d1b08SAneesh Kumar K.V {
3210389d1b08SAneesh Kumar K.V 	if (!buffer_uptodate(bh)) {
3211389d1b08SAneesh Kumar K.V 		lock_buffer(bh);
3212389d1b08SAneesh Kumar K.V 		if (!buffer_uptodate(bh))
3213389d1b08SAneesh Kumar K.V 			return 0;
3214389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3215389d1b08SAneesh Kumar K.V 	}
3216389d1b08SAneesh Kumar K.V 	return 1;
3217389d1b08SAneesh Kumar K.V }
3218389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_uptodate_or_lock);
3219389d1b08SAneesh Kumar K.V 
3220389d1b08SAneesh Kumar K.V /**
3221389d1b08SAneesh Kumar K.V  * bh_submit_read - Submit a locked buffer for reading
3222389d1b08SAneesh Kumar K.V  * @bh: the buffer_head to read; must be locked by the caller
3223389d1b08SAneesh Kumar K.V  *
3224389d1b08SAneesh Kumar K.V  * Returns zero on success and -EIO on error; the buffer is unlocked on return.
3225389d1b08SAneesh Kumar K.V  */
3226389d1b08SAneesh Kumar K.V int bh_submit_read(struct buffer_head *bh)
3227389d1b08SAneesh Kumar K.V {
3228389d1b08SAneesh Kumar K.V 	BUG_ON(!buffer_locked(bh));
3229389d1b08SAneesh Kumar K.V 
3230389d1b08SAneesh Kumar K.V 	if (buffer_uptodate(bh)) {
3231389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3232389d1b08SAneesh Kumar K.V 		return 0;
3233389d1b08SAneesh Kumar K.V 	}
3234389d1b08SAneesh Kumar K.V 
3235389d1b08SAneesh Kumar K.V 	get_bh(bh);
3236389d1b08SAneesh Kumar K.V 	bh->b_end_io = end_buffer_read_sync;
3237389d1b08SAneesh Kumar K.V 	submit_bh(READ, bh);
3238389d1b08SAneesh Kumar K.V 	wait_on_buffer(bh);
3239389d1b08SAneesh Kumar K.V 	if (buffer_uptodate(bh))
3240389d1b08SAneesh Kumar K.V 		return 0;
3241389d1b08SAneesh Kumar K.V 	return -EIO;
3242389d1b08SAneesh Kumar K.V }
3243389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_submit_read);
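
/*
 * Illustrative sketch (not part of the original file): the intended way to
 * combine the two helpers above.  The function name is hypothetical; a
 * typical caller is a filesystem that already holds a reference on the
 * buffer and wants to avoid re-reading data that is known to be uptodate.
 */
static int example_read_bh_if_needed(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;	/* already uptodate; buffer left unlocked */

	/* Buffer is now locked and not uptodate: do a synchronous read. */
	return bh_submit_read(bh);	/* unlocks the buffer; 0 or -EIO */
}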
3244389d1b08SAneesh Kumar K.V 
3245b98938c3SChristoph Lameter static void
3246b98938c3SChristoph Lameter init_buffer_head(struct kmem_cache *cachep, void *data)
3247b98938c3SChristoph Lameter {
3248b98938c3SChristoph Lameter 	struct buffer_head *bh = data;
3249b98938c3SChristoph Lameter 
3250b98938c3SChristoph Lameter 	memset(bh, 0, sizeof(*bh));
3251b98938c3SChristoph Lameter 	INIT_LIST_HEAD(&bh->b_assoc_buffers);
3252b98938c3SChristoph Lameter }
3253b98938c3SChristoph Lameter 
32541da177e4SLinus Torvalds void __init buffer_init(void)
32551da177e4SLinus Torvalds {
32561da177e4SLinus Torvalds 	int nrpages;
32571da177e4SLinus Torvalds 
3258b98938c3SChristoph Lameter 	bh_cachep = kmem_cache_create("buffer_head",
3259b98938c3SChristoph Lameter 			sizeof(struct buffer_head), 0,
3260b98938c3SChristoph Lameter 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3261b98938c3SChristoph Lameter 				SLAB_MEM_SPREAD),
3262b98938c3SChristoph Lameter 				init_buffer_head);
32631da177e4SLinus Torvalds 
32641da177e4SLinus Torvalds 	/*
32651da177e4SLinus Torvalds 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
32661da177e4SLinus Torvalds 	 */
32671da177e4SLinus Torvalds 	nrpages = (nr_free_buffer_pages() * 10) / 100;
32681da177e4SLinus Torvalds 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
32691da177e4SLinus Torvalds 	hotcpu_notifier(buffer_cpu_notify, 0);
32701da177e4SLinus Torvalds }
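
/*
 * Worked example (illustrative, not from the original file): with 4KB pages,
 * roughly 1,000,000 free buffer pages and sizeof(struct buffer_head) around
 * 100 bytes on a 64-bit build, nrpages above is 100,000, each page holds
 * about 40 buffer heads, and max_buffer_heads comes out near 4,000,000.
 */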
32711da177e4SLinus Torvalds 
32721da177e4SLinus Torvalds EXPORT_SYMBOL(__bforget);
32731da177e4SLinus Torvalds EXPORT_SYMBOL(__brelse);
32741da177e4SLinus Torvalds EXPORT_SYMBOL(__wait_on_buffer);
32751da177e4SLinus Torvalds EXPORT_SYMBOL(block_commit_write);
32761da177e4SLinus Torvalds EXPORT_SYMBOL(block_prepare_write);
327754171690SDavid Chinner EXPORT_SYMBOL(block_page_mkwrite);
32781da177e4SLinus Torvalds EXPORT_SYMBOL(block_read_full_page);
32791da177e4SLinus Torvalds EXPORT_SYMBOL(block_sync_page);
32801da177e4SLinus Torvalds EXPORT_SYMBOL(block_truncate_page);
32811da177e4SLinus Torvalds EXPORT_SYMBOL(block_write_full_page);
328289e10787SNick Piggin EXPORT_SYMBOL(cont_write_begin);
32831da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_read_sync);
32841da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_write_sync);
32851da177e4SLinus Torvalds EXPORT_SYMBOL(file_fsync);
32861da177e4SLinus Torvalds EXPORT_SYMBOL(fsync_bdev);
32871da177e4SLinus Torvalds EXPORT_SYMBOL(generic_block_bmap);
32881da177e4SLinus Torvalds EXPORT_SYMBOL(generic_commit_write);
328905eb0b51SOGAWA Hirofumi EXPORT_SYMBOL(generic_cont_expand_simple);
32901da177e4SLinus Torvalds EXPORT_SYMBOL(init_buffer);
32911da177e4SLinus Torvalds EXPORT_SYMBOL(invalidate_bdev);
32921da177e4SLinus Torvalds EXPORT_SYMBOL(ll_rw_block);
32931da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_dirty);
32941da177e4SLinus Torvalds EXPORT_SYMBOL(submit_bh);
32951da177e4SLinus Torvalds EXPORT_SYMBOL(sync_dirty_buffer);
32961da177e4SLinus Torvalds EXPORT_SYMBOL(unlock_buffer);
3297