/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}

static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}

void fastcall __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void fastcall unlock_buffer(struct buffer_head *bh)
{
	smp_mb__before_clear_bit();
	clear_buffer_locked(bh);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
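
/*
 * Usage sketch (illustrative, not part of the original source): writers
 * take the buffer lock around modifications, while wait_on_buffer() only
 * waits for in-flight I/O or other lock holders to finish:
 *
 *	lock_buffer(bh);
 *	... exclusive access to the buffer ...
 *	unlock_buffer(bh);
 *
 *	wait_on_buffer(bh);
 *	if (buffer_uptodate(bh))
 *		... the buffer's data may now be examined ...
 */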

static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}

static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
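
/*
 * Sketch of the classic synchronous-read pattern this handler completes
 * (illustrative; the extra reference balances the put_bh() above):
 *
 *	get_bh(bh);
 *	lock_buffer(bh);
 *	bh->b_end_io = end_buffer_read_sync;
 *	submit_bh(READ, bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		... the read failed ...
 */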

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	int ret = 0;

	if (bdev)
		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
	return ret;
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device: filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = fsync_super(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}

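/*
 * Illustrative sketch (an assumption about callers, not taken from this
 * file): an ioctl such as BLKFLSBUF can combine these helpers to flush
 * and then discard a device's cached state:
 *
 *	error = fsync_bdev(bdev);
 *	invalidate_bdev(bdev);
 */
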
/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;

	down(&bdev->bd_mount_sem);
	sb = get_super(bdev);
	if (sb && !(sb->s_flags & MS_RDONLY)) {
		sb->s_frozen = SB_FREEZE_WRITE;
		smp_wmb();

		__fsync_super(sb);

		sb->s_frozen = SB_FREEZE_TRANS;
		smp_wmb();

		sync_blockdev(sb->s_bdev);

		if (sb->s_op->write_super_lockfs)
			sb->s_op->write_super_lockfs(sb);
	}

	sync_blockdev(bdev);
	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
void thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	if (sb) {
		BUG_ON(sb->s_bdev != bdev);

		if (sb->s_op->unlockfs)
			sb->s_op->unlockfs(sb);
		sb->s_frozen = SB_UNFROZEN;
		smp_wmb();
		wake_up(&sb->s_wait_unfrozen);
		drop_super(sb);
	}

	up(&bdev->bd_mount_sem);
}
EXPORT_SYMBOL(thaw_bdev);
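
/*
 * Usage sketch (illustrative): a snapshotting driver brackets its copy
 * with the two calls above; the superblock returned by freeze_bdev()
 * (possibly NULL for an unmounted device) is handed back to thaw_bdev():
 *
 *	struct super_block *sb = freeze_bdev(bdev);
 *	... take the snapshot; new writes to bdev are held off ...
 *	thaw_bdev(bdev, sb);
 */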

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		if (!buffer_mapped(bh))
			all_mapped = 0;
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on.  Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers.  For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: in the case where the user removed a removable-media disk, even
   if there's still dirty data not synced on disk (due to a bug in the
   device driver or to a user error), by not destroying the dirty buffers
   we could generate corruption also on the next media inserted.  Thus a
   parameter is necessary to handle this case in the safest way possible
   (trying not to corrupt the newly inserted disk with data belonging to
   the old, now corrupted, disk).  Also for the ramdisk the natural thing
   to do in order to release the ramdisk memory is to destroy dirty buffers.

   These are two special cases.  Normal usage implies that the device
   driver issues a sync on the device (without waiting for I/O completion)
   and then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced.  It is needed to re-read from disk any pinned
   buffer.  NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	invalidate_mapping_pages(mapping, 0, -1);
}
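
/*
 * Sketch of the "normal usage" described above (illustrative): a driver
 * writes back what it can and then drops clean cached data, leaving
 * dirty and busy buffers untouched:
 *
 *	sync_blockdev(bdev);
 *	invalidate_bdev(bdev);
 */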

/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone **zones;
	pg_data_t *pgdat;

	wakeup_pdflush(1024);
	yield();

	for_each_online_pgdat(pgdat) {
		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
		if (*zones)
			try_to_free_pages(zones, GFP_NOFS);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (printk_ratelimit())
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_write;
	set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);
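
/*
 * Usage sketch (illustrative; __block_write_full_page() later in this
 * file is the canonical caller): a writepage implementation marks each
 * dirty, mapped buffer under the page lock, then submits them:
 *
 *	do {
 *		if (buffer_mapped(bh) && buffer_dirty(bh)) {
 *			lock_buffer(bh);
 *			mark_buffer_async_write(bh);
 *		}
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 *
 *	set_page_writeback(page);
 *	... then clear_buffer_dirty() and submit_bh(WRITE, ...) each
 *	marked buffer, and unlock the page ...
 */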


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers, which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static inline void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_buffers_list to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}
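
/*
 * Sketch of the O_SYNC pattern described above (illustrative):
 *
 *	ll_rw_block(WRITE, 1, &bh);		as each buffer is dirtied
 *	...
 *	err = osync_buffers_list(lock, list);	then wait for them all
 */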

/**
 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
 *                        buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	}
	if (list_empty(&bh->b_assoc_buffers)) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
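
/*
 * Usage sketch (illustrative, assuming an ext2-like filesystem): metadata
 * buffers are attached to the inode's list as they are dirtied, and the
 * filesystem's fsync then writes and waits on them via that list:
 *
 *	mark_buffer_dirty_inode(bh, inode);	e.g. an indirect block
 *	...
 *	err = sync_mapping_buffers(inode->i_mapping);
 */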

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	struct address_space * const mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	spin_unlock(&mapping->private_lock);

	if (TestSetPageDirty(page))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		if (mapping_cap_account_dirty(mapping)) {
			__inc_zone_page_state(page, NR_FILE_DIRTY);
			task_io_account_write(PAGE_CACHE_SIZE);
		}
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	write_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return 1;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
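
/*
 * Sketch (illustrative assumption, names hypothetical): buffer-backed
 * filesystems reach this function through the set_page_dirty() default
 * for mappings without a ->set_page_dirty method, or by naming it
 * explicitly in their address_space_operations:
 *
 *	static const struct address_space_operations foo_aops = {
 *		.readpage	= foo_readpage,
 *		.writepage	= foo_writepage,
 *		.set_page_dirty	= __set_page_dirty_buffers,
 *	};
 */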

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		__remove_assoc_queue(bh);
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				ll_rw_block(SWRITE, 1, &bh);
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		list_del_init(&bh->b_assoc_buffers);
		get_bh(bh);
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a page for the data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_private = NULL;
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);

	do {
		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;

	page = find_or_create_page(inode->i_mapping, index,
		mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
	if (!page)
		return NULL;

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			init_page_buffers(page, bdev, block, size);
			return page;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, 0);
	if (!bh)
		goto failed;

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	init_page_buffers(page, bdev, block, size);
	spin_unlock(&inode->i_mapping->private_lock);
	return page;

failed:
	BUG();
	unlock_page(page);
	page_cache_release(page);
	return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 *
 * Except that's a bug.  Attaching dirty buffers to a dirty
 * blockdev's page can result in filesystem corruption, because
 * some of those buffers may be aliases of filesystem data.
 * grow_dev_page() will go BUG() if this happens.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
	struct page *page;
	pgoff_t index;
	int sizebits;

	sizebits = -1;
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);

	index = block >> sizebits;

1049e5657933SAndrew Morton 	/*
1050e5657933SAndrew Morton 	 * Check for a block which wants to lie outside our maximum possible
1051e5657933SAndrew Morton 	 * pagecache index.  (this comparison is done using sector_t types).
1052e5657933SAndrew Morton 	 */
1053e5657933SAndrew Morton 	if (unlikely(index != block >> sizebits)) {
1054e5657933SAndrew Morton 		char b[BDEVNAME_SIZE];
1055e5657933SAndrew Morton 
1056e5657933SAndrew Morton 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1057e5657933SAndrew Morton 			"device %s\n",
1058e5657933SAndrew Morton 			__FUNCTION__, (unsigned long long)block,
1059e5657933SAndrew Morton 			bdevname(bdev, b));
1060e5657933SAndrew Morton 		return -EIO;
1061e5657933SAndrew Morton 	}
1062e5657933SAndrew Morton 	block = index << sizebits;
10631da177e4SLinus Torvalds 	/* Create a page with the proper size buffers.. */
10641da177e4SLinus Torvalds 	page = grow_dev_page(bdev, block, index, size);
10651da177e4SLinus Torvalds 	if (!page)
10661da177e4SLinus Torvalds 		return 0;
10671da177e4SLinus Torvalds 	unlock_page(page);
10681da177e4SLinus Torvalds 	page_cache_release(page);
10691da177e4SLinus Torvalds 	return 1;
10701da177e4SLinus Torvalds }
10711da177e4SLinus Torvalds 
107275c96f85SAdrian Bunk static struct buffer_head *
10731da177e4SLinus Torvalds __getblk_slow(struct block_device *bdev, sector_t block, int size)
10741da177e4SLinus Torvalds {
10751da177e4SLinus Torvalds 	/* Size must be a multiple of the hard sector size */
10761da177e4SLinus Torvalds 	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
10771da177e4SLinus Torvalds 			(size < 512 || size > PAGE_SIZE))) {
10781da177e4SLinus Torvalds 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
10791da177e4SLinus Torvalds 					size);
10801da177e4SLinus Torvalds 		printk(KERN_ERR "hardsect size: %d\n",
10811da177e4SLinus Torvalds 					bdev_hardsect_size(bdev));
10821da177e4SLinus Torvalds 
10831da177e4SLinus Torvalds 		dump_stack();
10841da177e4SLinus Torvalds 		return NULL;
10851da177e4SLinus Torvalds 	}
10861da177e4SLinus Torvalds 
10871da177e4SLinus Torvalds 	for (;;) {
10881da177e4SLinus Torvalds 		struct buffer_head * bh;
1089e5657933SAndrew Morton 		int ret;
10901da177e4SLinus Torvalds 
10911da177e4SLinus Torvalds 		bh = __find_get_block(bdev, block, size);
10921da177e4SLinus Torvalds 		if (bh)
10931da177e4SLinus Torvalds 			return bh;
10941da177e4SLinus Torvalds 
1095e5657933SAndrew Morton 		ret = grow_buffers(bdev, block, size);
1096e5657933SAndrew Morton 		if (ret < 0)
1097e5657933SAndrew Morton 			return NULL;
1098e5657933SAndrew Morton 		if (ret == 0)
10991da177e4SLinus Torvalds 			free_more_memory();
11001da177e4SLinus Torvalds 	}
11011da177e4SLinus Torvalds }
11021da177e4SLinus Torvalds 
11031da177e4SLinus Torvalds /*
11041da177e4SLinus Torvalds  * The relationship between dirty buffers and dirty pages:
11051da177e4SLinus Torvalds  *
11061da177e4SLinus Torvalds  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
11071da177e4SLinus Torvalds  * the page is tagged dirty in its radix tree.
11081da177e4SLinus Torvalds  *
11091da177e4SLinus Torvalds  * At all times, the dirtiness of the buffers represents the dirtiness of
11101da177e4SLinus Torvalds  * subsections of the page.  If the page has buffers, the page dirty bit is
11111da177e4SLinus Torvalds  * merely a hint about the true dirty state.
11121da177e4SLinus Torvalds  *
11131da177e4SLinus Torvalds  * When a page is set dirty in its entirety, all its buffers are marked dirty
11141da177e4SLinus Torvalds  * (if the page has buffers).
11151da177e4SLinus Torvalds  *
11161da177e4SLinus Torvalds  * When a buffer is marked dirty, its page is dirtied, but the page's other
11171da177e4SLinus Torvalds  * buffers are not.
11181da177e4SLinus Torvalds  *
11191da177e4SLinus Torvalds  * Also.  When blockdev buffers are explicitly read with bread(), they
11201da177e4SLinus Torvalds  * individually become uptodate.  But their backing page remains not
11211da177e4SLinus Torvalds  * uptodate - even if all of its buffers are uptodate.  A subsequent
11221da177e4SLinus Torvalds  * block_read_full_page() against that page will discover all the uptodate
11231da177e4SLinus Torvalds  * buffers, will set the page uptodate and will perform no I/O.
11241da177e4SLinus Torvalds  */
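
/*
 * Illustrative sketch (hypothetical caller, not part of this file),
 * assuming a locked, buffer-backed page:
 *
 *	struct buffer_head *head = page_buffers(page);
 *
 *	mark_buffer_dirty(head);
 *
 * After this, PageDirty(page) is set, but the dirty state of every
 * other buffer on the page is unchanged.
 */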
11251da177e4SLinus Torvalds 
11261da177e4SLinus Torvalds /**
11271da177e4SLinus Torvalds  * mark_buffer_dirty - mark a buffer_head as needing writeout
112867be2dd1SMartin Waitz  * @bh: the buffer_head to mark dirty
11291da177e4SLinus Torvalds  *
11301da177e4SLinus Torvalds  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
11311da177e4SLinus Torvalds  * backing page dirty, then tag the page as dirty in its address_space's radix
11321da177e4SLinus Torvalds  * tree and then attach the address_space's inode to its superblock's dirty
11331da177e4SLinus Torvalds  * inode list.
11341da177e4SLinus Torvalds  *
11351da177e4SLinus Torvalds  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
11361da177e4SLinus Torvalds  * mapping->tree_lock and the global inode_lock.
11371da177e4SLinus Torvalds  */
11381da177e4SLinus Torvalds void fastcall mark_buffer_dirty(struct buffer_head *bh)
11391da177e4SLinus Torvalds {
11401da177e4SLinus Torvalds 	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
11411da177e4SLinus Torvalds 		__set_page_dirty_nobuffers(bh->b_page);
11421da177e4SLinus Torvalds }
11431da177e4SLinus Torvalds 
11441da177e4SLinus Torvalds /*
11451da177e4SLinus Torvalds  * Decrement a buffer_head's reference count.  If all buffers against a page
11461da177e4SLinus Torvalds  * have zero reference count, are clean and unlocked, and if the page is clean
11471da177e4SLinus Torvalds  * and unlocked then try_to_free_buffers() may strip the buffers from the page
11481da177e4SLinus Torvalds  * in preparation for freeing it (sometimes, rarely, buffers are removed from
11491da177e4SLinus Torvalds  * a page but it ends up not being freed, and buffers may later be reattached).
11501da177e4SLinus Torvalds  */
11511da177e4SLinus Torvalds void __brelse(struct buffer_head * buf)
11521da177e4SLinus Torvalds {
11531da177e4SLinus Torvalds 	if (atomic_read(&buf->b_count)) {
11541da177e4SLinus Torvalds 		put_bh(buf);
11551da177e4SLinus Torvalds 		return;
11561da177e4SLinus Torvalds 	}
11571da177e4SLinus Torvalds 	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
11581da177e4SLinus Torvalds 	WARN_ON(1);
11591da177e4SLinus Torvalds }
11601da177e4SLinus Torvalds 
11611da177e4SLinus Torvalds /*
11621da177e4SLinus Torvalds  * bforget() is like brelse(), except it discards any
11631da177e4SLinus Torvalds  * potentially dirty data.
11641da177e4SLinus Torvalds  */
11651da177e4SLinus Torvalds void __bforget(struct buffer_head *bh)
11661da177e4SLinus Torvalds {
11671da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
11681da177e4SLinus Torvalds 	if (!list_empty(&bh->b_assoc_buffers)) {
11691da177e4SLinus Torvalds 		struct address_space *buffer_mapping = bh->b_page->mapping;
11701da177e4SLinus Torvalds 
11711da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
11721da177e4SLinus Torvalds 		list_del_init(&bh->b_assoc_buffers);
117358ff407bSJan Kara 		bh->b_assoc_map = NULL;
11741da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
11751da177e4SLinus Torvalds 	}
11761da177e4SLinus Torvalds 	__brelse(bh);
11771da177e4SLinus Torvalds }
11781da177e4SLinus Torvalds 
11791da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh)
11801da177e4SLinus Torvalds {
11811da177e4SLinus Torvalds 	lock_buffer(bh);
11821da177e4SLinus Torvalds 	if (buffer_uptodate(bh)) {
11831da177e4SLinus Torvalds 		unlock_buffer(bh);
11841da177e4SLinus Torvalds 		return bh;
11851da177e4SLinus Torvalds 	} else {
11861da177e4SLinus Torvalds 		get_bh(bh);
11871da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_read_sync;
11881da177e4SLinus Torvalds 		submit_bh(READ, bh);
11891da177e4SLinus Torvalds 		wait_on_buffer(bh);
11901da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
11911da177e4SLinus Torvalds 			return bh;
11921da177e4SLinus Torvalds 	}
11931da177e4SLinus Torvalds 	brelse(bh);
11941da177e4SLinus Torvalds 	return NULL;
11951da177e4SLinus Torvalds }
11961da177e4SLinus Torvalds 
11971da177e4SLinus Torvalds /*
11981da177e4SLinus Torvalds  * Per-cpu buffer LRU implementation, used to reduce the cost of __find_get_block().
11991da177e4SLinus Torvalds  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
12001da177e4SLinus Torvalds  * refcount elevated by one when they're in an LRU.  A buffer can only appear
12011da177e4SLinus Torvalds  * once in a particular CPU's LRU.  A single buffer can be present in multiple
12021da177e4SLinus Torvalds  * CPUs' LRUs at the same time.
12031da177e4SLinus Torvalds  *
12041da177e4SLinus Torvalds  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
12051da177e4SLinus Torvalds  * sb_find_get_block().
12061da177e4SLinus Torvalds  *
12071da177e4SLinus Torvalds  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
12081da177e4SLinus Torvalds  * a local interrupt disable for that.
12091da177e4SLinus Torvalds  */
12101da177e4SLinus Torvalds 
12111da177e4SLinus Torvalds #define BH_LRU_SIZE	8
12121da177e4SLinus Torvalds 
12131da177e4SLinus Torvalds struct bh_lru {
12141da177e4SLinus Torvalds 	struct buffer_head *bhs[BH_LRU_SIZE];
12151da177e4SLinus Torvalds };
12161da177e4SLinus Torvalds 
12171da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
12181da177e4SLinus Torvalds 
12191da177e4SLinus Torvalds #ifdef CONFIG_SMP
12201da177e4SLinus Torvalds #define bh_lru_lock()	local_irq_disable()
12211da177e4SLinus Torvalds #define bh_lru_unlock()	local_irq_enable()
12221da177e4SLinus Torvalds #else
12231da177e4SLinus Torvalds #define bh_lru_lock()	preempt_disable()
12241da177e4SLinus Torvalds #define bh_lru_unlock()	preempt_enable()
12251da177e4SLinus Torvalds #endif
12261da177e4SLinus Torvalds 
12271da177e4SLinus Torvalds static inline void check_irqs_on(void)
12281da177e4SLinus Torvalds {
12291da177e4SLinus Torvalds #ifdef irqs_disabled
12301da177e4SLinus Torvalds 	BUG_ON(irqs_disabled());
12311da177e4SLinus Torvalds #endif
12321da177e4SLinus Torvalds }
12331da177e4SLinus Torvalds 
12341da177e4SLinus Torvalds /*
12351da177e4SLinus Torvalds  * The LRU management algorithm is dopey-but-simple.  Sorry.
12361da177e4SLinus Torvalds  */
12371da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh)
12381da177e4SLinus Torvalds {
12391da177e4SLinus Torvalds 	struct buffer_head *evictee = NULL;
12401da177e4SLinus Torvalds 	struct bh_lru *lru;
12411da177e4SLinus Torvalds 
12421da177e4SLinus Torvalds 	check_irqs_on();
12431da177e4SLinus Torvalds 	bh_lru_lock();
12441da177e4SLinus Torvalds 	lru = &__get_cpu_var(bh_lrus);
12451da177e4SLinus Torvalds 	if (lru->bhs[0] != bh) {
12461da177e4SLinus Torvalds 		struct buffer_head *bhs[BH_LRU_SIZE];
12471da177e4SLinus Torvalds 		int in;
12481da177e4SLinus Torvalds 		int out = 0;
12491da177e4SLinus Torvalds 
12501da177e4SLinus Torvalds 		get_bh(bh);
12511da177e4SLinus Torvalds 		bhs[out++] = bh;
12521da177e4SLinus Torvalds 		for (in = 0; in < BH_LRU_SIZE; in++) {
12531da177e4SLinus Torvalds 			struct buffer_head *bh2 = lru->bhs[in];
12541da177e4SLinus Torvalds 
12551da177e4SLinus Torvalds 			if (bh2 == bh) {
12561da177e4SLinus Torvalds 				__brelse(bh2);
12571da177e4SLinus Torvalds 			} else {
12581da177e4SLinus Torvalds 				if (out >= BH_LRU_SIZE) {
12591da177e4SLinus Torvalds 					BUG_ON(evictee != NULL);
12601da177e4SLinus Torvalds 					evictee = bh2;
12611da177e4SLinus Torvalds 				} else {
12621da177e4SLinus Torvalds 					bhs[out++] = bh2;
12631da177e4SLinus Torvalds 				}
12641da177e4SLinus Torvalds 			}
12651da177e4SLinus Torvalds 		}
12661da177e4SLinus Torvalds 		while (out < BH_LRU_SIZE)
12671da177e4SLinus Torvalds 			bhs[out++] = NULL;
12681da177e4SLinus Torvalds 		memcpy(lru->bhs, bhs, sizeof(bhs));
12691da177e4SLinus Torvalds 	}
12701da177e4SLinus Torvalds 	bh_lru_unlock();
12711da177e4SLinus Torvalds 
12721da177e4SLinus Torvalds 	if (evictee)
12731da177e4SLinus Torvalds 		__brelse(evictee);
12741da177e4SLinus Torvalds }
12751da177e4SLinus Torvalds 
12761da177e4SLinus Torvalds /*
12771da177e4SLinus Torvalds  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
12781da177e4SLinus Torvalds  */
1279858119e1SArjan van de Ven static struct buffer_head *
12803991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
12811da177e4SLinus Torvalds {
12821da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
12831da177e4SLinus Torvalds 	struct bh_lru *lru;
12843991d3bdSTomasz Kvarsin 	unsigned int i;
12851da177e4SLinus Torvalds 
12861da177e4SLinus Torvalds 	check_irqs_on();
12871da177e4SLinus Torvalds 	bh_lru_lock();
12881da177e4SLinus Torvalds 	lru = &__get_cpu_var(bh_lrus);
12891da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
12901da177e4SLinus Torvalds 		struct buffer_head *bh = lru->bhs[i];
12911da177e4SLinus Torvalds 
12921da177e4SLinus Torvalds 		if (bh && bh->b_bdev == bdev &&
12931da177e4SLinus Torvalds 				bh->b_blocknr == block && bh->b_size == size) {
12941da177e4SLinus Torvalds 			if (i) {
12951da177e4SLinus Torvalds 				while (i) {
12961da177e4SLinus Torvalds 					lru->bhs[i] = lru->bhs[i - 1];
12971da177e4SLinus Torvalds 					i--;
12981da177e4SLinus Torvalds 				}
12991da177e4SLinus Torvalds 				lru->bhs[0] = bh;
13001da177e4SLinus Torvalds 			}
13011da177e4SLinus Torvalds 			get_bh(bh);
13021da177e4SLinus Torvalds 			ret = bh;
13031da177e4SLinus Torvalds 			break;
13041da177e4SLinus Torvalds 		}
13051da177e4SLinus Torvalds 	}
13061da177e4SLinus Torvalds 	bh_lru_unlock();
13071da177e4SLinus Torvalds 	return ret;
13081da177e4SLinus Torvalds }
13091da177e4SLinus Torvalds 
13101da177e4SLinus Torvalds /*
13111da177e4SLinus Torvalds  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
13121da177e4SLinus Torvalds  * it in the LRU and mark it as accessed.  If it is not present then return
13131da177e4SLinus Torvalds  * NULL.
13141da177e4SLinus Torvalds  */
13151da177e4SLinus Torvalds struct buffer_head *
13163991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
13171da177e4SLinus Torvalds {
13181da177e4SLinus Torvalds 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
13191da177e4SLinus Torvalds 
13201da177e4SLinus Torvalds 	if (bh == NULL) {
1321385fd4c5SCoywolf Qi Hunt 		bh = __find_get_block_slow(bdev, block);
13221da177e4SLinus Torvalds 		if (bh)
13231da177e4SLinus Torvalds 			bh_lru_install(bh);
13241da177e4SLinus Torvalds 	}
13251da177e4SLinus Torvalds 	if (bh)
13261da177e4SLinus Torvalds 		touch_buffer(bh);
13271da177e4SLinus Torvalds 	return bh;
13281da177e4SLinus Torvalds }
13291da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block);
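
/*
 * Usage sketch (hypothetical caller, not part of this file): look up a
 * block that may already be cached, without allocating or reading
 * anything if it is not:
 *
 *	struct buffer_head *bh;
 *
 *	bh = __find_get_block(bdev, blocknr, blocksize);
 *	if (bh) {
 *		...
 *		brelse(bh);
 *	}
 *
 * The returned buffer has an elevated refcount, so brelse() must be
 * called when the caller is done with it.
 */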
13301da177e4SLinus Torvalds 
13311da177e4SLinus Torvalds /*
13321da177e4SLinus Torvalds  * __getblk will locate (and, if necessary, create) the buffer_head
13331da177e4SLinus Torvalds  * which corresponds to the passed block_device, block and size. The
13341da177e4SLinus Torvalds  * returned buffer has its reference count incremented.
13351da177e4SLinus Torvalds  *
13361da177e4SLinus Torvalds  * __getblk() cannot fail - it just keeps trying.  If you pass it an
13371da177e4SLinus Torvalds  * illegal block number, __getblk() will happily return a buffer_head
13381da177e4SLinus Torvalds  * which represents the non-existent block.  Very weird.
13391da177e4SLinus Torvalds  *
13401da177e4SLinus Torvalds  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
13411da177e4SLinus Torvalds  * attempt keeps failing.  FIXME, perhaps?
13421da177e4SLinus Torvalds  */
13431da177e4SLinus Torvalds struct buffer_head *
13443991d3bdSTomasz Kvarsin __getblk(struct block_device *bdev, sector_t block, unsigned size)
13451da177e4SLinus Torvalds {
13461da177e4SLinus Torvalds 	struct buffer_head *bh = __find_get_block(bdev, block, size);
13471da177e4SLinus Torvalds 
13481da177e4SLinus Torvalds 	might_sleep();
13491da177e4SLinus Torvalds 	if (bh == NULL)
13501da177e4SLinus Torvalds 		bh = __getblk_slow(bdev, block, size);
13511da177e4SLinus Torvalds 	return bh;
13521da177e4SLinus Torvalds }
13531da177e4SLinus Torvalds EXPORT_SYMBOL(__getblk);
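
/*
 * Usage sketch (hypothetical caller, not part of this file): grab a
 * buffer for a block that is about to be completely overwritten, so no
 * prior read is needed:
 *
 *	struct buffer_head *bh = __getblk(bdev, blocknr, blocksize);
 *
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */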
13541da177e4SLinus Torvalds 
13551da177e4SLinus Torvalds /*
13561da177e4SLinus Torvalds  * Do async read-ahead on a buffer..
13571da177e4SLinus Torvalds  */
13583991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
13591da177e4SLinus Torvalds {
13601da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
1361a3e713b5SAndrew Morton 	if (likely(bh)) {
13621da177e4SLinus Torvalds 		ll_rw_block(READA, 1, &bh);
13631da177e4SLinus Torvalds 		brelse(bh);
13641da177e4SLinus Torvalds 	}
1365a3e713b5SAndrew Morton }
13661da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead);
13671da177e4SLinus Torvalds 
13681da177e4SLinus Torvalds /**
13691da177e4SLinus Torvalds  *  __bread() - reads a specified block and returns the bh
137067be2dd1SMartin Waitz  *  @bdev: the block_device to read from
13711da177e4SLinus Torvalds  *  @block: number of block
13721da177e4SLinus Torvalds  *  @size: size (in bytes) to read
13731da177e4SLinus Torvalds  *
13741da177e4SLinus Torvalds  *  Reads a specified block, and returns buffer head that contains it.
13751da177e4SLinus Torvalds  *  It returns NULL if the block was unreadable.
13761da177e4SLinus Torvalds  */
13771da177e4SLinus Torvalds struct buffer_head *
13783991d3bdSTomasz Kvarsin __bread(struct block_device *bdev, sector_t block, unsigned size)
13791da177e4SLinus Torvalds {
13801da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
13811da177e4SLinus Torvalds 
1382a3e713b5SAndrew Morton 	if (likely(bh) && !buffer_uptodate(bh))
13831da177e4SLinus Torvalds 		bh = __bread_slow(bh);
13841da177e4SLinus Torvalds 	return bh;
13851da177e4SLinus Torvalds }
13861da177e4SLinus Torvalds EXPORT_SYMBOL(__bread);
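
/*
 * Usage sketch (hypothetical caller, not part of this file): read a
 * metadata block synchronously; filesystems normally go through the
 * sb_bread() wrapper, which supplies the superblock's bdev and block
 * size:
 *
 *	struct buffer_head *bh = __bread(bdev, blocknr, blocksize);
 *
 *	if (!bh)
 *		return -EIO;
 *	...
 *	brelse(bh);
 */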
13871da177e4SLinus Torvalds 
13881da177e4SLinus Torvalds /*
13891da177e4SLinus Torvalds  * invalidate_bh_lrus() is called rarely - but not only at unmount.
13901da177e4SLinus Torvalds  * This doesn't race because it runs in each cpu either in irq
13911da177e4SLinus Torvalds  * or with preempt disabled.
13921da177e4SLinus Torvalds  */
13931da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg)
13941da177e4SLinus Torvalds {
13951da177e4SLinus Torvalds 	struct bh_lru *b = &get_cpu_var(bh_lrus);
13961da177e4SLinus Torvalds 	int i;
13971da177e4SLinus Torvalds 
13981da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
13991da177e4SLinus Torvalds 		brelse(b->bhs[i]);
14001da177e4SLinus Torvalds 		b->bhs[i] = NULL;
14011da177e4SLinus Torvalds 	}
14021da177e4SLinus Torvalds 	put_cpu_var(bh_lrus);
14031da177e4SLinus Torvalds }
14041da177e4SLinus Torvalds 
1405f9a14399SPeter Zijlstra void invalidate_bh_lrus(void)
14061da177e4SLinus Torvalds {
14071da177e4SLinus Torvalds 	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
14081da177e4SLinus Torvalds }
14091da177e4SLinus Torvalds 
14101da177e4SLinus Torvalds void set_bh_page(struct buffer_head *bh,
14111da177e4SLinus Torvalds 		struct page *page, unsigned long offset)
14121da177e4SLinus Torvalds {
14131da177e4SLinus Torvalds 	bh->b_page = page;
1414e827f923SEric Sesterhenn 	BUG_ON(offset >= PAGE_SIZE);
14151da177e4SLinus Torvalds 	if (PageHighMem(page))
14161da177e4SLinus Torvalds 		/*
14171da177e4SLinus Torvalds 		 * This catches illegal uses and preserves the offset:
14181da177e4SLinus Torvalds 		 */
14191da177e4SLinus Torvalds 		bh->b_data = (char *)(0 + offset);
14201da177e4SLinus Torvalds 	else
14211da177e4SLinus Torvalds 		bh->b_data = page_address(page) + offset;
14221da177e4SLinus Torvalds }
14231da177e4SLinus Torvalds EXPORT_SYMBOL(set_bh_page);
14241da177e4SLinus Torvalds 
14251da177e4SLinus Torvalds /*
14261da177e4SLinus Torvalds  * Called when truncating a buffer on a page completely.
14271da177e4SLinus Torvalds  */
1428858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh)
14291da177e4SLinus Torvalds {
14301da177e4SLinus Torvalds 	lock_buffer(bh);
14311da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
14321da177e4SLinus Torvalds 	bh->b_bdev = NULL;
14331da177e4SLinus Torvalds 	clear_buffer_mapped(bh);
14341da177e4SLinus Torvalds 	clear_buffer_req(bh);
14351da177e4SLinus Torvalds 	clear_buffer_new(bh);
14361da177e4SLinus Torvalds 	clear_buffer_delay(bh);
143733a266ddSDavid Chinner 	clear_buffer_unwritten(bh);
14381da177e4SLinus Torvalds 	unlock_buffer(bh);
14391da177e4SLinus Torvalds }
14401da177e4SLinus Torvalds 
14411da177e4SLinus Torvalds /**
14421da177e4SLinus Torvalds  * block_invalidatepage - invalidate part or all of a buffer-backed page
14431da177e4SLinus Torvalds  *
14441da177e4SLinus Torvalds  * @page: the page which is affected
14451da177e4SLinus Torvalds  * @offset: the index of the truncation point
14461da177e4SLinus Torvalds  *
14471da177e4SLinus Torvalds  * block_invalidatepage() is called when all or part of the page has become
14481da177e4SLinus Torvalds  * invalidated by a truncate operation.
14491da177e4SLinus Torvalds  *
14501da177e4SLinus Torvalds  * block_invalidatepage() does not have to release all buffers, but it must
14511da177e4SLinus Torvalds  * ensure that no dirty buffer is left outside @offset and that no I/O
14521da177e4SLinus Torvalds  * is underway against any of the blocks which are outside the truncation
14531da177e4SLinus Torvalds  * point, because the caller is about to free (and possibly reuse) those
14541da177e4SLinus Torvalds  * blocks on-disk.
14551da177e4SLinus Torvalds  */
14562ff28e22SNeilBrown void block_invalidatepage(struct page *page, unsigned long offset)
14571da177e4SLinus Torvalds {
14581da177e4SLinus Torvalds 	struct buffer_head *head, *bh, *next;
14591da177e4SLinus Torvalds 	unsigned int curr_off = 0;
14601da177e4SLinus Torvalds 
14611da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
14621da177e4SLinus Torvalds 	if (!page_has_buffers(page))
14631da177e4SLinus Torvalds 		goto out;
14641da177e4SLinus Torvalds 
14651da177e4SLinus Torvalds 	head = page_buffers(page);
14661da177e4SLinus Torvalds 	bh = head;
14671da177e4SLinus Torvalds 	do {
14681da177e4SLinus Torvalds 		unsigned int next_off = curr_off + bh->b_size;
14691da177e4SLinus Torvalds 		next = bh->b_this_page;
14701da177e4SLinus Torvalds 
14711da177e4SLinus Torvalds 		/*
14721da177e4SLinus Torvalds 		 * is this block fully invalidated?
14731da177e4SLinus Torvalds 		 */
14741da177e4SLinus Torvalds 		if (offset <= curr_off)
14751da177e4SLinus Torvalds 			discard_buffer(bh);
14761da177e4SLinus Torvalds 		curr_off = next_off;
14771da177e4SLinus Torvalds 		bh = next;
14781da177e4SLinus Torvalds 	} while (bh != head);
14791da177e4SLinus Torvalds 
14801da177e4SLinus Torvalds 	/*
14811da177e4SLinus Torvalds 	 * We release buffers only if the entire page is being invalidated.
14821da177e4SLinus Torvalds 	 * The get_block cached value has been unconditionally invalidated,
14831da177e4SLinus Torvalds 	 * so real IO is not possible anymore.
14841da177e4SLinus Torvalds 	 */
14851da177e4SLinus Torvalds 	if (offset == 0)
14862ff28e22SNeilBrown 		try_to_release_page(page, 0);
14871da177e4SLinus Torvalds out:
14882ff28e22SNeilBrown 	return;
14891da177e4SLinus Torvalds }
14901da177e4SLinus Torvalds EXPORT_SYMBOL(block_invalidatepage);
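
/*
 * Usage sketch (hypothetical "myfs", not part of this file): simple
 * buffer-backed filesystems can use this helper as their
 * ->invalidatepage directly, and the generic truncate path typically
 * falls back to it when ->invalidatepage is left unset:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readpage	= myfs_readpage,
 *		.invalidatepage	= block_invalidatepage,
 *		...
 *	};
 */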
14911da177e4SLinus Torvalds 
14921da177e4SLinus Torvalds /*
14931da177e4SLinus Torvalds  * We attach and possibly dirty the buffers atomically wrt
14941da177e4SLinus Torvalds  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
14951da177e4SLinus Torvalds  * is already excluded via the page lock.
14961da177e4SLinus Torvalds  */
14971da177e4SLinus Torvalds void create_empty_buffers(struct page *page,
14981da177e4SLinus Torvalds 			unsigned long blocksize, unsigned long b_state)
14991da177e4SLinus Torvalds {
15001da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *tail;
15011da177e4SLinus Torvalds 
15021da177e4SLinus Torvalds 	head = alloc_page_buffers(page, blocksize, 1);
15031da177e4SLinus Torvalds 	bh = head;
15041da177e4SLinus Torvalds 	do {
15051da177e4SLinus Torvalds 		bh->b_state |= b_state;
15061da177e4SLinus Torvalds 		tail = bh;
15071da177e4SLinus Torvalds 		bh = bh->b_this_page;
15081da177e4SLinus Torvalds 	} while (bh);
15091da177e4SLinus Torvalds 	tail->b_this_page = head;
15101da177e4SLinus Torvalds 
15111da177e4SLinus Torvalds 	spin_lock(&page->mapping->private_lock);
15121da177e4SLinus Torvalds 	if (PageUptodate(page) || PageDirty(page)) {
15131da177e4SLinus Torvalds 		bh = head;
15141da177e4SLinus Torvalds 		do {
15151da177e4SLinus Torvalds 			if (PageDirty(page))
15161da177e4SLinus Torvalds 				set_buffer_dirty(bh);
15171da177e4SLinus Torvalds 			if (PageUptodate(page))
15181da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
15191da177e4SLinus Torvalds 			bh = bh->b_this_page;
15201da177e4SLinus Torvalds 		} while (bh != head);
15211da177e4SLinus Torvalds 	}
15221da177e4SLinus Torvalds 	attach_page_buffers(page, head);
15231da177e4SLinus Torvalds 	spin_unlock(&page->mapping->private_lock);
15241da177e4SLinus Torvalds }
15251da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers);
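
/*
 * Usage sketch (as done by the helpers later in this file): callers
 * that need a fully buffer-backed page attach buffers on demand before
 * operating on them:
 *
 *	if (!page_has_buffers(page))
 *		create_empty_buffers(page, blocksize, 0);
 *	head = page_buffers(page);
 */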
15261da177e4SLinus Torvalds 
15271da177e4SLinus Torvalds /*
15281da177e4SLinus Torvalds  * We are taking a block for data and we don't want any output from any
15291da177e4SLinus Torvalds  * buffer-cache aliases starting from the return of this function and
15301da177e4SLinus Torvalds  * until the moment when something explicitly marks the buffer
15311da177e4SLinus Torvalds  * dirty (hopefully that will not happen until we free that block ;-)
15321da177e4SLinus Torvalds  * We don't even need to mark it not-uptodate - nobody can expect
15331da177e4SLinus Torvalds  * anything from a newly allocated buffer anyway. We used to use
15341da177e4SLinus Torvalds  * unmap_buffer() for such invalidation, but that was wrong. We definitely
15351da177e4SLinus Torvalds  * don't want to mark the alias unmapped, for example - it would confuse
15361da177e4SLinus Torvalds  * anyone who might pick it with bread() afterwards...
15371da177e4SLinus Torvalds  *
15381da177e4SLinus Torvalds  * Also..  Note that bforget() doesn't lock the buffer.  So there can
15391da177e4SLinus Torvalds  * be writeout I/O going on against recently-freed buffers.  We don't
15401da177e4SLinus Torvalds  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
15411da177e4SLinus Torvalds  * only if we really need to.  That happens here.
15421da177e4SLinus Torvalds  */
15431da177e4SLinus Torvalds void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
15441da177e4SLinus Torvalds {
15451da177e4SLinus Torvalds 	struct buffer_head *old_bh;
15461da177e4SLinus Torvalds 
15471da177e4SLinus Torvalds 	might_sleep();
15481da177e4SLinus Torvalds 
1549385fd4c5SCoywolf Qi Hunt 	old_bh = __find_get_block_slow(bdev, block);
15501da177e4SLinus Torvalds 	if (old_bh) {
15511da177e4SLinus Torvalds 		clear_buffer_dirty(old_bh);
15521da177e4SLinus Torvalds 		wait_on_buffer(old_bh);
15531da177e4SLinus Torvalds 		clear_buffer_req(old_bh);
15541da177e4SLinus Torvalds 		__brelse(old_bh);
15551da177e4SLinus Torvalds 	}
15561da177e4SLinus Torvalds }
15571da177e4SLinus Torvalds EXPORT_SYMBOL(unmap_underlying_metadata);
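
/*
 * Usage sketch (mirroring the calls made later in this file): when
 * get_block() has just allocated a block (buffer_new(bh) is set), any
 * stale blockdev alias of it must be shot down before the block is
 * exposed as data:
 *
 *	if (buffer_new(bh))
 *		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
 */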
15581da177e4SLinus Torvalds 
15591da177e4SLinus Torvalds /*
15601da177e4SLinus Torvalds  * NOTE! All mapped/uptodate combinations are valid:
15611da177e4SLinus Torvalds  *
15621da177e4SLinus Torvalds  *	Mapped	Uptodate	Meaning
15631da177e4SLinus Torvalds  *
15641da177e4SLinus Torvalds  *	No	No		"unknown" - must do get_block()
15651da177e4SLinus Torvalds  *	No	Yes		"hole" - zero-filled
15661da177e4SLinus Torvalds  *	Yes	No		"allocated" - allocated on disk, not read in
15671da177e4SLinus Torvalds  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
15681da177e4SLinus Torvalds  *
15691da177e4SLinus Torvalds  * "Dirty" is valid only with the last case (mapped+uptodate).
15701da177e4SLinus Torvalds  */
15711da177e4SLinus Torvalds 
15721da177e4SLinus Torvalds /*
15731da177e4SLinus Torvalds  * While block_write_full_page is writing back the dirty buffers under
15741da177e4SLinus Torvalds  * the page lock, whoever dirtied the buffers may decide to clean them
15751da177e4SLinus Torvalds  * again at any time.  We handle that by only looking at the buffer
15761da177e4SLinus Torvalds  * state inside lock_buffer().
15771da177e4SLinus Torvalds  *
15781da177e4SLinus Torvalds  * If block_write_full_page() is called for regular writeback
15791da177e4SLinus Torvalds  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
15801da177e4SLinus Torvalds  * locked buffer.  This can only happen if someone has written the buffer
15811da177e4SLinus Torvalds  * directly, with submit_bh().  At the address_space level PageWriteback
15821da177e4SLinus Torvalds  * prevents this contention from occurring.
15831da177e4SLinus Torvalds  */
15841da177e4SLinus Torvalds static int __block_write_full_page(struct inode *inode, struct page *page,
15851da177e4SLinus Torvalds 			get_block_t *get_block, struct writeback_control *wbc)
15861da177e4SLinus Torvalds {
15871da177e4SLinus Torvalds 	int err;
15881da177e4SLinus Torvalds 	sector_t block;
15891da177e4SLinus Torvalds 	sector_t last_block;
1590f0fbd5fcSAndrew Morton 	struct buffer_head *bh, *head;
1591b0cf2321SBadari Pulavarty 	const unsigned blocksize = 1 << inode->i_blkbits;
15921da177e4SLinus Torvalds 	int nr_underway = 0;
15931da177e4SLinus Torvalds 
15941da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
15951da177e4SLinus Torvalds 
15961da177e4SLinus Torvalds 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
15971da177e4SLinus Torvalds 
15981da177e4SLinus Torvalds 	if (!page_has_buffers(page)) {
1599b0cf2321SBadari Pulavarty 		create_empty_buffers(page, blocksize,
16001da177e4SLinus Torvalds 					(1 << BH_Dirty)|(1 << BH_Uptodate));
16011da177e4SLinus Torvalds 	}
16021da177e4SLinus Torvalds 
16031da177e4SLinus Torvalds 	/*
16041da177e4SLinus Torvalds 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
16051da177e4SLinus Torvalds 	 * here, and the (potentially unmapped) buffers may become dirty at
16061da177e4SLinus Torvalds 	 * any time.  If a buffer becomes dirty here after we've inspected it
16071da177e4SLinus Torvalds 	 * then we just miss that fact, and the page stays dirty.
16081da177e4SLinus Torvalds 	 *
16091da177e4SLinus Torvalds 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
16101da177e4SLinus Torvalds 	 * handle that here by just cleaning them.
16111da177e4SLinus Torvalds 	 */
16121da177e4SLinus Torvalds 
161354b21a79SAndrew Morton 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
16141da177e4SLinus Torvalds 	head = page_buffers(page);
16151da177e4SLinus Torvalds 	bh = head;
16161da177e4SLinus Torvalds 
16171da177e4SLinus Torvalds 	/*
16181da177e4SLinus Torvalds 	 * Get all the dirty buffers mapped to disk addresses and
16191da177e4SLinus Torvalds 	 * handle any aliases from the underlying blockdev's mapping.
16201da177e4SLinus Torvalds 	 */
16211da177e4SLinus Torvalds 	do {
16221da177e4SLinus Torvalds 		if (block > last_block) {
16231da177e4SLinus Torvalds 			/*
16241da177e4SLinus Torvalds 			 * mapped buffers outside i_size will occur, because
16251da177e4SLinus Torvalds 			 * this page can be outside i_size when there is a
16261da177e4SLinus Torvalds 			 * truncate in progress.
16271da177e4SLinus Torvalds 			 */
16281da177e4SLinus Torvalds 			/*
16291da177e4SLinus Torvalds 			 * The buffer was zeroed by block_write_full_page()
16301da177e4SLinus Torvalds 			 */
16311da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
16321da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
16331da177e4SLinus Torvalds 		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1634b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
16351da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
16361da177e4SLinus Torvalds 			if (err)
16371da177e4SLinus Torvalds 				goto recover;
16381da177e4SLinus Torvalds 			if (buffer_new(bh)) {
16391da177e4SLinus Torvalds 				/* blockdev mappings never come here */
16401da177e4SLinus Torvalds 				clear_buffer_new(bh);
16411da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
16421da177e4SLinus Torvalds 							bh->b_blocknr);
16431da177e4SLinus Torvalds 			}
16441da177e4SLinus Torvalds 		}
16451da177e4SLinus Torvalds 		bh = bh->b_this_page;
16461da177e4SLinus Torvalds 		block++;
16471da177e4SLinus Torvalds 	} while (bh != head);
16481da177e4SLinus Torvalds 
16491da177e4SLinus Torvalds 	do {
16501da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
16511da177e4SLinus Torvalds 			continue;
16521da177e4SLinus Torvalds 		/*
16531da177e4SLinus Torvalds 		 * If it's a fully non-blocking write attempt and we cannot
16541da177e4SLinus Torvalds 		 * lock the buffer then redirty the page.  Note that this can
16551da177e4SLinus Torvalds 		 * potentially cause a busy-wait loop from pdflush and kswapd
16561da177e4SLinus Torvalds 		 * activity, but those code paths have their own higher-level
16571da177e4SLinus Torvalds 		 * throttling.
16581da177e4SLinus Torvalds 		 */
16591da177e4SLinus Torvalds 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
16601da177e4SLinus Torvalds 			lock_buffer(bh);
16611da177e4SLinus Torvalds 		} else if (test_set_buffer_locked(bh)) {
16621da177e4SLinus Torvalds 			redirty_page_for_writepage(wbc, page);
16631da177e4SLinus Torvalds 			continue;
16641da177e4SLinus Torvalds 		}
16651da177e4SLinus Torvalds 		if (test_clear_buffer_dirty(bh)) {
16661da177e4SLinus Torvalds 			mark_buffer_async_write(bh);
16671da177e4SLinus Torvalds 		} else {
16681da177e4SLinus Torvalds 			unlock_buffer(bh);
16691da177e4SLinus Torvalds 		}
16701da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
16711da177e4SLinus Torvalds 
16721da177e4SLinus Torvalds 	/*
16731da177e4SLinus Torvalds 	 * The page and its buffers are protected by PageWriteback(), so we can
16741da177e4SLinus Torvalds 	 * drop the bh refcounts early.
16751da177e4SLinus Torvalds 	 */
16761da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
16771da177e4SLinus Torvalds 	set_page_writeback(page);
16781da177e4SLinus Torvalds 
16791da177e4SLinus Torvalds 	do {
16801da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
16811da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
16821da177e4SLinus Torvalds 			submit_bh(WRITE, bh);
16831da177e4SLinus Torvalds 			nr_underway++;
1684ad576e63SNick Piggin 		}
16851da177e4SLinus Torvalds 		bh = next;
16861da177e4SLinus Torvalds 	} while (bh != head);
168705937baaSAndrew Morton 	unlock_page(page);
16881da177e4SLinus Torvalds 
16891da177e4SLinus Torvalds 	err = 0;
16901da177e4SLinus Torvalds done:
16911da177e4SLinus Torvalds 	if (nr_underway == 0) {
16921da177e4SLinus Torvalds 		/*
16931da177e4SLinus Torvalds 		 * The page was marked dirty, but the buffers were
16941da177e4SLinus Torvalds 		 * clean.  Someone wrote them back by hand with
16951da177e4SLinus Torvalds 		 * ll_rw_block/submit_bh.  A rare case.
16961da177e4SLinus Torvalds 		 */
16971da177e4SLinus Torvalds 		end_page_writeback(page);
16983d67f2d7SNick Piggin 
16991da177e4SLinus Torvalds 		/*
17001da177e4SLinus Torvalds 		 * The page and buffer_heads can be released at any time from
17011da177e4SLinus Torvalds 		 * here on.
17021da177e4SLinus Torvalds 		 */
17031da177e4SLinus Torvalds 		wbc->pages_skipped++;	/* We didn't write this page */
17041da177e4SLinus Torvalds 	}
17051da177e4SLinus Torvalds 	return err;
17061da177e4SLinus Torvalds 
17071da177e4SLinus Torvalds recover:
17081da177e4SLinus Torvalds 	/*
17091da177e4SLinus Torvalds 	 * ENOSPC, or some other error.  We may already have added some
17101da177e4SLinus Torvalds 	 * blocks to the file, so we need to write these out to avoid
17111da177e4SLinus Torvalds 	 * exposing stale data.
17121da177e4SLinus Torvalds 	 * The page is currently locked and not marked for writeback
17131da177e4SLinus Torvalds 	 */
17141da177e4SLinus Torvalds 	bh = head;
17151da177e4SLinus Torvalds 	/* Recovery: lock and submit the mapped buffers */
17161da177e4SLinus Torvalds 	do {
17171da177e4SLinus Torvalds 		if (buffer_mapped(bh) && buffer_dirty(bh)) {
17181da177e4SLinus Torvalds 			lock_buffer(bh);
17191da177e4SLinus Torvalds 			mark_buffer_async_write(bh);
17201da177e4SLinus Torvalds 		} else {
17211da177e4SLinus Torvalds 			/*
17221da177e4SLinus Torvalds 			 * The buffer may have been set dirty during
17231da177e4SLinus Torvalds 			 * attachment to a dirty page.
17241da177e4SLinus Torvalds 			 */
17251da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
17261da177e4SLinus Torvalds 		}
17271da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
17281da177e4SLinus Torvalds 	SetPageError(page);
17291da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
17307e4c3690SAndrew Morton 	mapping_set_error(page->mapping, err);
17311da177e4SLinus Torvalds 	set_page_writeback(page);
17321da177e4SLinus Torvalds 	do {
17331da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
17341da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
17351da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
17361da177e4SLinus Torvalds 			submit_bh(WRITE, bh);
17371da177e4SLinus Torvalds 			nr_underway++;
1738ad576e63SNick Piggin 		}
17391da177e4SLinus Torvalds 		bh = next;
17401da177e4SLinus Torvalds 	} while (bh != head);
1741ffda9d30SNick Piggin 	unlock_page(page);
17421da177e4SLinus Torvalds 	goto done;
17431da177e4SLinus Torvalds }
17441da177e4SLinus Torvalds 
17451da177e4SLinus Torvalds static int __block_prepare_write(struct inode *inode, struct page *page,
17461da177e4SLinus Torvalds 		unsigned from, unsigned to, get_block_t *get_block)
17471da177e4SLinus Torvalds {
17481da177e4SLinus Torvalds 	unsigned block_start, block_end;
17491da177e4SLinus Torvalds 	sector_t block;
17501da177e4SLinus Torvalds 	int err = 0;
17511da177e4SLinus Torvalds 	unsigned blocksize, bbits;
17521da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
17531da177e4SLinus Torvalds 
17541da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
17551da177e4SLinus Torvalds 	BUG_ON(from > PAGE_CACHE_SIZE);
17561da177e4SLinus Torvalds 	BUG_ON(to > PAGE_CACHE_SIZE);
17571da177e4SLinus Torvalds 	BUG_ON(from > to);
17581da177e4SLinus Torvalds 
17591da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
17601da177e4SLinus Torvalds 	if (!page_has_buffers(page))
17611da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
17621da177e4SLinus Torvalds 	head = page_buffers(page);
17631da177e4SLinus Torvalds 
17641da177e4SLinus Torvalds 	bbits = inode->i_blkbits;
17651da177e4SLinus Torvalds 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
17661da177e4SLinus Torvalds 
17671da177e4SLinus Torvalds 	for(bh = head, block_start = 0; bh != head || !block_start;
17681da177e4SLinus Torvalds 	    block++, block_start=block_end, bh = bh->b_this_page) {
17691da177e4SLinus Torvalds 		block_end = block_start + blocksize;
17701da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
17711da177e4SLinus Torvalds 			if (PageUptodate(page)) {
17721da177e4SLinus Torvalds 				if (!buffer_uptodate(bh))
17731da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
17741da177e4SLinus Torvalds 			}
17751da177e4SLinus Torvalds 			continue;
17761da177e4SLinus Torvalds 		}
17771da177e4SLinus Torvalds 		if (buffer_new(bh))
17781da177e4SLinus Torvalds 			clear_buffer_new(bh);
17791da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
1780b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
17811da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
17821da177e4SLinus Torvalds 			if (err)
1783f3ddbdc6SNick Piggin 				break;
17841da177e4SLinus Torvalds 			if (buffer_new(bh)) {
17851da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
17861da177e4SLinus Torvalds 							bh->b_blocknr);
17871da177e4SLinus Torvalds 				if (PageUptodate(page)) {
17881da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
17891da177e4SLinus Torvalds 					continue;
17901da177e4SLinus Torvalds 				}
17911da177e4SLinus Torvalds 				if (block_end > to || block_start < from) {
17921da177e4SLinus Torvalds 					void *kaddr;
17931da177e4SLinus Torvalds 
17941da177e4SLinus Torvalds 					kaddr = kmap_atomic(page, KM_USER0);
17951da177e4SLinus Torvalds 					if (block_end > to)
17961da177e4SLinus Torvalds 						memset(kaddr+to, 0,
17971da177e4SLinus Torvalds 							block_end-to);
17981da177e4SLinus Torvalds 					if (block_start < from)
17991da177e4SLinus Torvalds 						memset(kaddr+block_start,
18001da177e4SLinus Torvalds 							0, from-block_start);
18011da177e4SLinus Torvalds 					flush_dcache_page(page);
18021da177e4SLinus Torvalds 					kunmap_atomic(kaddr, KM_USER0);
18031da177e4SLinus Torvalds 				}
18041da177e4SLinus Torvalds 				continue;
18051da177e4SLinus Torvalds 			}
18061da177e4SLinus Torvalds 		}
18071da177e4SLinus Torvalds 		if (PageUptodate(page)) {
18081da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
18091da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
18101da177e4SLinus Torvalds 			continue;
18111da177e4SLinus Torvalds 		}
18121da177e4SLinus Torvalds 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
181333a266ddSDavid Chinner 		    !buffer_unwritten(bh) &&
18141da177e4SLinus Torvalds 		     (block_start < from || block_end > to)) {
18151da177e4SLinus Torvalds 			ll_rw_block(READ, 1, &bh);
18161da177e4SLinus Torvalds 			*wait_bh++=bh;
18171da177e4SLinus Torvalds 		}
18181da177e4SLinus Torvalds 	}
18191da177e4SLinus Torvalds 	/*
18201da177e4SLinus Torvalds 	 * If we issued read requests - let them complete.
18211da177e4SLinus Torvalds 	 */
18221da177e4SLinus Torvalds 	while(wait_bh > wait) {
18231da177e4SLinus Torvalds 		wait_on_buffer(*--wait_bh);
18241da177e4SLinus Torvalds 		if (!buffer_uptodate(*wait_bh))
1825f3ddbdc6SNick Piggin 			err = -EIO;
18261da177e4SLinus Torvalds 	}
1827152becd2SAnton Altaparmakov 	if (!err) {
1828152becd2SAnton Altaparmakov 		bh = head;
1829152becd2SAnton Altaparmakov 		do {
1830152becd2SAnton Altaparmakov 			if (buffer_new(bh))
1831152becd2SAnton Altaparmakov 				clear_buffer_new(bh);
1832152becd2SAnton Altaparmakov 		} while ((bh = bh->b_this_page) != head);
1833152becd2SAnton Altaparmakov 		return 0;
1834152becd2SAnton Altaparmakov 	}
1835f3ddbdc6SNick Piggin 	/* Error case: */
18361da177e4SLinus Torvalds 	/*
18371da177e4SLinus Torvalds 	 * Zero out any newly allocated blocks to avoid exposing stale
18381da177e4SLinus Torvalds 	 * data.  If BH_New is set, we know that the block was newly
18391da177e4SLinus Torvalds 	 * allocated in the above loop.
18401da177e4SLinus Torvalds 	 */
18411da177e4SLinus Torvalds 	bh = head;
18421da177e4SLinus Torvalds 	block_start = 0;
18431da177e4SLinus Torvalds 	do {
18441da177e4SLinus Torvalds 		block_end = block_start+blocksize;
18451da177e4SLinus Torvalds 		if (block_end <= from)
18461da177e4SLinus Torvalds 			goto next_bh;
18471da177e4SLinus Torvalds 		if (block_start >= to)
18481da177e4SLinus Torvalds 			break;
18491da177e4SLinus Torvalds 		if (buffer_new(bh)) {
18501da177e4SLinus Torvalds 			clear_buffer_new(bh);
185101f2705dSNate Diller 			zero_user_page(page, block_start, bh->b_size, KM_USER0);
18521da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
18531da177e4SLinus Torvalds 			mark_buffer_dirty(bh);
18541da177e4SLinus Torvalds 		}
18551da177e4SLinus Torvalds next_bh:
18561da177e4SLinus Torvalds 		block_start = block_end;
18571da177e4SLinus Torvalds 		bh = bh->b_this_page;
18581da177e4SLinus Torvalds 	} while (bh != head);
18591da177e4SLinus Torvalds 	return err;
18601da177e4SLinus Torvalds }
18611da177e4SLinus Torvalds 
18621da177e4SLinus Torvalds static int __block_commit_write(struct inode *inode, struct page *page,
18631da177e4SLinus Torvalds 		unsigned from, unsigned to)
18641da177e4SLinus Torvalds {
18651da177e4SLinus Torvalds 	unsigned block_start, block_end;
18661da177e4SLinus Torvalds 	int partial = 0;
18671da177e4SLinus Torvalds 	unsigned blocksize;
18681da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
18691da177e4SLinus Torvalds 
18701da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
18711da177e4SLinus Torvalds 
18721da177e4SLinus Torvalds 	for(bh = head = page_buffers(page), block_start = 0;
18731da177e4SLinus Torvalds 	    bh != head || !block_start;
18741da177e4SLinus Torvalds 	    block_start=block_end, bh = bh->b_this_page) {
18751da177e4SLinus Torvalds 		block_end = block_start + blocksize;
18761da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
18771da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
18781da177e4SLinus Torvalds 				partial = 1;
18791da177e4SLinus Torvalds 		} else {
18801da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
18811da177e4SLinus Torvalds 			mark_buffer_dirty(bh);
18821da177e4SLinus Torvalds 		}
18831da177e4SLinus Torvalds 	}
18841da177e4SLinus Torvalds 
18851da177e4SLinus Torvalds 	/*
18861da177e4SLinus Torvalds 	 * If this is a partial write which happened to make all buffers
18871da177e4SLinus Torvalds 	 * uptodate then we can optimize away a bogus readpage() for
18881da177e4SLinus Torvalds 	 * the next read(). Here we 'discover' whether the page went
18891da177e4SLinus Torvalds 	 * uptodate as a result of this (potentially partial) write.
18901da177e4SLinus Torvalds 	 */
18911da177e4SLinus Torvalds 	if (!partial)
18921da177e4SLinus Torvalds 		SetPageUptodate(page);
18931da177e4SLinus Torvalds 	return 0;
18941da177e4SLinus Torvalds }
18951da177e4SLinus Torvalds 
18961da177e4SLinus Torvalds /*
18971da177e4SLinus Torvalds  * Generic "read page" function for block devices that have the normal
18981da177e4SLinus Torvalds  * get_block functionality. This is most of the block device filesystems.
18991da177e4SLinus Torvalds  * Reads the page asynchronously --- the unlock_buffer() and
19001da177e4SLinus Torvalds  * set/clear_buffer_uptodate() functions propagate buffer state into the
19011da177e4SLinus Torvalds  * page struct once IO has completed.
19021da177e4SLinus Torvalds  */
19031da177e4SLinus Torvalds int block_read_full_page(struct page *page, get_block_t *get_block)
19041da177e4SLinus Torvalds {
19051da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
19061da177e4SLinus Torvalds 	sector_t iblock, lblock;
19071da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
19081da177e4SLinus Torvalds 	unsigned int blocksize;
19091da177e4SLinus Torvalds 	int nr, i;
19101da177e4SLinus Torvalds 	int fully_mapped = 1;
19111da177e4SLinus Torvalds 
1912cd7619d6SMatt Mackall 	BUG_ON(!PageLocked(page));
19131da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
19141da177e4SLinus Torvalds 	if (!page_has_buffers(page))
19151da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
19161da177e4SLinus Torvalds 	head = page_buffers(page);
19171da177e4SLinus Torvalds 
19181da177e4SLinus Torvalds 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
19191da177e4SLinus Torvalds 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
19201da177e4SLinus Torvalds 	bh = head;
19211da177e4SLinus Torvalds 	nr = 0;
19221da177e4SLinus Torvalds 	i = 0;
19231da177e4SLinus Torvalds 
19241da177e4SLinus Torvalds 	do {
19251da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
19261da177e4SLinus Torvalds 			continue;
19271da177e4SLinus Torvalds 
19281da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
1929c64610baSAndrew Morton 			int err = 0;
1930c64610baSAndrew Morton 
19311da177e4SLinus Torvalds 			fully_mapped = 0;
19321da177e4SLinus Torvalds 			if (iblock < lblock) {
1933b0cf2321SBadari Pulavarty 				WARN_ON(bh->b_size != blocksize);
1934c64610baSAndrew Morton 				err = get_block(inode, iblock, bh, 0);
1935c64610baSAndrew Morton 				if (err)
19361da177e4SLinus Torvalds 					SetPageError(page);
19371da177e4SLinus Torvalds 			}
19381da177e4SLinus Torvalds 			if (!buffer_mapped(bh)) {
193901f2705dSNate Diller 				zero_user_page(page, i * blocksize, blocksize,
194001f2705dSNate Diller 						KM_USER0);
1941c64610baSAndrew Morton 				if (!err)
19421da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
19431da177e4SLinus Torvalds 				continue;
19441da177e4SLinus Torvalds 			}
19451da177e4SLinus Torvalds 			/*
19461da177e4SLinus Torvalds 			 * get_block() might have updated the buffer
19471da177e4SLinus Torvalds 			 * synchronously
19481da177e4SLinus Torvalds 			 */
19491da177e4SLinus Torvalds 			if (buffer_uptodate(bh))
19501da177e4SLinus Torvalds 				continue;
19511da177e4SLinus Torvalds 		}
19521da177e4SLinus Torvalds 		arr[nr++] = bh;
19531da177e4SLinus Torvalds 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
19541da177e4SLinus Torvalds 
19551da177e4SLinus Torvalds 	if (fully_mapped)
19561da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
19571da177e4SLinus Torvalds 
19581da177e4SLinus Torvalds 	if (!nr) {
19591da177e4SLinus Torvalds 		/*
19601da177e4SLinus Torvalds 		 * All buffers are uptodate - we can set the page uptodate
19611da177e4SLinus Torvalds 		 * as well. But not if get_block() returned an error.
19621da177e4SLinus Torvalds 		 */
19631da177e4SLinus Torvalds 		if (!PageError(page))
19641da177e4SLinus Torvalds 			SetPageUptodate(page);
19651da177e4SLinus Torvalds 		unlock_page(page);
19661da177e4SLinus Torvalds 		return 0;
19671da177e4SLinus Torvalds 	}
19681da177e4SLinus Torvalds 
19691da177e4SLinus Torvalds 	/* Stage two: lock the buffers */
19701da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
19711da177e4SLinus Torvalds 		bh = arr[i];
19721da177e4SLinus Torvalds 		lock_buffer(bh);
19731da177e4SLinus Torvalds 		mark_buffer_async_read(bh);
19741da177e4SLinus Torvalds 	}
19751da177e4SLinus Torvalds 
19761da177e4SLinus Torvalds 	/*
19771da177e4SLinus Torvalds 	 * Stage 3: start the IO.  Check for uptodateness
19781da177e4SLinus Torvalds 	 * inside the buffer lock in case another process reading
19791da177e4SLinus Torvalds 	 * the underlying blockdev brought it uptodate (the sct fix).
19801da177e4SLinus Torvalds 	 */
19811da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
19821da177e4SLinus Torvalds 		bh = arr[i];
19831da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
19841da177e4SLinus Torvalds 			end_buffer_async_read(bh, 1);
19851da177e4SLinus Torvalds 		else
19861da177e4SLinus Torvalds 			submit_bh(READ, bh);
19871da177e4SLinus Torvalds 	}
19881da177e4SLinus Torvalds 	return 0;
19891da177e4SLinus Torvalds }
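
/*
 * Usage sketch (hypothetical "myfs", not part of this file): a typical
 * ->readpage is a thin wrapper that just supplies the filesystem's
 * get_block routine:
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, myfs_get_block);
 *	}
 */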
19901da177e4SLinus Torvalds 
19911da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding
19921da177e4SLinus Torvalds  * truncates.  Uses prepare/commit_write to allow the filesystem to
19931da177e4SLinus Torvalds  * deal with the hole.
19941da177e4SLinus Torvalds  */
199505eb0b51SOGAWA Hirofumi static int __generic_cont_expand(struct inode *inode, loff_t size,
199605eb0b51SOGAWA Hirofumi 				 pgoff_t index, unsigned int offset)
19971da177e4SLinus Torvalds {
19981da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
19991da177e4SLinus Torvalds 	struct page *page;
200005eb0b51SOGAWA Hirofumi 	unsigned long limit;
20011da177e4SLinus Torvalds 	int err;
20021da177e4SLinus Torvalds 
20031da177e4SLinus Torvalds 	err = -EFBIG;
20041da177e4SLinus Torvalds 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
20051da177e4SLinus Torvalds 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
20061da177e4SLinus Torvalds 		send_sig(SIGXFSZ, current, 0);
20071da177e4SLinus Torvalds 		goto out;
20081da177e4SLinus Torvalds 	}
20091da177e4SLinus Torvalds 	if (size > inode->i_sb->s_maxbytes)
20101da177e4SLinus Torvalds 		goto out;
20111da177e4SLinus Torvalds 
201205eb0b51SOGAWA Hirofumi 	err = -ENOMEM;
201305eb0b51SOGAWA Hirofumi 	page = grab_cache_page(mapping, index);
201405eb0b51SOGAWA Hirofumi 	if (!page)
201505eb0b51SOGAWA Hirofumi 		goto out;
201605eb0b51SOGAWA Hirofumi 	err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
201705eb0b51SOGAWA Hirofumi 	if (err) {
201805eb0b51SOGAWA Hirofumi 		/*
201905eb0b51SOGAWA Hirofumi 		 * ->prepare_write() may have instantiated a few blocks
202005eb0b51SOGAWA Hirofumi 		 * outside i_size.  Trim these off again.
202105eb0b51SOGAWA Hirofumi 		 */
202205eb0b51SOGAWA Hirofumi 		unlock_page(page);
202305eb0b51SOGAWA Hirofumi 		page_cache_release(page);
202405eb0b51SOGAWA Hirofumi 		vmtruncate(inode, inode->i_size);
202505eb0b51SOGAWA Hirofumi 		goto out;
202605eb0b51SOGAWA Hirofumi 	}
202705eb0b51SOGAWA Hirofumi 
202805eb0b51SOGAWA Hirofumi 	err = mapping->a_ops->commit_write(NULL, page, offset, offset);
202905eb0b51SOGAWA Hirofumi 
203005eb0b51SOGAWA Hirofumi 	unlock_page(page);
203105eb0b51SOGAWA Hirofumi 	page_cache_release(page);
203205eb0b51SOGAWA Hirofumi 	if (err > 0)
203305eb0b51SOGAWA Hirofumi 		err = 0;
203405eb0b51SOGAWA Hirofumi out:
203505eb0b51SOGAWA Hirofumi 	return err;
203605eb0b51SOGAWA Hirofumi }
203705eb0b51SOGAWA Hirofumi 
203805eb0b51SOGAWA Hirofumi int generic_cont_expand(struct inode *inode, loff_t size)
203905eb0b51SOGAWA Hirofumi {
204005eb0b51SOGAWA Hirofumi 	pgoff_t index;
204105eb0b51SOGAWA Hirofumi 	unsigned int offset;
204205eb0b51SOGAWA Hirofumi 
20431da177e4SLinus Torvalds 	offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
20441da177e4SLinus Torvalds 
20451da177e4SLinus Torvalds 	/* ugh.  in prepare/commit_write, if from==to==start of block, we
20461da177e4SLinus Torvalds 	 * skip the prepare.  make sure we never send an offset for the start
20471da177e4SLinus Torvalds 	 * of a block
20481da177e4SLinus Torvalds 	 */
20491da177e4SLinus Torvalds 	if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
205005eb0b51SOGAWA Hirofumi 		/* caller must handle this extra byte. */
20511da177e4SLinus Torvalds 		offset++;
20521da177e4SLinus Torvalds 	}
20531da177e4SLinus Torvalds 	index = size >> PAGE_CACHE_SHIFT;
205405eb0b51SOGAWA Hirofumi 
205505eb0b51SOGAWA Hirofumi 	return __generic_cont_expand(inode, size, index, offset);
20561da177e4SLinus Torvalds }
205705eb0b51SOGAWA Hirofumi 
205805eb0b51SOGAWA Hirofumi int generic_cont_expand_simple(struct inode *inode, loff_t size)
205905eb0b51SOGAWA Hirofumi {
206005eb0b51SOGAWA Hirofumi 	loff_t pos = size - 1;
206105eb0b51SOGAWA Hirofumi 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
206205eb0b51SOGAWA Hirofumi 	unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
206305eb0b51SOGAWA Hirofumi 
206405eb0b51SOGAWA Hirofumi 	/* prepare/commit_write can handle even if from==to==start of block. */
206505eb0b51SOGAWA Hirofumi 	return __generic_cont_expand(inode, size, index, offset);
20661da177e4SLinus Torvalds }
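
/*
 * Usage sketch (hypothetical caller, not part of this file): a
 * filesystem that cannot represent holes can expand a file from its
 * ->setattr before letting the size change through:
 *
 *	if ((attr->ia_valid & ATTR_SIZE) &&
 *	    attr->ia_size > inode->i_size) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}
 */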
20671da177e4SLinus Torvalds 
20681da177e4SLinus Torvalds /*
20691da177e4SLinus Torvalds  * For moronic filesystems that do not allow holes in files.
20701da177e4SLinus Torvalds  * We may have to extend the file.
20711da177e4SLinus Torvalds  */
20721da177e4SLinus Torvalds 
20731da177e4SLinus Torvalds int cont_prepare_write(struct page *page, unsigned offset,
20741da177e4SLinus Torvalds 		unsigned to, get_block_t *get_block, loff_t *bytes)
20751da177e4SLinus Torvalds {
20761da177e4SLinus Torvalds 	struct address_space *mapping = page->mapping;
20771da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
20781da177e4SLinus Torvalds 	struct page *new_page;
20791da177e4SLinus Torvalds 	pgoff_t pgpos;
20801da177e4SLinus Torvalds 	long status;
20811da177e4SLinus Torvalds 	unsigned zerofrom;
20821da177e4SLinus Torvalds 	unsigned blocksize = 1 << inode->i_blkbits;
20831da177e4SLinus Torvalds 
20841da177e4SLinus Torvalds 	while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
20851da177e4SLinus Torvalds 		status = -ENOMEM;
20861da177e4SLinus Torvalds 		new_page = grab_cache_page(mapping, pgpos);
20871da177e4SLinus Torvalds 		if (!new_page)
20881da177e4SLinus Torvalds 			goto out;
20891da177e4SLinus Torvalds 		/* we might sleep */
20901da177e4SLinus Torvalds 		if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
20911da177e4SLinus Torvalds 			unlock_page(new_page);
20921da177e4SLinus Torvalds 			page_cache_release(new_page);
20931da177e4SLinus Torvalds 			continue;
20941da177e4SLinus Torvalds 		}
20951da177e4SLinus Torvalds 		zerofrom = *bytes & ~PAGE_CACHE_MASK;
20961da177e4SLinus Torvalds 		if (zerofrom & (blocksize-1)) {
20971da177e4SLinus Torvalds 			*bytes |= (blocksize-1);
20981da177e4SLinus Torvalds 			(*bytes)++;
20991da177e4SLinus Torvalds 		}
21001da177e4SLinus Torvalds 		status = __block_prepare_write(inode, new_page, zerofrom,
21011da177e4SLinus Torvalds 						PAGE_CACHE_SIZE, get_block);
21021da177e4SLinus Torvalds 		if (status)
21031da177e4SLinus Torvalds 			goto out_unmap;
210401f2705dSNate Diller 		zero_user_page(page, zerofrom, PAGE_CACHE_SIZE - zerofrom,
210501f2705dSNate Diller 				KM_USER0);
21061da177e4SLinus Torvalds 		generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
21071da177e4SLinus Torvalds 		unlock_page(new_page);
21081da177e4SLinus Torvalds 		page_cache_release(new_page);
21091da177e4SLinus Torvalds 	}
21101da177e4SLinus Torvalds 
21111da177e4SLinus Torvalds 	if (page->index < pgpos) {
21121da177e4SLinus Torvalds 		/* completely inside the area */
21131da177e4SLinus Torvalds 		zerofrom = offset;
21141da177e4SLinus Torvalds 	} else {
21151da177e4SLinus Torvalds 		/* page covers the boundary, find the boundary offset */
21161da177e4SLinus Torvalds 		zerofrom = *bytes & ~PAGE_CACHE_MASK;
21171da177e4SLinus Torvalds 
21181da177e4SLinus Torvalds 		/* if we are going to expand the file, the last block will be filled */
21191da177e4SLinus Torvalds 		if (to > zerofrom && (zerofrom & (blocksize-1))) {
21201da177e4SLinus Torvalds 			*bytes |= (blocksize-1);
21211da177e4SLinus Torvalds 			(*bytes)++;
21221da177e4SLinus Torvalds 		}
21231da177e4SLinus Torvalds 
21241da177e4SLinus Torvalds 		/* starting below the boundary? Nothing to zero out */
21251da177e4SLinus Torvalds 		if (offset <= zerofrom)
21261da177e4SLinus Torvalds 			zerofrom = offset;
21271da177e4SLinus Torvalds 	}
21281da177e4SLinus Torvalds 	status = __block_prepare_write(inode, page, zerofrom, to, get_block);
21291da177e4SLinus Torvalds 	if (status)
21301da177e4SLinus Torvalds 		goto out1;
21311da177e4SLinus Torvalds 	if (zerofrom < offset) {
213201f2705dSNate Diller 		zero_user_page(page, zerofrom, offset - zerofrom, KM_USER0);
21331da177e4SLinus Torvalds 		__block_commit_write(inode, page, zerofrom, offset);
21341da177e4SLinus Torvalds 	}
21351da177e4SLinus Torvalds 	return 0;
21361da177e4SLinus Torvalds out1:
21371da177e4SLinus Torvalds 	ClearPageUptodate(page);
21381da177e4SLinus Torvalds 	return status;
21391da177e4SLinus Torvalds 
21401da177e4SLinus Torvalds out_unmap:
21411da177e4SLinus Torvalds 	ClearPageUptodate(new_page);
21421da177e4SLinus Torvalds 	unlock_page(new_page);
21431da177e4SLinus Torvalds 	page_cache_release(new_page);
21441da177e4SLinus Torvalds out:
21451da177e4SLinus Torvalds 	return status;
21461da177e4SLinus Torvalds }
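
/*
 * The "*bytes |= (blocksize-1); (*bytes)++;" idiom above rounds *bytes up
 * to the next block boundary (blocksize is a power of two).  A worked
 * example, assuming blocksize == 1024:
 *
 *	*bytes == 5000:  5000 | 1023 == 5119,  5119 + 1 == 5120 == 5 * 1024
 *
 * The guarding "if (... & (blocksize-1))" tests skip the round-up when
 * *bytes is already block-aligned.
 */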
21471da177e4SLinus Torvalds 
21481da177e4SLinus Torvalds int block_prepare_write(struct page *page, unsigned from, unsigned to,
21491da177e4SLinus Torvalds 			get_block_t *get_block)
21501da177e4SLinus Torvalds {
21511da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
21521da177e4SLinus Torvalds 	int err = __block_prepare_write(inode, page, from, to, get_block);
21531da177e4SLinus Torvalds 	if (err)
21541da177e4SLinus Torvalds 		ClearPageUptodate(page);
21551da177e4SLinus Torvalds 	return err;
21561da177e4SLinus Torvalds }
21571da177e4SLinus Torvalds 
21581da177e4SLinus Torvalds int block_commit_write(struct page *page, unsigned from, unsigned to)
21591da177e4SLinus Torvalds {
21601da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
21611da177e4SLinus Torvalds 	__block_commit_write(inode,page,from,to);
21621da177e4SLinus Torvalds 	return 0;
21631da177e4SLinus Torvalds }
21641da177e4SLinus Torvalds 
21651da177e4SLinus Torvalds int generic_commit_write(struct file *file, struct page *page,
21661da177e4SLinus Torvalds 		unsigned from, unsigned to)
21671da177e4SLinus Torvalds {
21681da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
21691da177e4SLinus Torvalds 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
21701da177e4SLinus Torvalds 	__block_commit_write(inode,page,from,to);
21711da177e4SLinus Torvalds 	/*
21721da177e4SLinus Torvalds 	 * No need to use i_size_read() here: the i_size
21731b1dcc1bSJes Sorensen 	 * cannot change under us because we hold i_mutex.
21741da177e4SLinus Torvalds 	 */
21751da177e4SLinus Torvalds 	if (pos > inode->i_size) {
21761da177e4SLinus Torvalds 		i_size_write(inode, pos);
21771da177e4SLinus Torvalds 		mark_inode_dirty(inode);
21781da177e4SLinus Torvalds 	}
21791da177e4SLinus Torvalds 	return 0;
21801da177e4SLinus Torvalds }
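
/*
 * Worked example for the pos computation above (illustrative, 4K pages):
 * committing bytes [0, 100) of the page at index 3 gives
 *
 *	pos = ((loff_t)3 << 12) + 100 = 12288 + 100 = 12388
 *
 * so i_size grows to 12388 if the write extended the file.  The loff_t
 * cast before the shift matters: page->index is an unsigned long, and on
 * 32-bit machines the shift would otherwise overflow for file offsets at
 * or beyond 4GB.
 */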
21811da177e4SLinus Torvalds 
21821da177e4SLinus Torvalds 
21831da177e4SLinus Torvalds /*
21841da177e4SLinus Torvalds  * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
21851da177e4SLinus Torvalds  * immediately, while under the page lock.  So it needs a special end_io
21861da177e4SLinus Torvalds  * handler which does not touch the bh after unlocking it.
21871da177e4SLinus Torvalds  *
21881da177e4SLinus Torvalds  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
21891da177e4SLinus Torvalds  * a race there is benign: unlock_buffer() only uses the bh's address for
21901da177e4SLinus Torvalds  * hashing after unlocking the buffer, so it doesn't actually touch the bh
21911da177e4SLinus Torvalds  * itself.
21921da177e4SLinus Torvalds  */
21931da177e4SLinus Torvalds static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
21941da177e4SLinus Torvalds {
21951da177e4SLinus Torvalds 	if (uptodate) {
21961da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
21971da177e4SLinus Torvalds 	} else {
21981da177e4SLinus Torvalds 		/* This happens due to failed READA attempts. */
21991da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
22001da177e4SLinus Torvalds 	}
22011da177e4SLinus Torvalds 	unlock_buffer(bh);
22021da177e4SLinus Torvalds }
22031da177e4SLinus Torvalds 
22041da177e4SLinus Torvalds /*
22051da177e4SLinus Torvalds  * On entry, no part of the page is uptodate.
22061da177e4SLinus Torvalds  * On exit, the page is fully uptodate in the areas outside (from, to).
22071da177e4SLinus Torvalds  */
22081da177e4SLinus Torvalds int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
22091da177e4SLinus Torvalds 			get_block_t *get_block)
22101da177e4SLinus Torvalds {
22111da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
22121da177e4SLinus Torvalds 	const unsigned blkbits = inode->i_blkbits;
22131da177e4SLinus Torvalds 	const unsigned blocksize = 1 << blkbits;
22141da177e4SLinus Torvalds 	struct buffer_head map_bh;
22151da177e4SLinus Torvalds 	struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
22161da177e4SLinus Torvalds 	unsigned block_in_page;
22171da177e4SLinus Torvalds 	unsigned block_start;
22181da177e4SLinus Torvalds 	sector_t block_in_file;
22191da177e4SLinus Torvalds 	char *kaddr;
22201da177e4SLinus Torvalds 	int nr_reads = 0;
22211da177e4SLinus Torvalds 	int i;
22221da177e4SLinus Torvalds 	int ret = 0;
22231da177e4SLinus Torvalds 	int is_mapped_to_disk = 1;
22241da177e4SLinus Torvalds 
22251da177e4SLinus Torvalds 	if (PageMappedToDisk(page))
22261da177e4SLinus Torvalds 		return 0;
22271da177e4SLinus Torvalds 
22281da177e4SLinus Torvalds 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
22291da177e4SLinus Torvalds 	map_bh.b_page = page;
22301da177e4SLinus Torvalds 
22311da177e4SLinus Torvalds 	/*
22321da177e4SLinus Torvalds 	 * We loop across all blocks in the page, whether or not they are
22331da177e4SLinus Torvalds 	 * part of the affected region.  This is so we can discover if the
22341da177e4SLinus Torvalds 	 * page is fully mapped-to-disk.
22351da177e4SLinus Torvalds 	 */
22361da177e4SLinus Torvalds 	for (block_start = 0, block_in_page = 0;
22371da177e4SLinus Torvalds 		  block_start < PAGE_CACHE_SIZE;
22381da177e4SLinus Torvalds 		  block_in_page++, block_start += blocksize) {
22391da177e4SLinus Torvalds 		unsigned block_end = block_start + blocksize;
22401da177e4SLinus Torvalds 		int create;
22411da177e4SLinus Torvalds 
22421da177e4SLinus Torvalds 		map_bh.b_state = 0;
22431da177e4SLinus Torvalds 		create = 1;
22441da177e4SLinus Torvalds 		if (block_start >= to)
22451da177e4SLinus Torvalds 			create = 0;
2246b0cf2321SBadari Pulavarty 		map_bh.b_size = blocksize;
22471da177e4SLinus Torvalds 		ret = get_block(inode, block_in_file + block_in_page,
22481da177e4SLinus Torvalds 					&map_bh, create);
22491da177e4SLinus Torvalds 		if (ret)
22501da177e4SLinus Torvalds 			goto failed;
22511da177e4SLinus Torvalds 		if (!buffer_mapped(&map_bh))
22521da177e4SLinus Torvalds 			is_mapped_to_disk = 0;
22531da177e4SLinus Torvalds 		if (buffer_new(&map_bh))
22541da177e4SLinus Torvalds 			unmap_underlying_metadata(map_bh.b_bdev,
22551da177e4SLinus Torvalds 							map_bh.b_blocknr);
22561da177e4SLinus Torvalds 		if (PageUptodate(page))
22571da177e4SLinus Torvalds 			continue;
22581da177e4SLinus Torvalds 		if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
22591da177e4SLinus Torvalds 			kaddr = kmap_atomic(page, KM_USER0);
226022c8ca78SNick Piggin 			if (block_start < from)
22611da177e4SLinus Torvalds 				memset(kaddr+block_start, 0, from-block_start);
226222c8ca78SNick Piggin 			if (block_end > to)
22631da177e4SLinus Torvalds 				memset(kaddr + to, 0, block_end - to);
22641da177e4SLinus Torvalds 			flush_dcache_page(page);
22651da177e4SLinus Torvalds 			kunmap_atomic(kaddr, KM_USER0);
22661da177e4SLinus Torvalds 			continue;
22671da177e4SLinus Torvalds 		}
22681da177e4SLinus Torvalds 		if (buffer_uptodate(&map_bh))
22691da177e4SLinus Torvalds 			continue;	/* reiserfs does this */
22701da177e4SLinus Torvalds 		if (block_start < from || block_end > to) {
22711da177e4SLinus Torvalds 			struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
22721da177e4SLinus Torvalds 
22731da177e4SLinus Torvalds 			if (!bh) {
22741da177e4SLinus Torvalds 				ret = -ENOMEM;
22751da177e4SLinus Torvalds 				goto failed;
22761da177e4SLinus Torvalds 			}
22771da177e4SLinus Torvalds 			bh->b_state = map_bh.b_state;
22781da177e4SLinus Torvalds 			atomic_set(&bh->b_count, 0);
22791da177e4SLinus Torvalds 			bh->b_this_page = NULL;
22801da177e4SLinus Torvalds 			bh->b_page = page;
22811da177e4SLinus Torvalds 			bh->b_blocknr = map_bh.b_blocknr;
22821da177e4SLinus Torvalds 			bh->b_size = blocksize;
22831da177e4SLinus Torvalds 			bh->b_data = (char *)(long)block_start;
22841da177e4SLinus Torvalds 			bh->b_bdev = map_bh.b_bdev;
22851da177e4SLinus Torvalds 			bh->b_private = NULL;
22861da177e4SLinus Torvalds 			read_bh[nr_reads++] = bh;
22871da177e4SLinus Torvalds 		}
22881da177e4SLinus Torvalds 	}
22891da177e4SLinus Torvalds 
22901da177e4SLinus Torvalds 	if (nr_reads) {
22911da177e4SLinus Torvalds 		struct buffer_head *bh;
22921da177e4SLinus Torvalds 
22931da177e4SLinus Torvalds 		/*
22941da177e4SLinus Torvalds 		 * The page is locked, so these buffers are protected from
22951da177e4SLinus Torvalds 		 * any VM or truncate activity.  Hence we don't need to care
22961da177e4SLinus Torvalds 		 * for the buffer_head refcounts.
22971da177e4SLinus Torvalds 		 */
22981da177e4SLinus Torvalds 		for (i = 0; i < nr_reads; i++) {
22991da177e4SLinus Torvalds 			bh = read_bh[i];
23001da177e4SLinus Torvalds 			lock_buffer(bh);
23011da177e4SLinus Torvalds 			bh->b_end_io = end_buffer_read_nobh;
23021da177e4SLinus Torvalds 			submit_bh(READ, bh);
23031da177e4SLinus Torvalds 		}
23041da177e4SLinus Torvalds 		for (i = 0; i < nr_reads; i++) {
23051da177e4SLinus Torvalds 			bh = read_bh[i];
23061da177e4SLinus Torvalds 			wait_on_buffer(bh);
23071da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
23081da177e4SLinus Torvalds 				ret = -EIO;
23091da177e4SLinus Torvalds 			free_buffer_head(bh);
23101da177e4SLinus Torvalds 			read_bh[i] = NULL;
23111da177e4SLinus Torvalds 		}
23121da177e4SLinus Torvalds 		if (ret)
23131da177e4SLinus Torvalds 			goto failed;
23141da177e4SLinus Torvalds 	}
23151da177e4SLinus Torvalds 
23161da177e4SLinus Torvalds 	if (is_mapped_to_disk)
23171da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
23181da177e4SLinus Torvalds 
23191da177e4SLinus Torvalds 	return 0;
23201da177e4SLinus Torvalds 
23211da177e4SLinus Torvalds failed:
23221da177e4SLinus Torvalds 	for (i = 0; i < nr_reads; i++) {
23231da177e4SLinus Torvalds 		if (read_bh[i])
23241da177e4SLinus Torvalds 			free_buffer_head(read_bh[i]);
23251da177e4SLinus Torvalds 	}
23261da177e4SLinus Torvalds 
23271da177e4SLinus Torvalds 	/*
23281da177e4SLinus Torvalds 	 * Error recovery is pretty slack.  Clear the page and mark it dirty
23291da177e4SLinus Torvalds 	 * so we'll later zero out any blocks which _were_ allocated.
23301da177e4SLinus Torvalds 	 */
233101f2705dSNate Diller 	zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
23321da177e4SLinus Torvalds 	SetPageUptodate(page);
23331da177e4SLinus Torvalds 	set_page_dirty(page);
23341da177e4SLinus Torvalds 	return ret;
23351da177e4SLinus Torvalds }
23361da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_prepare_write);
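
/*
 * A minimal sketch of how a filesystem opts into the nobh path.  The
 * "examplefs" names and examplefs_get_block() are hypothetical; only the
 * nobh_* helpers are real:
 *
 *	static int examplefs_prepare_write(struct file *file,
 *			struct page *page, unsigned from, unsigned to)
 *	{
 *		return nobh_prepare_write(page, from, to, examplefs_get_block);
 *	}
 *
 *	static const struct address_space_operations examplefs_aops = {
 *		.readpage	= examplefs_readpage,
 *		.prepare_write	= examplefs_prepare_write,
 *		.commit_write	= nobh_commit_write,
 *	};
 */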
23371da177e4SLinus Torvalds 
233857bf63d6SDave Kleikamp /*
233957bf63d6SDave Kleikamp  * Make sure any changes to nobh_commit_write() are reflected in
234057bf63d6SDave Kleikamp  * nobh_truncate_page(), since it doesn't call commit_write().
234157bf63d6SDave Kleikamp  */
23421da177e4SLinus Torvalds int nobh_commit_write(struct file *file, struct page *page,
23431da177e4SLinus Torvalds 		unsigned from, unsigned to)
23441da177e4SLinus Torvalds {
23451da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
23461da177e4SLinus Torvalds 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
23471da177e4SLinus Torvalds 
234822c8ca78SNick Piggin 	SetPageUptodate(page);
23491da177e4SLinus Torvalds 	set_page_dirty(page);
23501da177e4SLinus Torvalds 	if (pos > inode->i_size) {
23511da177e4SLinus Torvalds 		i_size_write(inode, pos);
23521da177e4SLinus Torvalds 		mark_inode_dirty(inode);
23531da177e4SLinus Torvalds 	}
23541da177e4SLinus Torvalds 	return 0;
23551da177e4SLinus Torvalds }
23561da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_commit_write);
23571da177e4SLinus Torvalds 
23581da177e4SLinus Torvalds /*
23591da177e4SLinus Torvalds  * nobh_writepage() - based on block_write_full_page() except
23601da177e4SLinus Torvalds  * that it tries to operate without attaching bufferheads to
23611da177e4SLinus Torvalds  * the page.
23621da177e4SLinus Torvalds  */
23631da177e4SLinus Torvalds int nobh_writepage(struct page *page, get_block_t *get_block,
23641da177e4SLinus Torvalds 			struct writeback_control *wbc)
23651da177e4SLinus Torvalds {
23661da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
23671da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
23681da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
23691da177e4SLinus Torvalds 	unsigned offset;
23701da177e4SLinus Torvalds 	int ret;
23711da177e4SLinus Torvalds 
23721da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
23731da177e4SLinus Torvalds 	if (page->index < end_index)
23741da177e4SLinus Torvalds 		goto out;
23751da177e4SLinus Torvalds 
23761da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
23771da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
23781da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
23791da177e4SLinus Torvalds 		/*
23801da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
23811da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
23821da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
23831da177e4SLinus Torvalds 		 */
23841da177e4SLinus Torvalds #if 0
23851da177e4SLinus Torvalds 		/* Not really sure about this - do we need this? */
23861da177e4SLinus Torvalds 		if (page->mapping->a_ops->invalidatepage)
23871da177e4SLinus Torvalds 			page->mapping->a_ops->invalidatepage(page, offset);
23881da177e4SLinus Torvalds #endif
23891da177e4SLinus Torvalds 		unlock_page(page);
23901da177e4SLinus Torvalds 		return 0; /* don't care */
23911da177e4SLinus Torvalds 	}
23921da177e4SLinus Torvalds 
23931da177e4SLinus Torvalds 	/*
23941da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
23951da177e4SLinus Torvalds 	 * writepage invocation because it may be mmapped.  "A file is mapped
23961da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
23971da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
23981da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
23991da177e4SLinus Torvalds 	 */
240001f2705dSNate Diller 	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
24011da177e4SLinus Torvalds out:
24021da177e4SLinus Torvalds 	ret = mpage_writepage(page, get_block, wbc);
24031da177e4SLinus Torvalds 	if (ret == -EAGAIN)
24041da177e4SLinus Torvalds 		ret = __block_write_full_page(inode, page, get_block, wbc);
24051da177e4SLinus Torvalds 	return ret;
24061da177e4SLinus Torvalds }
24071da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_writepage);
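
/*
 * A matching ->writepage wrapper, sketched with the same hypothetical
 * examplefs_get_block():
 *
 *	static int examplefs_writepage(struct page *page,
 *			struct writeback_control *wbc)
 *	{
 *		return nobh_writepage(page, examplefs_get_block, wbc);
 *	}
 *
 * mpage_writepage() returns -EAGAIN when it cannot write the page without
 * buffers (for example, when the page's blocks are not contiguous on
 * disk), and nobh_writepage() then falls back to the buffer_head-based
 * __block_write_full_page().
 */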
24081da177e4SLinus Torvalds 
24091da177e4SLinus Torvalds /*
24101da177e4SLinus Torvalds  * This function assumes that ->prepare_write() uses nobh_prepare_write().
24111da177e4SLinus Torvalds  */
24121da177e4SLinus Torvalds int nobh_truncate_page(struct address_space *mapping, loff_t from)
24131da177e4SLinus Torvalds {
24141da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
24151da177e4SLinus Torvalds 	unsigned blocksize = 1 << inode->i_blkbits;
24161da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
24171da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
24181da177e4SLinus Torvalds 	unsigned to;
24191da177e4SLinus Torvalds 	struct page *page;
2420f5e54d6eSChristoph Hellwig 	const struct address_space_operations *a_ops = mapping->a_ops;
24211da177e4SLinus Torvalds 	int ret = 0;
24221da177e4SLinus Torvalds 
24231da177e4SLinus Torvalds 	if ((offset & (blocksize - 1)) == 0)
24241da177e4SLinus Torvalds 		goto out;
24251da177e4SLinus Torvalds 
24261da177e4SLinus Torvalds 	ret = -ENOMEM;
24271da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
24281da177e4SLinus Torvalds 	if (!page)
24291da177e4SLinus Torvalds 		goto out;
24301da177e4SLinus Torvalds 
24311da177e4SLinus Torvalds 	to = (offset + blocksize) & ~(blocksize - 1);
24321da177e4SLinus Torvalds 	ret = a_ops->prepare_write(NULL, page, offset, to);
24331da177e4SLinus Torvalds 	if (ret == 0) {
243401f2705dSNate Diller 		zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
243501f2705dSNate Diller 				KM_USER0);
243657bf63d6SDave Kleikamp 		/*
243757bf63d6SDave Kleikamp 		 * It would be more correct to call aops->commit_write()
243857bf63d6SDave Kleikamp 		 * here, but this is more efficient.
243957bf63d6SDave Kleikamp 		 */
244057bf63d6SDave Kleikamp 		SetPageUptodate(page);
24411da177e4SLinus Torvalds 		set_page_dirty(page);
24421da177e4SLinus Torvalds 	}
24431da177e4SLinus Torvalds 	unlock_page(page);
24441da177e4SLinus Torvalds 	page_cache_release(page);
24451da177e4SLinus Torvalds out:
24461da177e4SLinus Torvalds 	return ret;
24471da177e4SLinus Torvalds }
24481da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_truncate_page);
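
/*
 * Worked example for the "to" computation above (illustrative, 1K blocks,
 * 4K pages): truncating at from == 10000 gives index == 2, offset == 1808,
 * and
 *
 *	to = (1808 + 1024) & ~1023 = 2832 & ~1023 = 2048
 *
 * i.e. prepare_write() covers [1808, 2048), out to the end of the block
 * containing the truncation point; the zeroing then wipes everything from
 * offset 1808 to the end of the page.
 */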
24491da177e4SLinus Torvalds 
24501da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping,
24511da177e4SLinus Torvalds 			loff_t from, get_block_t *get_block)
24521da177e4SLinus Torvalds {
24531da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
24541da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
24551da177e4SLinus Torvalds 	unsigned blocksize;
245654b21a79SAndrew Morton 	sector_t iblock;
24571da177e4SLinus Torvalds 	unsigned length, pos;
24581da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
24591da177e4SLinus Torvalds 	struct page *page;
24601da177e4SLinus Torvalds 	struct buffer_head *bh;
24611da177e4SLinus Torvalds 	int err;
24621da177e4SLinus Torvalds 
24631da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
24641da177e4SLinus Torvalds 	length = offset & (blocksize - 1);
24651da177e4SLinus Torvalds 
24661da177e4SLinus Torvalds 	/* Block boundary? Nothing to do */
24671da177e4SLinus Torvalds 	if (!length)
24681da177e4SLinus Torvalds 		return 0;
24691da177e4SLinus Torvalds 
24701da177e4SLinus Torvalds 	length = blocksize - length;
247154b21a79SAndrew Morton 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
24721da177e4SLinus Torvalds 
24731da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
24741da177e4SLinus Torvalds 	err = -ENOMEM;
24751da177e4SLinus Torvalds 	if (!page)
24761da177e4SLinus Torvalds 		goto out;
24771da177e4SLinus Torvalds 
24781da177e4SLinus Torvalds 	if (!page_has_buffers(page))
24791da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
24801da177e4SLinus Torvalds 
24811da177e4SLinus Torvalds 	/* Find the buffer that contains "offset" */
24821da177e4SLinus Torvalds 	bh = page_buffers(page);
24831da177e4SLinus Torvalds 	pos = blocksize;
24841da177e4SLinus Torvalds 	while (offset >= pos) {
24851da177e4SLinus Torvalds 		bh = bh->b_this_page;
24861da177e4SLinus Torvalds 		iblock++;
24871da177e4SLinus Torvalds 		pos += blocksize;
24881da177e4SLinus Torvalds 	}
24891da177e4SLinus Torvalds 
24901da177e4SLinus Torvalds 	err = 0;
24911da177e4SLinus Torvalds 	if (!buffer_mapped(bh)) {
2492b0cf2321SBadari Pulavarty 		WARN_ON(bh->b_size != blocksize);
24931da177e4SLinus Torvalds 		err = get_block(inode, iblock, bh, 0);
24941da177e4SLinus Torvalds 		if (err)
24951da177e4SLinus Torvalds 			goto unlock;
24961da177e4SLinus Torvalds 		/* unmapped? It's a hole - nothing to do */
24971da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
24981da177e4SLinus Torvalds 			goto unlock;
24991da177e4SLinus Torvalds 	}
25001da177e4SLinus Torvalds 
25011da177e4SLinus Torvalds 	/* Ok, it's mapped. Make sure it's up-to-date */
25021da177e4SLinus Torvalds 	if (PageUptodate(page))
25031da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
25041da177e4SLinus Torvalds 
250533a266ddSDavid Chinner 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
25061da177e4SLinus Torvalds 		err = -EIO;
25071da177e4SLinus Torvalds 		ll_rw_block(READ, 1, &bh);
25081da177e4SLinus Torvalds 		wait_on_buffer(bh);
25091da177e4SLinus Torvalds 		/* Uhhuh. Read error. Complain and punt. */
25101da177e4SLinus Torvalds 		if (!buffer_uptodate(bh))
25111da177e4SLinus Torvalds 			goto unlock;
25121da177e4SLinus Torvalds 	}
25131da177e4SLinus Torvalds 
251401f2705dSNate Diller 	zero_user_page(page, offset, length, KM_USER0);
25151da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
25161da177e4SLinus Torvalds 	err = 0;
25171da177e4SLinus Torvalds 
25181da177e4SLinus Torvalds unlock:
25191da177e4SLinus Torvalds 	unlock_page(page);
25201da177e4SLinus Torvalds 	page_cache_release(page);
25211da177e4SLinus Torvalds out:
25221da177e4SLinus Torvalds 	return err;
25231da177e4SLinus Torvalds }
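
/*
 * Worked example (illustrative, 1K blocks, 4K pages):
 * block_truncate_page(mapping, 10000, get_block) computes
 *
 *	index  = 10000 >> 12          = 2
 *	offset = 10000 & 4095         = 1808
 *	length = 1024 - (1808 & 1023) = 1024 - 784 = 240
 *	iblock = 2 << (12 - 10)       = 8, advanced to 9 while walking
 *	         the page's buffers until offset < pos
 *
 * so the 240 bytes from page offset 1808 to the end of file block 9 are
 * zeroed and that buffer is marked dirty.  Whole blocks beyond the
 * truncation point are left to the caller's truncate path.
 */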
25241da177e4SLinus Torvalds 
25251da177e4SLinus Torvalds /*
25261da177e4SLinus Torvalds  * The generic ->writepage function for buffer-backed address_spaces
25271da177e4SLinus Torvalds  */
25281da177e4SLinus Torvalds int block_write_full_page(struct page *page, get_block_t *get_block,
25291da177e4SLinus Torvalds 			struct writeback_control *wbc)
25301da177e4SLinus Torvalds {
25311da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
25321da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
25331da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
25341da177e4SLinus Torvalds 	unsigned offset;
25351da177e4SLinus Torvalds 
25361da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
25371da177e4SLinus Torvalds 	if (page->index < end_index)
25381da177e4SLinus Torvalds 		return __block_write_full_page(inode, page, get_block, wbc);
25391da177e4SLinus Torvalds 
25401da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
25411da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
25421da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
25431da177e4SLinus Torvalds 		/*
25441da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
25451da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
25461da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
25471da177e4SLinus Torvalds 		 */
2548aaa4059bSJan Kara 		do_invalidatepage(page, 0);
25491da177e4SLinus Torvalds 		unlock_page(page);
25501da177e4SLinus Torvalds 		return 0; /* don't care */
25511da177e4SLinus Torvalds 	}
25521da177e4SLinus Torvalds 
25531da177e4SLinus Torvalds 	/*
25541da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
25551da177e4SLinus Torvalds 	 * writepage invocation because it may be mmapped.  "A file is mapped
25561da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
25571da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
25581da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
25591da177e4SLinus Torvalds 	 */
256001f2705dSNate Diller 	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
25611da177e4SLinus Torvalds 	return __block_write_full_page(inode, page, get_block, wbc);
25621da177e4SLinus Torvalds }
25631da177e4SLinus Torvalds 
25641da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
25651da177e4SLinus Torvalds 			    get_block_t *get_block)
25661da177e4SLinus Torvalds {
25671da177e4SLinus Torvalds 	struct buffer_head tmp;
25681da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
25691da177e4SLinus Torvalds 	tmp.b_state = 0;
25701da177e4SLinus Torvalds 	tmp.b_blocknr = 0;
2571b0cf2321SBadari Pulavarty 	tmp.b_size = 1 << inode->i_blkbits;
25721da177e4SLinus Torvalds 	get_block(inode, block, &tmp, 0);
25731da177e4SLinus Torvalds 	return tmp.b_blocknr;
25741da177e4SLinus Torvalds }
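
/*
 * A sketch of the usual caller: a filesystem points its ->bmap at this
 * helper ("examplefs" is hypothetical), which is what ultimately services
 * the FIBMAP ioctl:
 *
 *	static sector_t examplefs_bmap(struct address_space *mapping,
 *			sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, examplefs_get_block);
 *	}
 *
 * Note that the helper ignores the get_block() return value and hands
 * back tmp.b_blocknr as-is, so holes simply report block 0.
 */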
25751da177e4SLinus Torvalds 
25761da177e4SLinus Torvalds static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
25771da177e4SLinus Torvalds {
25781da177e4SLinus Torvalds 	struct buffer_head *bh = bio->bi_private;
25791da177e4SLinus Torvalds 
25801da177e4SLinus Torvalds 	if (bio->bi_size)
25811da177e4SLinus Torvalds 		return 1;
25821da177e4SLinus Torvalds 
25831da177e4SLinus Torvalds 	if (err == -EOPNOTSUPP) {
25841da177e4SLinus Torvalds 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
25851da177e4SLinus Torvalds 		set_bit(BH_Eopnotsupp, &bh->b_state);
25861da177e4SLinus Torvalds 	}
25871da177e4SLinus Torvalds 
25881da177e4SLinus Torvalds 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
25891da177e4SLinus Torvalds 	bio_put(bio);
25901da177e4SLinus Torvalds 	return 0;
25911da177e4SLinus Torvalds }
25921da177e4SLinus Torvalds 
25931da177e4SLinus Torvalds int submit_bh(int rw, struct buffer_head * bh)
25941da177e4SLinus Torvalds {
25951da177e4SLinus Torvalds 	struct bio *bio;
25961da177e4SLinus Torvalds 	int ret = 0;
25971da177e4SLinus Torvalds 
25981da177e4SLinus Torvalds 	BUG_ON(!buffer_locked(bh));
25991da177e4SLinus Torvalds 	BUG_ON(!buffer_mapped(bh));
26001da177e4SLinus Torvalds 	BUG_ON(!bh->b_end_io);
26011da177e4SLinus Torvalds 
26021da177e4SLinus Torvalds 	if (buffer_ordered(bh) && (rw == WRITE))
26031da177e4SLinus Torvalds 		rw = WRITE_BARRIER;
26041da177e4SLinus Torvalds 
26051da177e4SLinus Torvalds 	/*
26061da177e4SLinus Torvalds 	 * Only clear out a write error when rewriting.  Should this
26071da177e4SLinus Torvalds 	 * include WRITE_SYNC as well?
26081da177e4SLinus Torvalds 	 */
26091da177e4SLinus Torvalds 	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
26101da177e4SLinus Torvalds 		clear_buffer_write_io_error(bh);
26111da177e4SLinus Torvalds 
26121da177e4SLinus Torvalds 	/*
26131da177e4SLinus Torvalds 	 * from here on down, it's all bio -- do the initial mapping,
26141da177e4SLinus Torvalds 	 * submit_bio -> generic_make_request may further map this bio around
26151da177e4SLinus Torvalds 	 */
26161da177e4SLinus Torvalds 	bio = bio_alloc(GFP_NOIO, 1);
26171da177e4SLinus Torvalds 
26181da177e4SLinus Torvalds 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
26191da177e4SLinus Torvalds 	bio->bi_bdev = bh->b_bdev;
26201da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_page = bh->b_page;
26211da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_len = bh->b_size;
26221da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
26231da177e4SLinus Torvalds 
26241da177e4SLinus Torvalds 	bio->bi_vcnt = 1;
26251da177e4SLinus Torvalds 	bio->bi_idx = 0;
26261da177e4SLinus Torvalds 	bio->bi_size = bh->b_size;
26271da177e4SLinus Torvalds 
26281da177e4SLinus Torvalds 	bio->bi_end_io = end_bio_bh_io_sync;
26291da177e4SLinus Torvalds 	bio->bi_private = bh;
26301da177e4SLinus Torvalds 
26311da177e4SLinus Torvalds 	bio_get(bio);
26321da177e4SLinus Torvalds 	submit_bio(rw, bio);
26331da177e4SLinus Torvalds 
26341da177e4SLinus Torvalds 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
26351da177e4SLinus Torvalds 		ret = -EOPNOTSUPP;
26361da177e4SLinus Torvalds 
26371da177e4SLinus Torvalds 	bio_put(bio);
26381da177e4SLinus Torvalds 	return ret;
26391da177e4SLinus Torvalds }
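
/*
 * Worked example for the bi_sector arithmetic above: sectors are 512
 * bytes, so a 4K buffer at b_blocknr == 100 maps to
 *
 *	bi_sector = 100 * (4096 >> 9) = 100 * 8 = 800
 *
 * and the single-segment bio covers sectors 800..807.
 */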
26401da177e4SLinus Torvalds 
26411da177e4SLinus Torvalds /**
26421da177e4SLinus Torvalds  * ll_rw_block: low-level access to block devices (DEPRECATED)
2643a7662236SJan Kara  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
26441da177e4SLinus Torvalds  * @nr: number of &struct buffer_heads in the array
26451da177e4SLinus Torvalds  * @bhs: array of pointers to &struct buffer_head
26461da177e4SLinus Torvalds  *
2647a7662236SJan Kara  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2648a7662236SJan Kara  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2649a7662236SJan Kara  * option, %SWRITE, is like %WRITE except that we make sure the *current* data
2650a7662236SJan Kara  * in the buffers is sent to disk.  The fourth option, %READA, is described in
2651a7662236SJan Kara  * the documentation for generic_make_request(), which ll_rw_block() calls.
26521da177e4SLinus Torvalds  *
26531da177e4SLinus Torvalds  * This function drops any buffer that it cannot get a lock on (with the
2654a7662236SJan Kara  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2655a7662236SJan Kara  * clean when doing a write request, and any buffer that appears to be
2656a7662236SJan Kara  * up-to-date when doing a read request.  Further, it marks as clean buffers that
2657a7662236SJan Kara  * are processed for writing (the buffer cache won't assume that they are
2658a7662236SJan Kara  * actually clean until the buffer gets unlocked).
26591da177e4SLinus Torvalds  *
26601da177e4SLinus Torvalds  * ll_rw_block sets b_end_io to a simple completion handler that marks
26611da177e4SLinus Torvalds  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
26621da177e4SLinus Torvalds  * any waiters.
26631da177e4SLinus Torvalds  *
26641da177e4SLinus Torvalds  * All of the buffers must be for the same device, and must also be a
26651da177e4SLinus Torvalds  * multiple of the current approved size for the device.
26661da177e4SLinus Torvalds  */
26671da177e4SLinus Torvalds void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
26681da177e4SLinus Torvalds {
26691da177e4SLinus Torvalds 	int i;
26701da177e4SLinus Torvalds 
26711da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
26721da177e4SLinus Torvalds 		struct buffer_head *bh = bhs[i];
26731da177e4SLinus Torvalds 
2674a7662236SJan Kara 		if (rw == SWRITE)
2675a7662236SJan Kara 			lock_buffer(bh);
2676a7662236SJan Kara 		else if (test_set_buffer_locked(bh))
26771da177e4SLinus Torvalds 			continue;
26781da177e4SLinus Torvalds 
2679a7662236SJan Kara 		if (rw == WRITE || rw == SWRITE) {
26801da177e4SLinus Torvalds 			if (test_clear_buffer_dirty(bh)) {
268176c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_write_sync;
2682e60e5c50SOGAWA Hirofumi 				get_bh(bh);
26831da177e4SLinus Torvalds 				submit_bh(WRITE, bh);
26841da177e4SLinus Torvalds 				continue;
26851da177e4SLinus Torvalds 			}
26861da177e4SLinus Torvalds 		} else {
26871da177e4SLinus Torvalds 			if (!buffer_uptodate(bh)) {
268876c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_read_sync;
2689e60e5c50SOGAWA Hirofumi 				get_bh(bh);
26901da177e4SLinus Torvalds 				submit_bh(rw, bh);
26911da177e4SLinus Torvalds 				continue;
26921da177e4SLinus Torvalds 			}
26931da177e4SLinus Torvalds 		}
26941da177e4SLinus Torvalds 		unlock_buffer(bh);
26951da177e4SLinus Torvalds 	}
26961da177e4SLinus Torvalds }
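
/*
 * A typical caller pattern, sketched (not taken from this file): start
 * reads on a batch of buffers, then wait for and check each one:
 *
 *	ll_rw_block(READ, nr, bhs);
 *	for (i = 0; i < nr; i++) {
 *		wait_on_buffer(bhs[i]);
 *		if (!buffer_uptodate(bhs[i]))
 *			goto read_error;
 *	}
 *
 * Because already-uptodate, locked or clean buffers are silently skipped,
 * ll_rw_block() is unsuitable for data-integrity writeout; that is what
 * sync_dirty_buffer() below is for.
 */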
26971da177e4SLinus Torvalds 
26981da177e4SLinus Torvalds /*
26991da177e4SLinus Torvalds  * For a data-integrity writeout, we need to wait upon any in-progress I/O
27001da177e4SLinus Torvalds  * and then start new I/O and then wait upon it.  The caller must have a ref on
27011da177e4SLinus Torvalds  * the buffer_head.
27021da177e4SLinus Torvalds  */
27031da177e4SLinus Torvalds int sync_dirty_buffer(struct buffer_head *bh)
27041da177e4SLinus Torvalds {
27051da177e4SLinus Torvalds 	int ret = 0;
27061da177e4SLinus Torvalds 
27071da177e4SLinus Torvalds 	WARN_ON(atomic_read(&bh->b_count) < 1);
27081da177e4SLinus Torvalds 	lock_buffer(bh);
27091da177e4SLinus Torvalds 	if (test_clear_buffer_dirty(bh)) {
27101da177e4SLinus Torvalds 		get_bh(bh);
27111da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_write_sync;
27121da177e4SLinus Torvalds 		ret = submit_bh(WRITE, bh);
27131da177e4SLinus Torvalds 		wait_on_buffer(bh);
27141da177e4SLinus Torvalds 		if (buffer_eopnotsupp(bh)) {
27151da177e4SLinus Torvalds 			clear_buffer_eopnotsupp(bh);
27161da177e4SLinus Torvalds 			ret = -EOPNOTSUPP;
27171da177e4SLinus Torvalds 		}
27181da177e4SLinus Torvalds 		if (!ret && !buffer_uptodate(bh))
27191da177e4SLinus Torvalds 			ret = -EIO;
27201da177e4SLinus Torvalds 	} else {
27211da177e4SLinus Torvalds 		unlock_buffer(bh);
27221da177e4SLinus Torvalds 	}
27231da177e4SLinus Torvalds 	return ret;
27241da177e4SLinus Torvalds }
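
/*
 * A minimal usage sketch (the caller must hold a reference on the bh):
 *
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	if (err)
 *		... the write failed, or the barrier was not supported ...
 *
 * If another thread is already writing the buffer, lock_buffer() waits
 * for that I/O to finish, and test_clear_buffer_dirty() then decides
 * whether a fresh write is still needed.
 */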
27251da177e4SLinus Torvalds 
27261da177e4SLinus Torvalds /*
27271da177e4SLinus Torvalds  * try_to_free_buffers() checks if all the buffers on this particular page
27281da177e4SLinus Torvalds  * are unused, and releases them if so.
27291da177e4SLinus Torvalds  *
27301da177e4SLinus Torvalds  * Exclusion against try_to_free_buffers may be obtained by either
27311da177e4SLinus Torvalds  * locking the page or by holding its mapping's private_lock.
27321da177e4SLinus Torvalds  *
27331da177e4SLinus Torvalds  * If the page is dirty but all the buffers are clean then we need to
27341da177e4SLinus Torvalds  * be sure to mark the page clean as well.  This is because the page
27351da177e4SLinus Torvalds  * may be against a block device, and a later reattachment of buffers
27361da177e4SLinus Torvalds  * to a dirty page will set *all* buffers dirty, which would corrupt
27371da177e4SLinus Torvalds  * filesystem data on the same device.
27381da177e4SLinus Torvalds  *
27391da177e4SLinus Torvalds  * The same applies to regular filesystem pages: if all the buffers are
27401da177e4SLinus Torvalds  * clean then we set the page clean and proceed.  To do that, we require
27411da177e4SLinus Torvalds  * total exclusion from __set_page_dirty_buffers().  That is obtained with
27421da177e4SLinus Torvalds  * private_lock.
27431da177e4SLinus Torvalds  *
27441da177e4SLinus Torvalds  * try_to_free_buffers() is non-blocking.
27451da177e4SLinus Torvalds  */
27461da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh)
27471da177e4SLinus Torvalds {
27481da177e4SLinus Torvalds 	return atomic_read(&bh->b_count) |
27491da177e4SLinus Torvalds 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
27501da177e4SLinus Torvalds }
27511da177e4SLinus Torvalds 
27521da177e4SLinus Torvalds static int
27531da177e4SLinus Torvalds drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
27541da177e4SLinus Torvalds {
27551da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);
27561da177e4SLinus Torvalds 	struct buffer_head *bh;
27571da177e4SLinus Torvalds 
27581da177e4SLinus Torvalds 	bh = head;
27591da177e4SLinus Torvalds 	do {
2760de7d5a3bSakpm@osdl.org 		if (buffer_write_io_error(bh) && page->mapping)
27611da177e4SLinus Torvalds 			set_bit(AS_EIO, &page->mapping->flags);
27621da177e4SLinus Torvalds 		if (buffer_busy(bh))
27631da177e4SLinus Torvalds 			goto failed;
27641da177e4SLinus Torvalds 		bh = bh->b_this_page;
27651da177e4SLinus Torvalds 	} while (bh != head);
27661da177e4SLinus Torvalds 
27671da177e4SLinus Torvalds 	do {
27681da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
27691da177e4SLinus Torvalds 
27701da177e4SLinus Torvalds 		if (!list_empty(&bh->b_assoc_buffers))
27711da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
27721da177e4SLinus Torvalds 		bh = next;
27731da177e4SLinus Torvalds 	} while (bh != head);
27741da177e4SLinus Torvalds 	*buffers_to_free = head;
27751da177e4SLinus Torvalds 	__clear_page_buffers(page);
27761da177e4SLinus Torvalds 	return 1;
27771da177e4SLinus Torvalds failed:
27781da177e4SLinus Torvalds 	return 0;
27791da177e4SLinus Torvalds }
27801da177e4SLinus Torvalds 
27811da177e4SLinus Torvalds int try_to_free_buffers(struct page *page)
27821da177e4SLinus Torvalds {
27831da177e4SLinus Torvalds 	struct address_space * const mapping = page->mapping;
27841da177e4SLinus Torvalds 	struct buffer_head *buffers_to_free = NULL;
27851da177e4SLinus Torvalds 	int ret = 0;
27861da177e4SLinus Torvalds 
27871da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
2788ecdfc978SLinus Torvalds 	if (PageWriteback(page))
27891da177e4SLinus Torvalds 		return 0;
27901da177e4SLinus Torvalds 
27911da177e4SLinus Torvalds 	if (mapping == NULL) {		/* can this still happen? */
27921da177e4SLinus Torvalds 		ret = drop_buffers(page, &buffers_to_free);
27931da177e4SLinus Torvalds 		goto out;
27941da177e4SLinus Torvalds 	}
27951da177e4SLinus Torvalds 
27961da177e4SLinus Torvalds 	spin_lock(&mapping->private_lock);
27971da177e4SLinus Torvalds 	ret = drop_buffers(page, &buffers_to_free);
2798ecdfc978SLinus Torvalds 
2799ecdfc978SLinus Torvalds 	/*
2800ecdfc978SLinus Torvalds 	 * If the filesystem writes its buffers by hand (eg ext3)
2801ecdfc978SLinus Torvalds 	 * then we can have clean buffers against a dirty page.  We
2802ecdfc978SLinus Torvalds 	 * clean the page here; otherwise the VM will never notice
2803ecdfc978SLinus Torvalds 	 * that the filesystem did any IO at all.
2804ecdfc978SLinus Torvalds 	 *
2805ecdfc978SLinus Torvalds 	 * Also, during truncate, discard_buffer will have marked all
2806ecdfc978SLinus Torvalds 	 * the page's buffers clean.  We discover that here and clean
2807ecdfc978SLinus Torvalds 	 * the page also.
280887df7241SNick Piggin 	 *
280987df7241SNick Piggin 	 * private_lock must be held over this entire operation in order
281087df7241SNick Piggin 	 * to synchronise against __set_page_dirty_buffers and prevent the
281187df7241SNick Piggin 	 * dirty bit from being lost.
2812ecdfc978SLinus Torvalds 	 */
2813ecdfc978SLinus Torvalds 	if (ret)
2814ecdfc978SLinus Torvalds 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
281587df7241SNick Piggin 	spin_unlock(&mapping->private_lock);
28161da177e4SLinus Torvalds out:
28171da177e4SLinus Torvalds 	if (buffers_to_free) {
28181da177e4SLinus Torvalds 		struct buffer_head *bh = buffers_to_free;
28191da177e4SLinus Torvalds 
28201da177e4SLinus Torvalds 		do {
28211da177e4SLinus Torvalds 			struct buffer_head *next = bh->b_this_page;
28221da177e4SLinus Torvalds 			free_buffer_head(bh);
28231da177e4SLinus Torvalds 			bh = next;
28241da177e4SLinus Torvalds 		} while (bh != buffers_to_free);
28251da177e4SLinus Torvalds 	}
28261da177e4SLinus Torvalds 	return ret;
28271da177e4SLinus Torvalds }
28281da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers);
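
/*
 * A sketch of the usual hook-up: filesystems with no special buffer
 * lifetime rules can point ->releasepage straight at this ("examplefs"
 * is hypothetical):
 *
 *	static int examplefs_releasepage(struct page *page, gfp_t gfp_mask)
 *	{
 *		return try_to_free_buffers(page);
 *	}
 *
 * Journaling filesystems such as ext3 need their own ->releasepage so the
 * journal can be consulted before the buffers are let go.
 */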
28291da177e4SLinus Torvalds 
28303978d717SNeilBrown void block_sync_page(struct page *page)
28311da177e4SLinus Torvalds {
28321da177e4SLinus Torvalds 	struct address_space *mapping;
28331da177e4SLinus Torvalds 
28341da177e4SLinus Torvalds 	smp_mb();
28351da177e4SLinus Torvalds 	mapping = page_mapping(page);
28361da177e4SLinus Torvalds 	if (mapping)
28371da177e4SLinus Torvalds 		blk_run_backing_dev(mapping->backing_dev_info, page);
28381da177e4SLinus Torvalds }
28391da177e4SLinus Torvalds 
28401da177e4SLinus Torvalds /*
28411da177e4SLinus Torvalds  * There are no bdflush tunables left.  But distributions are
28421da177e4SLinus Torvalds  * still running obsolete flush daemons, so we terminate them here.
28431da177e4SLinus Torvalds  *
28441da177e4SLinus Torvalds  * Use of bdflush() is deprecated and will be removed in a future kernel.
28451da177e4SLinus Torvalds  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
28461da177e4SLinus Torvalds  */
28471da177e4SLinus Torvalds asmlinkage long sys_bdflush(int func, long data)
28481da177e4SLinus Torvalds {
28491da177e4SLinus Torvalds 	static int msg_count;
28501da177e4SLinus Torvalds 
28511da177e4SLinus Torvalds 	if (!capable(CAP_SYS_ADMIN))
28521da177e4SLinus Torvalds 		return -EPERM;
28531da177e4SLinus Torvalds 
28541da177e4SLinus Torvalds 	if (msg_count < 5) {
28551da177e4SLinus Torvalds 		msg_count++;
28561da177e4SLinus Torvalds 		printk(KERN_INFO
28571da177e4SLinus Torvalds 			"warning: process `%s' used the obsolete bdflush"
28581da177e4SLinus Torvalds 			" system call\n", current->comm);
28591da177e4SLinus Torvalds 		printk(KERN_INFO "Fix your initscripts?\n");
28601da177e4SLinus Torvalds 	}
28611da177e4SLinus Torvalds 
28621da177e4SLinus Torvalds 	if (func == 1)
28631da177e4SLinus Torvalds 		do_exit(0);
28641da177e4SLinus Torvalds 	return 0;
28651da177e4SLinus Torvalds }
28661da177e4SLinus Torvalds 
28671da177e4SLinus Torvalds /*
28681da177e4SLinus Torvalds  * Buffer-head allocation
28691da177e4SLinus Torvalds  */
2870e18b890bSChristoph Lameter static struct kmem_cache *bh_cachep;
28711da177e4SLinus Torvalds 
28721da177e4SLinus Torvalds /*
28731da177e4SLinus Torvalds  * Once the number of bh's in the machine exceeds this level, we start
28741da177e4SLinus Torvalds  * stripping them in writeback.
28751da177e4SLinus Torvalds  */
28761da177e4SLinus Torvalds static int max_buffer_heads;
28771da177e4SLinus Torvalds 
28781da177e4SLinus Torvalds int buffer_heads_over_limit;
28791da177e4SLinus Torvalds 
28801da177e4SLinus Torvalds struct bh_accounting {
28811da177e4SLinus Torvalds 	int nr;			/* Number of live bh's */
28821da177e4SLinus Torvalds 	int ratelimit;		/* Limit cacheline bouncing */
28831da177e4SLinus Torvalds };
28841da177e4SLinus Torvalds 
28851da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
28861da177e4SLinus Torvalds 
28871da177e4SLinus Torvalds static void recalc_bh_state(void)
28881da177e4SLinus Torvalds {
28891da177e4SLinus Torvalds 	int i;
28901da177e4SLinus Torvalds 	int tot = 0;
28911da177e4SLinus Torvalds 
28921da177e4SLinus Torvalds 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
28931da177e4SLinus Torvalds 		return;
28941da177e4SLinus Torvalds 	__get_cpu_var(bh_accounting).ratelimit = 0;
28958a143426SEric Dumazet 	for_each_online_cpu(i)
28961da177e4SLinus Torvalds 		tot += per_cpu(bh_accounting, i).nr;
28971da177e4SLinus Torvalds 	buffer_heads_over_limit = (tot > max_buffer_heads);
28981da177e4SLinus Torvalds }
28991da177e4SLinus Torvalds 
2900dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
29011da177e4SLinus Torvalds {
2902a35afb83SChristoph Lameter 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
29031da177e4SLinus Torvalds 	if (ret) {
2904a35afb83SChristoph Lameter 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
2905736c7b80SCoywolf Qi Hunt 		get_cpu_var(bh_accounting).nr++;
29061da177e4SLinus Torvalds 		recalc_bh_state();
2907736c7b80SCoywolf Qi Hunt 		put_cpu_var(bh_accounting);
29081da177e4SLinus Torvalds 	}
29091da177e4SLinus Torvalds 	return ret;
29101da177e4SLinus Torvalds }
29111da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head);
29121da177e4SLinus Torvalds 
29131da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh)
29141da177e4SLinus Torvalds {
29151da177e4SLinus Torvalds 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
29161da177e4SLinus Torvalds 	kmem_cache_free(bh_cachep, bh);
2917736c7b80SCoywolf Qi Hunt 	get_cpu_var(bh_accounting).nr--;
29181da177e4SLinus Torvalds 	recalc_bh_state();
2919736c7b80SCoywolf Qi Hunt 	put_cpu_var(bh_accounting);
29201da177e4SLinus Torvalds }
29211da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head);
29221da177e4SLinus Torvalds 
29231da177e4SLinus Torvalds static void buffer_exit_cpu(int cpu)
29241da177e4SLinus Torvalds {
29251da177e4SLinus Torvalds 	int i;
29261da177e4SLinus Torvalds 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
29271da177e4SLinus Torvalds 
29281da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
29291da177e4SLinus Torvalds 		brelse(b->bhs[i]);
29301da177e4SLinus Torvalds 		b->bhs[i] = NULL;
29311da177e4SLinus Torvalds 	}
29328a143426SEric Dumazet 	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
29338a143426SEric Dumazet 	per_cpu(bh_accounting, cpu).nr = 0;
29348a143426SEric Dumazet 	put_cpu_var(bh_accounting);
29351da177e4SLinus Torvalds }
29361da177e4SLinus Torvalds 
29371da177e4SLinus Torvalds static int buffer_cpu_notify(struct notifier_block *self,
29381da177e4SLinus Torvalds 			      unsigned long action, void *hcpu)
29391da177e4SLinus Torvalds {
29408bb78442SRafael J. Wysocki 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
29411da177e4SLinus Torvalds 		buffer_exit_cpu((unsigned long)hcpu);
29421da177e4SLinus Torvalds 	return NOTIFY_OK;
29431da177e4SLinus Torvalds }
29441da177e4SLinus Torvalds 
29451da177e4SLinus Torvalds void __init buffer_init(void)
29461da177e4SLinus Torvalds {
29471da177e4SLinus Torvalds 	int nrpages;
29481da177e4SLinus Torvalds 
2949a35afb83SChristoph Lameter 	bh_cachep = KMEM_CACHE(buffer_head,
2950a35afb83SChristoph Lameter 			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
29511da177e4SLinus Torvalds 
29521da177e4SLinus Torvalds 	/*
29531da177e4SLinus Torvalds 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
29541da177e4SLinus Torvalds 	 */
29551da177e4SLinus Torvalds 	nrpages = (nr_free_buffer_pages() * 10) / 100;
29561da177e4SLinus Torvalds 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
29571da177e4SLinus Torvalds 	hotcpu_notifier(buffer_cpu_notify, 0);
29581da177e4SLinus Torvalds }
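
/*
 * Worked example for the sizing above (illustrative numbers; assuming 4K
 * pages and, purely for arithmetic's sake, sizeof(struct buffer_head) ==
 * 64): a machine with 1,000,000 free buffer pages gets
 *
 *	nrpages          = 1,000,000 * 10 / 100  = 100,000
 *	max_buffer_heads = 100,000 * (4096 / 64) = 6,400,000
 *
 * Once the live bh count crosses that, buffer_heads_over_limit tells
 * writeback to start stripping buffers from pages.
 */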
29591da177e4SLinus Torvalds 
29601da177e4SLinus Torvalds EXPORT_SYMBOL(__bforget);
29611da177e4SLinus Torvalds EXPORT_SYMBOL(__brelse);
29621da177e4SLinus Torvalds EXPORT_SYMBOL(__wait_on_buffer);
29631da177e4SLinus Torvalds EXPORT_SYMBOL(block_commit_write);
29641da177e4SLinus Torvalds EXPORT_SYMBOL(block_prepare_write);
29651da177e4SLinus Torvalds EXPORT_SYMBOL(block_read_full_page);
29661da177e4SLinus Torvalds EXPORT_SYMBOL(block_sync_page);
29671da177e4SLinus Torvalds EXPORT_SYMBOL(block_truncate_page);
29681da177e4SLinus Torvalds EXPORT_SYMBOL(block_write_full_page);
29691da177e4SLinus Torvalds EXPORT_SYMBOL(cont_prepare_write);
29701da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_read_sync);
29711da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_write_sync);
29721da177e4SLinus Torvalds EXPORT_SYMBOL(file_fsync);
29731da177e4SLinus Torvalds EXPORT_SYMBOL(fsync_bdev);
29741da177e4SLinus Torvalds EXPORT_SYMBOL(generic_block_bmap);
29751da177e4SLinus Torvalds EXPORT_SYMBOL(generic_commit_write);
29761da177e4SLinus Torvalds EXPORT_SYMBOL(generic_cont_expand);
297705eb0b51SOGAWA Hirofumi EXPORT_SYMBOL(generic_cont_expand_simple);
29781da177e4SLinus Torvalds EXPORT_SYMBOL(init_buffer);
29791da177e4SLinus Torvalds EXPORT_SYMBOL(invalidate_bdev);
29801da177e4SLinus Torvalds EXPORT_SYMBOL(ll_rw_block);
29811da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_dirty);
29821da177e4SLinus Torvalds EXPORT_SYMBOL(submit_bh);
29831da177e4SLinus Torvalds EXPORT_SYMBOL(sync_dirty_buffer);
29841da177e4SLinus Torvalds EXPORT_SYMBOL(unlock_buffer);
2985