/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}

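/*
 * sync_buffer() is the action function handed to wait_on_bit(): it kicks
 * any plugged I/O on the buffer's block device queue and then sleeps
 * until the waited-on bit is cleared.
 */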
static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

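/*
 * Clear the lock bit with barriers on both sides: stores made while the
 * buffer was locked must be visible before the bit is seen clear, and
 * the clear must be visible before wake_up_bit() checks the waitqueue.
 */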
77fc9b52cdSHarvey Harrison void unlock_buffer(struct buffer_head *bh)
781da177e4SLinus Torvalds {
7972ed3d03SNick Piggin 	smp_mb__before_clear_bit();
801da177e4SLinus Torvalds 	clear_buffer_locked(bh);
811da177e4SLinus Torvalds 	smp_mb__after_clear_bit();
821da177e4SLinus Torvalds 	wake_up_bit(&bh->b_state, BH_Lock);
831da177e4SLinus Torvalds }
841da177e4SLinus Torvalds 
851da177e4SLinus Torvalds /*
861da177e4SLinus Torvalds  * Block until a buffer comes unlocked.  This doesn't stop it
871da177e4SLinus Torvalds  * from becoming locked again - you have to lock it yourself
881da177e4SLinus Torvalds  * if you want to preserve its state.
891da177e4SLinus Torvalds  */
901da177e4SLinus Torvalds void __wait_on_buffer(struct buffer_head * bh)
911da177e4SLinus Torvalds {
921da177e4SLinus Torvalds 	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
931da177e4SLinus Torvalds }
941da177e4SLinus Torvalds 
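/*
 * Illustrative sketch (not part of this file): to examine or act on a
 * buffer's state once I/O completes, take the lock instead of merely
 * waiting for it:
 *
 *	lock_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		err = -EIO;
 *	unlock_buffer(bh);
 */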
static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}

static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}

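/*
 * Illustrative sketch (not part of this file): a synchronous buffer read
 * built on end_buffer_read_sync(), much as __bread() ends up doing:
 *
 *	lock_buffer(bh);
 *	get_bh(bh);
 *	bh->b_end_io = end_buffer_read_sync;
 *	submit_bh(READ, bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		err = -EIO;
 */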
void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	int ret = 0;

	if (bdev)
		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
	return ret;
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = fsync_super(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}

/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;

	down(&bdev->bd_mount_sem);
	sb = get_super(bdev);
	if (sb && !(sb->s_flags & MS_RDONLY)) {
		sb->s_frozen = SB_FREEZE_WRITE;
		smp_wmb();

		__fsync_super(sb);

		sb->s_frozen = SB_FREEZE_TRANS;
		smp_wmb();

		sync_blockdev(sb->s_bdev);

		if (sb->s_op->write_super_lockfs)
			sb->s_op->write_super_lockfs(sb);
	}

	sync_blockdev(bdev);
	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
void thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	if (sb) {
		BUG_ON(sb->s_bdev != bdev);

		if (sb->s_op->unlockfs)
			sb->s_op->unlockfs(sb);
		sb->s_frozen = SB_UNFROZEN;
		smp_wmb();
		wake_up(&sb->s_wait_unfrozen);
		drop_super(sb);
	}

	up(&bdev->bd_mount_sem);
}
EXPORT_SYMBOL(thaw_bdev);

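/*
 * Illustrative sketch (not part of this file): the intended pairing of the
 * two calls above around a device snapshot; take_snapshot() is a
 * hypothetical placeholder for the snapshot work itself:
 *
 *	struct super_block *sb = freeze_bdev(bdev);
 *	take_snapshot(bdev);
 *	thaw_bdev(bdev, sb);
 */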
/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		if (!buffer_mapped(bh))
			all_mapped = 0;
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media disk, even
   if there's still dirty data not synced on disk (due to a bug in the
   device driver or to a user error), by not destroying the dirty buffers
   we could generate corruption also on the next media inserted; thus a
   parameter is necessary to handle this case in the safest way possible
   (trying not to corrupt the newly inserted disk with data belonging to
   the old, now corrupted, disk). Also for the ramdisk the natural thing
   to do in order to release the ramdisk memory is to destroy dirty buffers.

   These are two special cases. Normal usage implies that the device
   driver issues a sync on the device (without waiting for I/O completion)
   and then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	invalidate_mapping_pages(mapping, 0, -1);
}

/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone *zone;
	int nid;

	wakeup_pdflush(1024);
	yield();

	for_each_online_node(nid) {
		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
						gfp_zone(GFP_NOFS), NULL,
						&zone);
		if (zone)
			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
						GFP_NOFS);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (printk_ratelimit())
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_write;
	set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

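/*
 * Illustrative sketch (not part of this file): the typical ext2-style use
 * of this machinery.  A dependent (e.g. indirect) block is queued on the
 * inode's ->private_list when dirtied, and fsync() later flushes the list:
 *
 *	mark_buffer_dirty_inode(bh, inode);		(at modify time)
 *	err = sync_mapping_buffers(inode->i_mapping);	(at fsync time)
 */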
/*
 * The buffer's backing address_space's private_lock must be held
 */
static inline void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

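/*
 * Illustrative sketch (not part of this file) of the O_SYNC pattern the
 * comment above describes:
 *
 *	ll_rw_block(WRITE, nr, bhs);			(queue the writes)
 *	err = osync_buffers_list(lock, list);		(wait on them)
 */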
/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static int __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (TestSetPageDirty(page))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));

		if (mapping_cap_account_dirty(mapping)) {
			__inc_zone_page_state(page, NR_FILE_DIRTY);
			__inc_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			task_io_account_write(PAGE_CACHE_SIZE);
		}
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	spin_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return 1;
}

728787d2214SNick Piggin /*
7291da177e4SLinus Torvalds  * Add a page to the dirty page list.
7301da177e4SLinus Torvalds  *
7311da177e4SLinus Torvalds  * It is a sad fact of life that this function is called from several places
7321da177e4SLinus Torvalds  * deeply under spinlocking.  It may not sleep.
7331da177e4SLinus Torvalds  *
7341da177e4SLinus Torvalds  * If the page has buffers, the uptodate buffers are set dirty, to preserve
7351da177e4SLinus Torvalds  * dirty-state coherency between the page and the buffers.  It the page does
7361da177e4SLinus Torvalds  * not have buffers then when they are later attached they will all be set
7371da177e4SLinus Torvalds  * dirty.
7381da177e4SLinus Torvalds  *
7391da177e4SLinus Torvalds  * The buffers are dirtied before the page is dirtied.  There's a small race
7401da177e4SLinus Torvalds  * window in which a writepage caller may see the page cleanness but not the
7411da177e4SLinus Torvalds  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
7421da177e4SLinus Torvalds  * before the buffers, a concurrent writepage caller could clear the page dirty
7431da177e4SLinus Torvalds  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
7441da177e4SLinus Torvalds  * page on the dirty page list.
7451da177e4SLinus Torvalds  *
7461da177e4SLinus Torvalds  * We use private_lock to lock against try_to_free_buffers while using the
7471da177e4SLinus Torvalds  * page's buffer list.  Also use this to protect against clean buffers being
7481da177e4SLinus Torvalds  * added to the page after it was set dirty.
7491da177e4SLinus Torvalds  *
7501da177e4SLinus Torvalds  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
7511da177e4SLinus Torvalds  * address_space though.
7521da177e4SLinus Torvalds  */
int __set_page_dirty_buffers(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	spin_unlock(&mapping->private_lock);

	return __set_page_dirty(page, mapping, 1);
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				ll_rw_block(SWRITE_SYNC, 1, &bh);
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a page for a data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_private = NULL;
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

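/*
 * Illustrative sketch (not part of this file): attaching freshly
 * allocated buffers to a locked page, as grow_dev_page() below does:
 *
 *	head = alloc_page_buffers(page, size, 0);
 *	if (head)
 *		link_dev_buffers(page, head);
 */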
static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);

	do {
		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;

1035ea125892SChristoph Lameter 	page = find_or_create_page(inode->i_mapping, index,
1036769848c0SMel Gorman 		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
10371da177e4SLinus Torvalds 	if (!page)
10381da177e4SLinus Torvalds 		return NULL;
10391da177e4SLinus Torvalds 
1040e827f923SEric Sesterhenn 	BUG_ON(!PageLocked(page));
10411da177e4SLinus Torvalds 
10421da177e4SLinus Torvalds 	if (page_has_buffers(page)) {
10431da177e4SLinus Torvalds 		bh = page_buffers(page);
10441da177e4SLinus Torvalds 		if (bh->b_size == size) {
10451da177e4SLinus Torvalds 			init_page_buffers(page, bdev, block, size);
10461da177e4SLinus Torvalds 			return page;
10471da177e4SLinus Torvalds 		}
10481da177e4SLinus Torvalds 		if (!try_to_free_buffers(page))
10491da177e4SLinus Torvalds 			goto failed;
10501da177e4SLinus Torvalds 	}
10511da177e4SLinus Torvalds 
10521da177e4SLinus Torvalds 	/*
10531da177e4SLinus Torvalds 	 * Allocate some buffers for this page
10541da177e4SLinus Torvalds 	 */
10551da177e4SLinus Torvalds 	bh = alloc_page_buffers(page, size, 0);
10561da177e4SLinus Torvalds 	if (!bh)
10571da177e4SLinus Torvalds 		goto failed;
10581da177e4SLinus Torvalds 
10591da177e4SLinus Torvalds 	/*
10601da177e4SLinus Torvalds 	 * Link the page to the buffers and initialise them.  Take the
10611da177e4SLinus Torvalds 	 * lock to be atomic wrt __find_get_block(), which does not
10621da177e4SLinus Torvalds 	 * run under the page lock.
10631da177e4SLinus Torvalds 	 */
10641da177e4SLinus Torvalds 	spin_lock(&inode->i_mapping->private_lock);
10651da177e4SLinus Torvalds 	link_dev_buffers(page, bh);
10661da177e4SLinus Torvalds 	init_page_buffers(page, bdev, block, size);
10671da177e4SLinus Torvalds 	spin_unlock(&inode->i_mapping->private_lock);
10681da177e4SLinus Torvalds 	return page;
10691da177e4SLinus Torvalds 
10701da177e4SLinus Torvalds failed:
10711da177e4SLinus Torvalds 	BUG();
10721da177e4SLinus Torvalds 	unlock_page(page);
10731da177e4SLinus Torvalds 	page_cache_release(page);
10741da177e4SLinus Torvalds 	return NULL;
10751da177e4SLinus Torvalds }
10761da177e4SLinus Torvalds 
10771da177e4SLinus Torvalds /*
10781da177e4SLinus Torvalds  * Create buffers for the specified block device block's page.  If
10791da177e4SLinus Torvalds  * that page was dirty, the buffers are set dirty also.
10801da177e4SLinus Torvalds  */
1081858119e1SArjan van de Ven static int
10821da177e4SLinus Torvalds grow_buffers(struct block_device *bdev, sector_t block, int size)
10831da177e4SLinus Torvalds {
10841da177e4SLinus Torvalds 	struct page *page;
10851da177e4SLinus Torvalds 	pgoff_t index;
10861da177e4SLinus Torvalds 	int sizebits;
10871da177e4SLinus Torvalds 
10881da177e4SLinus Torvalds 	sizebits = -1;
10891da177e4SLinus Torvalds 	do {
10901da177e4SLinus Torvalds 		sizebits++;
10911da177e4SLinus Torvalds 	} while ((size << sizebits) < PAGE_SIZE);
10921da177e4SLinus Torvalds 
10931da177e4SLinus Torvalds 	index = block >> sizebits;
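	/*
	 * Worked example (editor's note, not in the original source): with
	 * PAGE_SIZE == 4096 and size == 512 the loop above yields
	 * sizebits == 3, i.e. eight 512-byte blocks per page.  Device block
	 * 1003 then maps to pagecache index 1003 >> 3 == 125, and the
	 * "block = index << sizebits" below resets block to the first
	 * block of that page, 125 << 3 == 1000.
	 */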
10941da177e4SLinus Torvalds 
1095e5657933SAndrew Morton 	/*
1096e5657933SAndrew Morton 	 * Check for a block which wants to lie outside our maximum possible
1097e5657933SAndrew Morton 	 * pagecache index.  (this comparison is done using sector_t types).
1098e5657933SAndrew Morton 	 */
1099e5657933SAndrew Morton 	if (unlikely(index != block >> sizebits)) {
1100e5657933SAndrew Morton 		char b[BDEVNAME_SIZE];
1101e5657933SAndrew Morton 
1102e5657933SAndrew Morton 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1103e5657933SAndrew Morton 			"device %s\n",
11048e24eea7SHarvey Harrison 			__func__, (unsigned long long)block,
1105e5657933SAndrew Morton 			bdevname(bdev, b));
1106e5657933SAndrew Morton 		return -EIO;
1107e5657933SAndrew Morton 	}
1108e5657933SAndrew Morton 	block = index << sizebits;
11091da177e4SLinus Torvalds 	/* Create a page with the proper size buffers.. */
11101da177e4SLinus Torvalds 	page = grow_dev_page(bdev, block, index, size);
11111da177e4SLinus Torvalds 	if (!page)
11121da177e4SLinus Torvalds 		return 0;
11131da177e4SLinus Torvalds 	unlock_page(page);
11141da177e4SLinus Torvalds 	page_cache_release(page);
11151da177e4SLinus Torvalds 	return 1;
11161da177e4SLinus Torvalds }
11171da177e4SLinus Torvalds 
111875c96f85SAdrian Bunk static struct buffer_head *
11191da177e4SLinus Torvalds __getblk_slow(struct block_device *bdev, sector_t block, int size)
11201da177e4SLinus Torvalds {
11211da177e4SLinus Torvalds 	/* Size must be a multiple of the hard sector size */
11221da177e4SLinus Torvalds 	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
11231da177e4SLinus Torvalds 			(size < 512 || size > PAGE_SIZE))) {
11241da177e4SLinus Torvalds 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
11251da177e4SLinus Torvalds 					size);
11261da177e4SLinus Torvalds 		printk(KERN_ERR "hardsect size: %d\n",
11271da177e4SLinus Torvalds 					bdev_hardsect_size(bdev));
11281da177e4SLinus Torvalds 
11291da177e4SLinus Torvalds 		dump_stack();
11301da177e4SLinus Torvalds 		return NULL;
11311da177e4SLinus Torvalds 	}
11321da177e4SLinus Torvalds 
11331da177e4SLinus Torvalds 	for (;;) {
11341da177e4SLinus Torvalds 		struct buffer_head * bh;
1135e5657933SAndrew Morton 		int ret;
11361da177e4SLinus Torvalds 
11371da177e4SLinus Torvalds 		bh = __find_get_block(bdev, block, size);
11381da177e4SLinus Torvalds 		if (bh)
11391da177e4SLinus Torvalds 			return bh;
11401da177e4SLinus Torvalds 
1141e5657933SAndrew Morton 		ret = grow_buffers(bdev, block, size);
1142e5657933SAndrew Morton 		if (ret < 0)
1143e5657933SAndrew Morton 			return NULL;
1144e5657933SAndrew Morton 		if (ret == 0)
11451da177e4SLinus Torvalds 			free_more_memory();
11461da177e4SLinus Torvalds 	}
11471da177e4SLinus Torvalds }
11481da177e4SLinus Torvalds 
11491da177e4SLinus Torvalds /*
11501da177e4SLinus Torvalds  * The relationship between dirty buffers and dirty pages:
11511da177e4SLinus Torvalds  *
11521da177e4SLinus Torvalds  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
11531da177e4SLinus Torvalds  * the page is tagged dirty in its radix tree.
11541da177e4SLinus Torvalds  *
11551da177e4SLinus Torvalds  * At all times, the dirtiness of the buffers represents the dirtiness of
11561da177e4SLinus Torvalds  * subsections of the page.  If the page has buffers, the page dirty bit is
11571da177e4SLinus Torvalds  * merely a hint about the true dirty state.
11581da177e4SLinus Torvalds  *
11591da177e4SLinus Torvalds  * When a page is set dirty in its entirety, all its buffers are marked dirty
11601da177e4SLinus Torvalds  * (if the page has buffers).
11611da177e4SLinus Torvalds  *
11621da177e4SLinus Torvalds  * When a buffer is marked dirty, its page is dirtied, but the page's other
11631da177e4SLinus Torvalds  * buffers are not.
11641da177e4SLinus Torvalds  *
11651da177e4SLinus Torvalds  * Also.  When blockdev buffers are explicitly read with bread(), they
11661da177e4SLinus Torvalds  * individually become uptodate.  But their backing page remains not
11671da177e4SLinus Torvalds  * uptodate - even if all of its buffers are uptodate.  A subsequent
11681da177e4SLinus Torvalds  * block_read_full_page() against that page will discover all the uptodate
11691da177e4SLinus Torvalds  * buffers, will set the page uptodate and will perform no I/O.
11701da177e4SLinus Torvalds  */
11711da177e4SLinus Torvalds 
11721da177e4SLinus Torvalds /**
11731da177e4SLinus Torvalds  * mark_buffer_dirty - mark a buffer_head as needing writeout
117467be2dd1SMartin Waitz  * @bh: the buffer_head to mark dirty
11751da177e4SLinus Torvalds  *
11761da177e4SLinus Torvalds  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
11771da177e4SLinus Torvalds  * backing page dirty, then tag the page as dirty in its address_space's radix
11781da177e4SLinus Torvalds  * tree and then attach the address_space's inode to its superblock's dirty
11791da177e4SLinus Torvalds  * inode list.
11801da177e4SLinus Torvalds  *
11811da177e4SLinus Torvalds  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
11821da177e4SLinus Torvalds  * mapping->tree_lock and the global inode_lock.
11831da177e4SLinus Torvalds  */
1184fc9b52cdSHarvey Harrison void mark_buffer_dirty(struct buffer_head *bh)
11851da177e4SLinus Torvalds {
1186787d2214SNick Piggin 	WARN_ON_ONCE(!buffer_uptodate(bh));
11871be62dc1SLinus Torvalds 
11881be62dc1SLinus Torvalds 	/*
11891be62dc1SLinus Torvalds 	 * Very *carefully* optimize the it-is-already-dirty case.
11901be62dc1SLinus Torvalds 	 *
11911be62dc1SLinus Torvalds 	 * Don't let the final "is it dirty" escape to before we
11921be62dc1SLinus Torvalds 	 * perhaps modified the buffer.
11931be62dc1SLinus Torvalds 	 */
11941be62dc1SLinus Torvalds 	if (buffer_dirty(bh)) {
11951be62dc1SLinus Torvalds 		smp_mb();
11961be62dc1SLinus Torvalds 		if (buffer_dirty(bh))
11971be62dc1SLinus Torvalds 			return;
11981be62dc1SLinus Torvalds 	}
11991be62dc1SLinus Torvalds 
12001be62dc1SLinus Torvalds 	if (!test_set_buffer_dirty(bh))
1201787d2214SNick Piggin 		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
12021da177e4SLinus Torvalds }
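
/*
 * Editor's illustration (not part of the original file): the classic
 * read-modify-write pattern against buffer-cache metadata.  The helper
 * name and the byte being patched are hypothetical; sb_bread(),
 * lock_buffer() and mark_buffer_dirty() are the real APIs.
 */
#if 0
static int example_patch_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_bread(sb, block);	/* read, now uptodate */

	if (!bh)
		return -EIO;
	lock_buffer(bh);
	bh->b_data[0] = 0;	/* modify the in-memory copy... */
	unlock_buffer(bh);
	mark_buffer_dirty(bh);	/* ...then dirty the bh and its backing page */
	brelse(bh);
	return 0;
}
#endif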
12031da177e4SLinus Torvalds 
12041da177e4SLinus Torvalds /*
12051da177e4SLinus Torvalds  * Decrement a buffer_head's reference count.  If all buffers against a page
12061da177e4SLinus Torvalds  * have zero reference count, are clean and unlocked, and if the page is clean
12071da177e4SLinus Torvalds  * and unlocked then try_to_free_buffers() may strip the buffers from the page
12081da177e4SLinus Torvalds  * in preparation for freeing it (sometimes, rarely, buffers are removed from
12091da177e4SLinus Torvalds  * a page but it ends up not being freed, and buffers may later be reattached).
12101da177e4SLinus Torvalds  */
12111da177e4SLinus Torvalds void __brelse(struct buffer_head * buf)
12121da177e4SLinus Torvalds {
12131da177e4SLinus Torvalds 	if (atomic_read(&buf->b_count)) {
12141da177e4SLinus Torvalds 		put_bh(buf);
12151da177e4SLinus Torvalds 		return;
12161da177e4SLinus Torvalds 	}
12171da177e4SLinus Torvalds 	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
12181da177e4SLinus Torvalds 	WARN_ON(1);
12191da177e4SLinus Torvalds }
12201da177e4SLinus Torvalds 
12211da177e4SLinus Torvalds /*
12221da177e4SLinus Torvalds  * bforget() is like brelse(), except it discards any
12231da177e4SLinus Torvalds  * potentially dirty data.
12241da177e4SLinus Torvalds  */
12251da177e4SLinus Torvalds void __bforget(struct buffer_head *bh)
12261da177e4SLinus Torvalds {
12271da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
1228535ee2fbSJan Kara 	if (bh->b_assoc_map) {
12291da177e4SLinus Torvalds 		struct address_space *buffer_mapping = bh->b_page->mapping;
12301da177e4SLinus Torvalds 
12311da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
12321da177e4SLinus Torvalds 		list_del_init(&bh->b_assoc_buffers);
123358ff407bSJan Kara 		bh->b_assoc_map = NULL;
12341da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
12351da177e4SLinus Torvalds 	}
12361da177e4SLinus Torvalds 	__brelse(bh);
12371da177e4SLinus Torvalds }
12381da177e4SLinus Torvalds 
12391da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh)
12401da177e4SLinus Torvalds {
12411da177e4SLinus Torvalds 	lock_buffer(bh);
12421da177e4SLinus Torvalds 	if (buffer_uptodate(bh)) {
12431da177e4SLinus Torvalds 		unlock_buffer(bh);
12441da177e4SLinus Torvalds 		return bh;
12451da177e4SLinus Torvalds 	} else {
12461da177e4SLinus Torvalds 		get_bh(bh);
12471da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_read_sync;
12481da177e4SLinus Torvalds 		submit_bh(READ, bh);
12491da177e4SLinus Torvalds 		wait_on_buffer(bh);
12501da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
12511da177e4SLinus Torvalds 			return bh;
12521da177e4SLinus Torvalds 	}
12531da177e4SLinus Torvalds 	brelse(bh);
12541da177e4SLinus Torvalds 	return NULL;
12551da177e4SLinus Torvalds }
12561da177e4SLinus Torvalds 
12571da177e4SLinus Torvalds /*
12581da177e4SLinus Torvalds  * Per-cpu buffer LRU implementation, used to reduce the cost of __find_get_block().
12591da177e4SLinus Torvalds  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
12601da177e4SLinus Torvalds  * refcount elevated by one when they're in an LRU.  A buffer can only appear
12611da177e4SLinus Torvalds  * once in a particular CPU's LRU.  A single buffer can be present in multiple
12621da177e4SLinus Torvalds  * CPU's LRUs at the same time.
12631da177e4SLinus Torvalds  *
12641da177e4SLinus Torvalds  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
12651da177e4SLinus Torvalds  * sb_find_get_block().
12661da177e4SLinus Torvalds  *
12671da177e4SLinus Torvalds  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
12681da177e4SLinus Torvalds  * a local interrupt disable for that.
12691da177e4SLinus Torvalds  */
12701da177e4SLinus Torvalds 
12711da177e4SLinus Torvalds #define BH_LRU_SIZE	8
12721da177e4SLinus Torvalds 
12731da177e4SLinus Torvalds struct bh_lru {
12741da177e4SLinus Torvalds 	struct buffer_head *bhs[BH_LRU_SIZE];
12751da177e4SLinus Torvalds };
12761da177e4SLinus Torvalds 
12771da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
12781da177e4SLinus Torvalds 
12791da177e4SLinus Torvalds #ifdef CONFIG_SMP
12801da177e4SLinus Torvalds #define bh_lru_lock()	local_irq_disable()
12811da177e4SLinus Torvalds #define bh_lru_unlock()	local_irq_enable()
12821da177e4SLinus Torvalds #else
12831da177e4SLinus Torvalds #define bh_lru_lock()	preempt_disable()
12841da177e4SLinus Torvalds #define bh_lru_unlock()	preempt_enable()
12851da177e4SLinus Torvalds #endif
12861da177e4SLinus Torvalds 
12871da177e4SLinus Torvalds static inline void check_irqs_on(void)
12881da177e4SLinus Torvalds {
12891da177e4SLinus Torvalds #ifdef irqs_disabled
12901da177e4SLinus Torvalds 	BUG_ON(irqs_disabled());
12911da177e4SLinus Torvalds #endif
12921da177e4SLinus Torvalds }
12931da177e4SLinus Torvalds 
12941da177e4SLinus Torvalds /*
12951da177e4SLinus Torvalds  * The LRU management algorithm is dopey-but-simple.  Sorry.
12961da177e4SLinus Torvalds  */
12971da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh)
12981da177e4SLinus Torvalds {
12991da177e4SLinus Torvalds 	struct buffer_head *evictee = NULL;
13001da177e4SLinus Torvalds 	struct bh_lru *lru;
13011da177e4SLinus Torvalds 
13021da177e4SLinus Torvalds 	check_irqs_on();
13031da177e4SLinus Torvalds 	bh_lru_lock();
13041da177e4SLinus Torvalds 	lru = &__get_cpu_var(bh_lrus);
13051da177e4SLinus Torvalds 	if (lru->bhs[0] != bh) {
13061da177e4SLinus Torvalds 		struct buffer_head *bhs[BH_LRU_SIZE];
13071da177e4SLinus Torvalds 		int in;
13081da177e4SLinus Torvalds 		int out = 0;
13091da177e4SLinus Torvalds 
13101da177e4SLinus Torvalds 		get_bh(bh);
13111da177e4SLinus Torvalds 		bhs[out++] = bh;
13121da177e4SLinus Torvalds 		for (in = 0; in < BH_LRU_SIZE; in++) {
13131da177e4SLinus Torvalds 			struct buffer_head *bh2 = lru->bhs[in];
13141da177e4SLinus Torvalds 
13151da177e4SLinus Torvalds 			if (bh2 == bh) {
13161da177e4SLinus Torvalds 				__brelse(bh2);
13171da177e4SLinus Torvalds 			} else {
13181da177e4SLinus Torvalds 				if (out >= BH_LRU_SIZE) {
13191da177e4SLinus Torvalds 					BUG_ON(evictee != NULL);
13201da177e4SLinus Torvalds 					evictee = bh2;
13211da177e4SLinus Torvalds 				} else {
13221da177e4SLinus Torvalds 					bhs[out++] = bh2;
13231da177e4SLinus Torvalds 				}
13241da177e4SLinus Torvalds 			}
13251da177e4SLinus Torvalds 		}
13261da177e4SLinus Torvalds 		while (out < BH_LRU_SIZE)
13271da177e4SLinus Torvalds 			bhs[out++] = NULL;
13281da177e4SLinus Torvalds 		memcpy(lru->bhs, bhs, sizeof(bhs));
13291da177e4SLinus Torvalds 	}
13301da177e4SLinus Torvalds 	bh_lru_unlock();
13311da177e4SLinus Torvalds 
13321da177e4SLinus Torvalds 	if (evictee)
13331da177e4SLinus Torvalds 		__brelse(evictee);
13341da177e4SLinus Torvalds }
13351da177e4SLinus Torvalds 
13361da177e4SLinus Torvalds /*
13371da177e4SLinus Torvalds  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
13381da177e4SLinus Torvalds  */
1339858119e1SArjan van de Ven static struct buffer_head *
13403991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
13411da177e4SLinus Torvalds {
13421da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
13431da177e4SLinus Torvalds 	struct bh_lru *lru;
13443991d3bdSTomasz Kvarsin 	unsigned int i;
13451da177e4SLinus Torvalds 
13461da177e4SLinus Torvalds 	check_irqs_on();
13471da177e4SLinus Torvalds 	bh_lru_lock();
13481da177e4SLinus Torvalds 	lru = &__get_cpu_var(bh_lrus);
13491da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
13501da177e4SLinus Torvalds 		struct buffer_head *bh = lru->bhs[i];
13511da177e4SLinus Torvalds 
13521da177e4SLinus Torvalds 		if (bh && bh->b_bdev == bdev &&
13531da177e4SLinus Torvalds 				bh->b_blocknr == block && bh->b_size == size) {
13541da177e4SLinus Torvalds 			if (i) {
13551da177e4SLinus Torvalds 				while (i) {
13561da177e4SLinus Torvalds 					lru->bhs[i] = lru->bhs[i - 1];
13571da177e4SLinus Torvalds 					i--;
13581da177e4SLinus Torvalds 				}
13591da177e4SLinus Torvalds 				lru->bhs[0] = bh;
13601da177e4SLinus Torvalds 			}
13611da177e4SLinus Torvalds 			get_bh(bh);
13621da177e4SLinus Torvalds 			ret = bh;
13631da177e4SLinus Torvalds 			break;
13641da177e4SLinus Torvalds 		}
13651da177e4SLinus Torvalds 	}
13661da177e4SLinus Torvalds 	bh_lru_unlock();
13671da177e4SLinus Torvalds 	return ret;
13681da177e4SLinus Torvalds }
13691da177e4SLinus Torvalds 
13701da177e4SLinus Torvalds /*
13711da177e4SLinus Torvalds  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
13721da177e4SLinus Torvalds  * it in the LRU and mark it as accessed.  If it is not present then return
13731da177e4SLinus Torvalds  * NULL.
13741da177e4SLinus Torvalds  */
13751da177e4SLinus Torvalds struct buffer_head *
13763991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
13771da177e4SLinus Torvalds {
13781da177e4SLinus Torvalds 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
13791da177e4SLinus Torvalds 
13801da177e4SLinus Torvalds 	if (bh == NULL) {
1381385fd4c5SCoywolf Qi Hunt 		bh = __find_get_block_slow(bdev, block);
13821da177e4SLinus Torvalds 		if (bh)
13831da177e4SLinus Torvalds 			bh_lru_install(bh);
13841da177e4SLinus Torvalds 	}
13851da177e4SLinus Torvalds 	if (bh)
13861da177e4SLinus Torvalds 		touch_buffer(bh);
13871da177e4SLinus Torvalds 	return bh;
13881da177e4SLinus Torvalds }
13891da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block);
13901da177e4SLinus Torvalds 
13911da177e4SLinus Torvalds /*
13921da177e4SLinus Torvalds  * __getblk will locate (and, if necessary, create) the buffer_head
13931da177e4SLinus Torvalds  * which corresponds to the passed block_device, block and size. The
13941da177e4SLinus Torvalds  * returned buffer has its reference count incremented.
13951da177e4SLinus Torvalds  *
13961da177e4SLinus Torvalds  * __getblk() cannot fail - it just keeps trying.  If you pass it an
13971da177e4SLinus Torvalds  * illegal block number, __getblk() will happily return a buffer_head
13981da177e4SLinus Torvalds  * which represents the non-existent block.  Very weird.
13991da177e4SLinus Torvalds  *
14001da177e4SLinus Torvalds  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
14011da177e4SLinus Torvalds  * attempt is failing.  FIXME, perhaps?
14021da177e4SLinus Torvalds  */
14031da177e4SLinus Torvalds struct buffer_head *
14043991d3bdSTomasz Kvarsin __getblk(struct block_device *bdev, sector_t block, unsigned size)
14051da177e4SLinus Torvalds {
14061da177e4SLinus Torvalds 	struct buffer_head *bh = __find_get_block(bdev, block, size);
14071da177e4SLinus Torvalds 
14081da177e4SLinus Torvalds 	might_sleep();
14091da177e4SLinus Torvalds 	if (bh == NULL)
14101da177e4SLinus Torvalds 		bh = __getblk_slow(bdev, block, size);
14111da177e4SLinus Torvalds 	return bh;
14121da177e4SLinus Torvalds }
14131da177e4SLinus Torvalds EXPORT_SYMBOL(__getblk);
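
/*
 * Editor's illustration (not part of the original file): because
 * __getblk() cannot fail, a caller that intends to overwrite the whole
 * block can skip reading it first.  The helper name and the 512-byte
 * block size are hypothetical.
 */
#if 0
static void example_overwrite_block(struct block_device *bdev, sector_t block)
{
	struct buffer_head *bh = __getblk(bdev, block, 512);

	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);	/* new contents for the block */
	set_buffer_uptodate(bh);	/* stale on-disk data is irrelevant */
	unlock_buffer(bh);
	mark_buffer_dirty(bh);
	brelse(bh);
}
#endif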
14141da177e4SLinus Torvalds 
14151da177e4SLinus Torvalds /*
14161da177e4SLinus Torvalds  * Do async read-ahead on a buffer..
14171da177e4SLinus Torvalds  */
14183991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
14191da177e4SLinus Torvalds {
14201da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
1421a3e713b5SAndrew Morton 	if (likely(bh)) {
14221da177e4SLinus Torvalds 		ll_rw_block(READA, 1, &bh);
14231da177e4SLinus Torvalds 		brelse(bh);
14241da177e4SLinus Torvalds 	}
1425a3e713b5SAndrew Morton }
14261da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead);
14271da177e4SLinus Torvalds 
14281da177e4SLinus Torvalds /**
14291da177e4SLinus Torvalds  *  __bread() - reads a specified block and returns the bh
143067be2dd1SMartin Waitz  *  @bdev: the block_device to read from
14311da177e4SLinus Torvalds  *  @block: number of block
14321da177e4SLinus Torvalds  *  @size: size (in bytes) to read
14331da177e4SLinus Torvalds  *
14341da177e4SLinus Torvalds  *  Reads a specified block, and returns buffer head that contains it.
14351da177e4SLinus Torvalds  *  It returns NULL if the block was unreadable.
14361da177e4SLinus Torvalds  */
14371da177e4SLinus Torvalds struct buffer_head *
14383991d3bdSTomasz Kvarsin __bread(struct block_device *bdev, sector_t block, unsigned size)
14391da177e4SLinus Torvalds {
14401da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
14411da177e4SLinus Torvalds 
1442a3e713b5SAndrew Morton 	if (likely(bh) && !buffer_uptodate(bh))
14431da177e4SLinus Torvalds 		bh = __bread_slow(bh);
14441da177e4SLinus Torvalds 	return bh;
14451da177e4SLinus Torvalds }
14461da177e4SLinus Torvalds EXPORT_SYMBOL(__bread);
14471da177e4SLinus Torvalds 
14481da177e4SLinus Torvalds /*
14491da177e4SLinus Torvalds  * invalidate_bh_lrus() is called rarely - but not only at unmount.
14501da177e4SLinus Torvalds  * This doesn't race because it runs in each cpu either in irq
14511da177e4SLinus Torvalds  * or with preempt disabled.
14521da177e4SLinus Torvalds  */
14531da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg)
14541da177e4SLinus Torvalds {
14551da177e4SLinus Torvalds 	struct bh_lru *b = &get_cpu_var(bh_lrus);
14561da177e4SLinus Torvalds 	int i;
14571da177e4SLinus Torvalds 
14581da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
14591da177e4SLinus Torvalds 		brelse(b->bhs[i]);
14601da177e4SLinus Torvalds 		b->bhs[i] = NULL;
14611da177e4SLinus Torvalds 	}
14621da177e4SLinus Torvalds 	put_cpu_var(bh_lrus);
14631da177e4SLinus Torvalds }
14641da177e4SLinus Torvalds 
1465f9a14399SPeter Zijlstra void invalidate_bh_lrus(void)
14661da177e4SLinus Torvalds {
146715c8b6c1SJens Axboe 	on_each_cpu(invalidate_bh_lru, NULL, 1);
14681da177e4SLinus Torvalds }
14699db5579bSNick Piggin EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
14701da177e4SLinus Torvalds 
14711da177e4SLinus Torvalds void set_bh_page(struct buffer_head *bh,
14721da177e4SLinus Torvalds 		struct page *page, unsigned long offset)
14731da177e4SLinus Torvalds {
14741da177e4SLinus Torvalds 	bh->b_page = page;
1475e827f923SEric Sesterhenn 	BUG_ON(offset >= PAGE_SIZE);
14761da177e4SLinus Torvalds 	if (PageHighMem(page))
14771da177e4SLinus Torvalds 		/*
14781da177e4SLinus Torvalds 		 * This catches illegal uses and preserves the offset:
14791da177e4SLinus Torvalds 		 */
14801da177e4SLinus Torvalds 		bh->b_data = (char *)(0 + offset);
14811da177e4SLinus Torvalds 	else
14821da177e4SLinus Torvalds 		bh->b_data = page_address(page) + offset;
14831da177e4SLinus Torvalds }
14841da177e4SLinus Torvalds EXPORT_SYMBOL(set_bh_page);
14851da177e4SLinus Torvalds 
14861da177e4SLinus Torvalds /*
14871da177e4SLinus Torvalds  * Called when truncating a buffer on a page completely.
14881da177e4SLinus Torvalds  */
1489858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh)
14901da177e4SLinus Torvalds {
14911da177e4SLinus Torvalds 	lock_buffer(bh);
14921da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
14931da177e4SLinus Torvalds 	bh->b_bdev = NULL;
14941da177e4SLinus Torvalds 	clear_buffer_mapped(bh);
14951da177e4SLinus Torvalds 	clear_buffer_req(bh);
14961da177e4SLinus Torvalds 	clear_buffer_new(bh);
14971da177e4SLinus Torvalds 	clear_buffer_delay(bh);
149833a266ddSDavid Chinner 	clear_buffer_unwritten(bh);
14991da177e4SLinus Torvalds 	unlock_buffer(bh);
15001da177e4SLinus Torvalds }
15011da177e4SLinus Torvalds 
15021da177e4SLinus Torvalds /**
15031da177e4SLinus Torvalds  * block_invalidatepage - invalidate part or all of a buffer-backed page
15041da177e4SLinus Torvalds  *
15051da177e4SLinus Torvalds  * @page: the page which is affected
15061da177e4SLinus Torvalds  * @offset: the index of the truncation point
15071da177e4SLinus Torvalds  *
15081da177e4SLinus Torvalds  * block_invalidatepage() is called when all or part of the page has become
15091da177e4SLinus Torvalds  * invalidated by a truncate operation.
15101da177e4SLinus Torvalds  *
15111da177e4SLinus Torvalds  * block_invalidatepage() does not have to release all buffers, but it must
15121da177e4SLinus Torvalds  * ensure that no dirty buffer is left outside @offset and that no I/O
15131da177e4SLinus Torvalds  * is underway against any of the blocks which are outside the truncation
15141da177e4SLinus Torvalds  * point, because the caller is about to free (and possibly reuse) those
15151da177e4SLinus Torvalds  * blocks on-disk.
15161da177e4SLinus Torvalds  */
15172ff28e22SNeilBrown void block_invalidatepage(struct page *page, unsigned long offset)
15181da177e4SLinus Torvalds {
15191da177e4SLinus Torvalds 	struct buffer_head *head, *bh, *next;
15201da177e4SLinus Torvalds 	unsigned int curr_off = 0;
15211da177e4SLinus Torvalds 
15221da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
15231da177e4SLinus Torvalds 	if (!page_has_buffers(page))
15241da177e4SLinus Torvalds 		goto out;
15251da177e4SLinus Torvalds 
15261da177e4SLinus Torvalds 	head = page_buffers(page);
15271da177e4SLinus Torvalds 	bh = head;
15281da177e4SLinus Torvalds 	do {
15291da177e4SLinus Torvalds 		unsigned int next_off = curr_off + bh->b_size;
15301da177e4SLinus Torvalds 		next = bh->b_this_page;
15311da177e4SLinus Torvalds 
15321da177e4SLinus Torvalds 		/*
15331da177e4SLinus Torvalds 		 * is this block fully invalidated?
15341da177e4SLinus Torvalds 		 */
15351da177e4SLinus Torvalds 		if (offset <= curr_off)
15361da177e4SLinus Torvalds 			discard_buffer(bh);
15371da177e4SLinus Torvalds 		curr_off = next_off;
15381da177e4SLinus Torvalds 		bh = next;
15391da177e4SLinus Torvalds 	} while (bh != head);
15401da177e4SLinus Torvalds 
15411da177e4SLinus Torvalds 	/*
15421da177e4SLinus Torvalds 	 * We release buffers only if the entire page is being invalidated.
15431da177e4SLinus Torvalds 	 * The get_block cached value has been unconditionally invalidated,
15441da177e4SLinus Torvalds 	 * so real IO is not possible anymore.
15451da177e4SLinus Torvalds 	 */
15461da177e4SLinus Torvalds 	if (offset == 0)
15472ff28e22SNeilBrown 		try_to_release_page(page, 0);
15481da177e4SLinus Torvalds out:
15492ff28e22SNeilBrown 	return;
15501da177e4SLinus Torvalds }
15511da177e4SLinus Torvalds EXPORT_SYMBOL(block_invalidatepage);
15521da177e4SLinus Torvalds 
15531da177e4SLinus Torvalds /*
15541da177e4SLinus Torvalds  * We attach and possibly dirty the buffers atomically wrt
15551da177e4SLinus Torvalds  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
15561da177e4SLinus Torvalds  * is already excluded via the page lock.
15571da177e4SLinus Torvalds  */
15581da177e4SLinus Torvalds void create_empty_buffers(struct page *page,
15591da177e4SLinus Torvalds 			unsigned long blocksize, unsigned long b_state)
15601da177e4SLinus Torvalds {
15611da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *tail;
15621da177e4SLinus Torvalds 
15631da177e4SLinus Torvalds 	head = alloc_page_buffers(page, blocksize, 1);
15641da177e4SLinus Torvalds 	bh = head;
15651da177e4SLinus Torvalds 	do {
15661da177e4SLinus Torvalds 		bh->b_state |= b_state;
15671da177e4SLinus Torvalds 		tail = bh;
15681da177e4SLinus Torvalds 		bh = bh->b_this_page;
15691da177e4SLinus Torvalds 	} while (bh);
15701da177e4SLinus Torvalds 	tail->b_this_page = head;
15711da177e4SLinus Torvalds 
15721da177e4SLinus Torvalds 	spin_lock(&page->mapping->private_lock);
15731da177e4SLinus Torvalds 	if (PageUptodate(page) || PageDirty(page)) {
15741da177e4SLinus Torvalds 		bh = head;
15751da177e4SLinus Torvalds 		do {
15761da177e4SLinus Torvalds 			if (PageDirty(page))
15771da177e4SLinus Torvalds 				set_buffer_dirty(bh);
15781da177e4SLinus Torvalds 			if (PageUptodate(page))
15791da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
15801da177e4SLinus Torvalds 			bh = bh->b_this_page;
15811da177e4SLinus Torvalds 		} while (bh != head);
15821da177e4SLinus Torvalds 	}
15831da177e4SLinus Torvalds 	attach_page_buffers(page, head);
15841da177e4SLinus Torvalds 	spin_unlock(&page->mapping->private_lock);
15851da177e4SLinus Torvalds }
15861da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers);
15871da177e4SLinus Torvalds 
15881da177e4SLinus Torvalds /*
15891da177e4SLinus Torvalds  * We are taking a block for data and we don't want any output from any
15901da177e4SLinus Torvalds  * buffer-cache aliases from the moment this function returns until the
15911da177e4SLinus Torvalds  * moment when something explicitly marks the buffer
15921da177e4SLinus Torvalds  * dirty (hopefully that will not happen until we free that block ;-)
15931da177e4SLinus Torvalds  * We don't even need to mark it not-uptodate - nobody can expect
15941da177e4SLinus Torvalds  * anything from a newly allocated buffer anyway. We used to use
15951da177e4SLinus Torvalds  * unmap_buffer() for such invalidation, but that was wrong. We definitely
15961da177e4SLinus Torvalds  * don't want to mark the alias unmapped, for example - it would confuse
15971da177e4SLinus Torvalds  * anyone who might pick it with bread() afterwards...
15981da177e4SLinus Torvalds  *
15991da177e4SLinus Torvalds  * Also..  Note that bforget() doesn't lock the buffer.  So there can
16001da177e4SLinus Torvalds  * be writeout I/O going on against recently-freed buffers.  We don't
16011da177e4SLinus Torvalds  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
16021da177e4SLinus Torvalds  * only if we really need to.  That happens here.
16031da177e4SLinus Torvalds  */
16041da177e4SLinus Torvalds void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
16051da177e4SLinus Torvalds {
16061da177e4SLinus Torvalds 	struct buffer_head *old_bh;
16071da177e4SLinus Torvalds 
16081da177e4SLinus Torvalds 	might_sleep();
16091da177e4SLinus Torvalds 
1610385fd4c5SCoywolf Qi Hunt 	old_bh = __find_get_block_slow(bdev, block);
16111da177e4SLinus Torvalds 	if (old_bh) {
16121da177e4SLinus Torvalds 		clear_buffer_dirty(old_bh);
16131da177e4SLinus Torvalds 		wait_on_buffer(old_bh);
16141da177e4SLinus Torvalds 		clear_buffer_req(old_bh);
16151da177e4SLinus Torvalds 		__brelse(old_bh);
16161da177e4SLinus Torvalds 	}
16171da177e4SLinus Torvalds }
16181da177e4SLinus Torvalds EXPORT_SYMBOL(unmap_underlying_metadata);
16191da177e4SLinus Torvalds 
16201da177e4SLinus Torvalds /*
16211da177e4SLinus Torvalds  * NOTE! All mapped/uptodate combinations are valid:
16221da177e4SLinus Torvalds  *
16231da177e4SLinus Torvalds  *	Mapped	Uptodate	Meaning
16241da177e4SLinus Torvalds  *
16251da177e4SLinus Torvalds  *	No	No		"unknown" - must do get_block()
16261da177e4SLinus Torvalds  *	No	Yes		"hole" - zero-filled
16271da177e4SLinus Torvalds  *	Yes	No		"allocated" - allocated on disk, not read in
16281da177e4SLinus Torvalds  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
16291da177e4SLinus Torvalds  *
16301da177e4SLinus Torvalds  * "Dirty" is valid only with the last case (mapped+uptodate).
16311da177e4SLinus Torvalds  */
16321da177e4SLinus Torvalds 
16331da177e4SLinus Torvalds /*
16341da177e4SLinus Torvalds  * While block_write_full_page is writing back the dirty buffers under
16351da177e4SLinus Torvalds  * the page lock, whoever dirtied the buffers may decide to clean them
16361da177e4SLinus Torvalds  * again at any time.  We handle that by only looking at the buffer
16371da177e4SLinus Torvalds  * state inside lock_buffer().
16381da177e4SLinus Torvalds  *
16391da177e4SLinus Torvalds  * If block_write_full_page() is called for regular writeback
16401da177e4SLinus Torvalds  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
16411da177e4SLinus Torvalds  * locked buffer.   This only can happen if someone has written the buffer
16421da177e4SLinus Torvalds  * directly, with submit_bh().  At the address_space level PageWriteback
16431da177e4SLinus Torvalds  * prevents this contention from occurring.
16441da177e4SLinus Torvalds  */
16451da177e4SLinus Torvalds static int __block_write_full_page(struct inode *inode, struct page *page,
16461da177e4SLinus Torvalds 			get_block_t *get_block, struct writeback_control *wbc)
16471da177e4SLinus Torvalds {
16481da177e4SLinus Torvalds 	int err;
16491da177e4SLinus Torvalds 	sector_t block;
16501da177e4SLinus Torvalds 	sector_t last_block;
1651f0fbd5fcSAndrew Morton 	struct buffer_head *bh, *head;
1652b0cf2321SBadari Pulavarty 	const unsigned blocksize = 1 << inode->i_blkbits;
16531da177e4SLinus Torvalds 	int nr_underway = 0;
16541da177e4SLinus Torvalds 
16551da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
16561da177e4SLinus Torvalds 
16571da177e4SLinus Torvalds 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
16581da177e4SLinus Torvalds 
16591da177e4SLinus Torvalds 	if (!page_has_buffers(page)) {
1660b0cf2321SBadari Pulavarty 		create_empty_buffers(page, blocksize,
16611da177e4SLinus Torvalds 					(1 << BH_Dirty)|(1 << BH_Uptodate));
16621da177e4SLinus Torvalds 	}
16631da177e4SLinus Torvalds 
16641da177e4SLinus Torvalds 	/*
16651da177e4SLinus Torvalds 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
16661da177e4SLinus Torvalds 	 * here, and the (potentially unmapped) buffers may become dirty at
16671da177e4SLinus Torvalds 	 * any time.  If a buffer becomes dirty here after we've inspected it
16681da177e4SLinus Torvalds 	 * then we just miss that fact, and the page stays dirty.
16691da177e4SLinus Torvalds 	 *
16701da177e4SLinus Torvalds 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
16711da177e4SLinus Torvalds 	 * handle that here by just cleaning them.
16721da177e4SLinus Torvalds 	 */
16731da177e4SLinus Torvalds 
167454b21a79SAndrew Morton 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
16751da177e4SLinus Torvalds 	head = page_buffers(page);
16761da177e4SLinus Torvalds 	bh = head;
16771da177e4SLinus Torvalds 
16781da177e4SLinus Torvalds 	/*
16791da177e4SLinus Torvalds 	 * Get all the dirty buffers mapped to disk addresses and
16801da177e4SLinus Torvalds 	 * handle any aliases from the underlying blockdev's mapping.
16811da177e4SLinus Torvalds 	 */
16821da177e4SLinus Torvalds 	do {
16831da177e4SLinus Torvalds 		if (block > last_block) {
16841da177e4SLinus Torvalds 			/*
16851da177e4SLinus Torvalds 			 * mapped buffers outside i_size will occur, because
16861da177e4SLinus Torvalds 			 * this page can be outside i_size when there is a
16871da177e4SLinus Torvalds 			 * truncate in progress.
16881da177e4SLinus Torvalds 			 */
16891da177e4SLinus Torvalds 			/*
16901da177e4SLinus Torvalds 			 * The buffer was zeroed by block_write_full_page()
16911da177e4SLinus Torvalds 			 */
16921da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
16931da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
169429a814d2SAlex Tomas 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
169529a814d2SAlex Tomas 			   buffer_dirty(bh)) {
1696b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
16971da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
16981da177e4SLinus Torvalds 			if (err)
16991da177e4SLinus Torvalds 				goto recover;
170029a814d2SAlex Tomas 			clear_buffer_delay(bh);
17011da177e4SLinus Torvalds 			if (buffer_new(bh)) {
17021da177e4SLinus Torvalds 				/* blockdev mappings never come here */
17031da177e4SLinus Torvalds 				clear_buffer_new(bh);
17041da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
17051da177e4SLinus Torvalds 							bh->b_blocknr);
17061da177e4SLinus Torvalds 			}
17071da177e4SLinus Torvalds 		}
17081da177e4SLinus Torvalds 		bh = bh->b_this_page;
17091da177e4SLinus Torvalds 		block++;
17101da177e4SLinus Torvalds 	} while (bh != head);
17111da177e4SLinus Torvalds 
17121da177e4SLinus Torvalds 	do {
17131da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
17141da177e4SLinus Torvalds 			continue;
17151da177e4SLinus Torvalds 		/*
17161da177e4SLinus Torvalds 		 * If it's a fully non-blocking write attempt and we cannot
17171da177e4SLinus Torvalds 		 * lock the buffer then redirty the page.  Note that this can
17181da177e4SLinus Torvalds 		 * potentially cause a busy-wait loop from pdflush and kswapd
17191da177e4SLinus Torvalds 		 * activity, but those code paths have their own higher-level
17201da177e4SLinus Torvalds 		 * throttling.
17211da177e4SLinus Torvalds 		 */
17221da177e4SLinus Torvalds 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
17231da177e4SLinus Torvalds 			lock_buffer(bh);
17241da177e4SLinus Torvalds 		} else if (test_set_buffer_locked(bh)) {
17251da177e4SLinus Torvalds 			redirty_page_for_writepage(wbc, page);
17261da177e4SLinus Torvalds 			continue;
17271da177e4SLinus Torvalds 		}
17281da177e4SLinus Torvalds 		if (test_clear_buffer_dirty(bh)) {
17291da177e4SLinus Torvalds 			mark_buffer_async_write(bh);
17301da177e4SLinus Torvalds 		} else {
17311da177e4SLinus Torvalds 			unlock_buffer(bh);
17321da177e4SLinus Torvalds 		}
17331da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
17341da177e4SLinus Torvalds 
17351da177e4SLinus Torvalds 	/*
17361da177e4SLinus Torvalds 	 * The page and its buffers are protected by PageWriteback(), so we can
17371da177e4SLinus Torvalds 	 * drop the bh refcounts early.
17381da177e4SLinus Torvalds 	 */
17391da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
17401da177e4SLinus Torvalds 	set_page_writeback(page);
17411da177e4SLinus Torvalds 
17421da177e4SLinus Torvalds 	do {
17431da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
17441da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
17451da177e4SLinus Torvalds 			submit_bh(WRITE, bh);
17461da177e4SLinus Torvalds 			nr_underway++;
1747ad576e63SNick Piggin 		}
17481da177e4SLinus Torvalds 		bh = next;
17491da177e4SLinus Torvalds 	} while (bh != head);
175005937baaSAndrew Morton 	unlock_page(page);
17511da177e4SLinus Torvalds 
17521da177e4SLinus Torvalds 	err = 0;
17531da177e4SLinus Torvalds done:
17541da177e4SLinus Torvalds 	if (nr_underway == 0) {
17551da177e4SLinus Torvalds 		/*
17561da177e4SLinus Torvalds 		 * The page was marked dirty, but the buffers were
17571da177e4SLinus Torvalds 		 * clean.  Someone wrote them back by hand with
17581da177e4SLinus Torvalds 		 * ll_rw_block/submit_bh.  A rare case.
17591da177e4SLinus Torvalds 		 */
17601da177e4SLinus Torvalds 		end_page_writeback(page);
17613d67f2d7SNick Piggin 
17621da177e4SLinus Torvalds 		/*
17631da177e4SLinus Torvalds 		 * The page and buffer_heads can be released at any time from
17641da177e4SLinus Torvalds 		 * here on.
17651da177e4SLinus Torvalds 		 */
17661da177e4SLinus Torvalds 	}
17671da177e4SLinus Torvalds 	return err;
17681da177e4SLinus Torvalds 
17691da177e4SLinus Torvalds recover:
17701da177e4SLinus Torvalds 	/*
17711da177e4SLinus Torvalds 	 * ENOSPC, or some other error.  We may already have added some
17721da177e4SLinus Torvalds 	 * blocks to the file, so we need to write these out to avoid
17731da177e4SLinus Torvalds 	 * exposing stale data.
17741da177e4SLinus Torvalds 	 * The page is currently locked and not marked for writeback
17751da177e4SLinus Torvalds 	 */
17761da177e4SLinus Torvalds 	bh = head;
17771da177e4SLinus Torvalds 	/* Recovery: lock and submit the mapped buffers */
17781da177e4SLinus Torvalds 	do {
177929a814d2SAlex Tomas 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
178029a814d2SAlex Tomas 		    !buffer_delay(bh)) {
17811da177e4SLinus Torvalds 			lock_buffer(bh);
17821da177e4SLinus Torvalds 			mark_buffer_async_write(bh);
17831da177e4SLinus Torvalds 		} else {
17841da177e4SLinus Torvalds 			/*
17851da177e4SLinus Torvalds 			 * The buffer may have been set dirty during
17861da177e4SLinus Torvalds 			 * attachment to a dirty page.
17871da177e4SLinus Torvalds 			 */
17881da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
17891da177e4SLinus Torvalds 		}
17901da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
17911da177e4SLinus Torvalds 	SetPageError(page);
17921da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
17937e4c3690SAndrew Morton 	mapping_set_error(page->mapping, err);
17941da177e4SLinus Torvalds 	set_page_writeback(page);
17951da177e4SLinus Torvalds 	do {
17961da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
17971da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
17981da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
17991da177e4SLinus Torvalds 			submit_bh(WRITE, bh);
18001da177e4SLinus Torvalds 			nr_underway++;
1801ad576e63SNick Piggin 		}
18021da177e4SLinus Torvalds 		bh = next;
18031da177e4SLinus Torvalds 	} while (bh != head);
1804ffda9d30SNick Piggin 	unlock_page(page);
18051da177e4SLinus Torvalds 	goto done;
18061da177e4SLinus Torvalds }
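
/*
 * Editor's illustration (not part of the original file): filesystems
 * normally reach __block_write_full_page() through the exported
 * block_write_full_page() wrapper defined later in this file.  The
 * example_get_block callback is hypothetical.
 */
#if 0
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, example_get_block, wbc);
}
#endif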
18071da177e4SLinus Torvalds 
1808afddba49SNick Piggin /*
1809afddba49SNick Piggin  * If a page has any new buffers, zero them out here, and mark them uptodate
1810afddba49SNick Piggin  * and dirty so they'll be written out (in order to prevent uninitialised
1811afddba49SNick Piggin  * block data from leaking). And clear the new bit.
1812afddba49SNick Piggin  */
1813afddba49SNick Piggin void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1814afddba49SNick Piggin {
1815afddba49SNick Piggin 	unsigned int block_start, block_end;
1816afddba49SNick Piggin 	struct buffer_head *head, *bh;
1817afddba49SNick Piggin 
1818afddba49SNick Piggin 	BUG_ON(!PageLocked(page));
1819afddba49SNick Piggin 	if (!page_has_buffers(page))
1820afddba49SNick Piggin 		return;
1821afddba49SNick Piggin 
1822afddba49SNick Piggin 	bh = head = page_buffers(page);
1823afddba49SNick Piggin 	block_start = 0;
1824afddba49SNick Piggin 	do {
1825afddba49SNick Piggin 		block_end = block_start + bh->b_size;
1826afddba49SNick Piggin 
1827afddba49SNick Piggin 		if (buffer_new(bh)) {
1828afddba49SNick Piggin 			if (block_end > from && block_start < to) {
1829afddba49SNick Piggin 				if (!PageUptodate(page)) {
1830afddba49SNick Piggin 					unsigned start, size;
1831afddba49SNick Piggin 
1832afddba49SNick Piggin 					start = max(from, block_start);
1833afddba49SNick Piggin 					size = min(to, block_end) - start;
1834afddba49SNick Piggin 
1835eebd2aa3SChristoph Lameter 					zero_user(page, start, size);
1836afddba49SNick Piggin 					set_buffer_uptodate(bh);
1837afddba49SNick Piggin 				}
1838afddba49SNick Piggin 
1839afddba49SNick Piggin 				clear_buffer_new(bh);
1840afddba49SNick Piggin 				mark_buffer_dirty(bh);
1841afddba49SNick Piggin 			}
1842afddba49SNick Piggin 		}
1843afddba49SNick Piggin 
1844afddba49SNick Piggin 		block_start = block_end;
1845afddba49SNick Piggin 		bh = bh->b_this_page;
1846afddba49SNick Piggin 	} while (bh != head);
1847afddba49SNick Piggin }
1848afddba49SNick Piggin EXPORT_SYMBOL(page_zero_new_buffers);
1849afddba49SNick Piggin 
18501da177e4SLinus Torvalds static int __block_prepare_write(struct inode *inode, struct page *page,
18511da177e4SLinus Torvalds 		unsigned from, unsigned to, get_block_t *get_block)
18521da177e4SLinus Torvalds {
18531da177e4SLinus Torvalds 	unsigned block_start, block_end;
18541da177e4SLinus Torvalds 	sector_t block;
18551da177e4SLinus Torvalds 	int err = 0;
18561da177e4SLinus Torvalds 	unsigned blocksize, bbits;
18571da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
18581da177e4SLinus Torvalds 
18591da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
18601da177e4SLinus Torvalds 	BUG_ON(from > PAGE_CACHE_SIZE);
18611da177e4SLinus Torvalds 	BUG_ON(to > PAGE_CACHE_SIZE);
18621da177e4SLinus Torvalds 	BUG_ON(from > to);
18631da177e4SLinus Torvalds 
18641da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
18651da177e4SLinus Torvalds 	if (!page_has_buffers(page))
18661da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
18671da177e4SLinus Torvalds 	head = page_buffers(page);
18681da177e4SLinus Torvalds 
18691da177e4SLinus Torvalds 	bbits = inode->i_blkbits;
18701da177e4SLinus Torvalds 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
18711da177e4SLinus Torvalds 
18721da177e4SLinus Torvalds 	for(bh = head, block_start = 0; bh != head || !block_start;
18731da177e4SLinus Torvalds 	    block++, block_start=block_end, bh = bh->b_this_page) {
18741da177e4SLinus Torvalds 		block_end = block_start + blocksize;
18751da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
18761da177e4SLinus Torvalds 			if (PageUptodate(page)) {
18771da177e4SLinus Torvalds 				if (!buffer_uptodate(bh))
18781da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
18791da177e4SLinus Torvalds 			}
18801da177e4SLinus Torvalds 			continue;
18811da177e4SLinus Torvalds 		}
18821da177e4SLinus Torvalds 		if (buffer_new(bh))
18831da177e4SLinus Torvalds 			clear_buffer_new(bh);
18841da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
1885b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
18861da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
18871da177e4SLinus Torvalds 			if (err)
1888f3ddbdc6SNick Piggin 				break;
18891da177e4SLinus Torvalds 			if (buffer_new(bh)) {
18901da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
18911da177e4SLinus Torvalds 							bh->b_blocknr);
18921da177e4SLinus Torvalds 				if (PageUptodate(page)) {
1893637aff46SNick Piggin 					clear_buffer_new(bh);
18941da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
1895637aff46SNick Piggin 					mark_buffer_dirty(bh);
18961da177e4SLinus Torvalds 					continue;
18971da177e4SLinus Torvalds 				}
1898eebd2aa3SChristoph Lameter 				if (block_end > to || block_start < from)
1899eebd2aa3SChristoph Lameter 					zero_user_segments(page,
1900eebd2aa3SChristoph Lameter 						to, block_end,
1901eebd2aa3SChristoph Lameter 						block_start, from);
19021da177e4SLinus Torvalds 				continue;
19031da177e4SLinus Torvalds 			}
19041da177e4SLinus Torvalds 		}
19051da177e4SLinus Torvalds 		if (PageUptodate(page)) {
19061da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
19071da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
19081da177e4SLinus Torvalds 			continue;
19091da177e4SLinus Torvalds 		}
19101da177e4SLinus Torvalds 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
191133a266ddSDavid Chinner 		    !buffer_unwritten(bh) &&
19121da177e4SLinus Torvalds 		     (block_start < from || block_end > to)) {
19131da177e4SLinus Torvalds 			ll_rw_block(READ, 1, &bh);
19141da177e4SLinus Torvalds 			*wait_bh++=bh;
19151da177e4SLinus Torvalds 		}
19161da177e4SLinus Torvalds 	}
19171da177e4SLinus Torvalds 	/*
19181da177e4SLinus Torvalds 	 * If we issued read requests - let them complete.
19191da177e4SLinus Torvalds 	 */
19201da177e4SLinus Torvalds 	while(wait_bh > wait) {
19211da177e4SLinus Torvalds 		wait_on_buffer(*--wait_bh);
19221da177e4SLinus Torvalds 		if (!buffer_uptodate(*wait_bh))
1923f3ddbdc6SNick Piggin 			err = -EIO;
19241da177e4SLinus Torvalds 	}
1925afddba49SNick Piggin 	if (unlikely(err))
1926afddba49SNick Piggin 		page_zero_new_buffers(page, from, to);
19271da177e4SLinus Torvalds 	return err;
19281da177e4SLinus Torvalds }
19291da177e4SLinus Torvalds 
19301da177e4SLinus Torvalds static int __block_commit_write(struct inode *inode, struct page *page,
19311da177e4SLinus Torvalds 		unsigned from, unsigned to)
19321da177e4SLinus Torvalds {
19331da177e4SLinus Torvalds 	unsigned block_start, block_end;
19341da177e4SLinus Torvalds 	int partial = 0;
19351da177e4SLinus Torvalds 	unsigned blocksize;
19361da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
19371da177e4SLinus Torvalds 
19381da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
19391da177e4SLinus Torvalds 
19401da177e4SLinus Torvalds 	for(bh = head = page_buffers(page), block_start = 0;
19411da177e4SLinus Torvalds 	    bh != head || !block_start;
19421da177e4SLinus Torvalds 	    block_start=block_end, bh = bh->b_this_page) {
19431da177e4SLinus Torvalds 		block_end = block_start + blocksize;
19441da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
19451da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
19461da177e4SLinus Torvalds 				partial = 1;
19471da177e4SLinus Torvalds 		} else {
19481da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
19491da177e4SLinus Torvalds 			mark_buffer_dirty(bh);
19501da177e4SLinus Torvalds 		}
1951afddba49SNick Piggin 		clear_buffer_new(bh);
19521da177e4SLinus Torvalds 	}
19531da177e4SLinus Torvalds 
19541da177e4SLinus Torvalds 	/*
19551da177e4SLinus Torvalds 	 * If this is a partial write which happened to make all buffers
19561da177e4SLinus Torvalds 	 * uptodate then we can optimize away a bogus readpage() for
19571da177e4SLinus Torvalds 	 * the next read(). Here we 'discover' whether the page went
19581da177e4SLinus Torvalds 	 * uptodate as a result of this (potentially partial) write.
19591da177e4SLinus Torvalds 	 */
19601da177e4SLinus Torvalds 	if (!partial)
19611da177e4SLinus Torvalds 		SetPageUptodate(page);
19621da177e4SLinus Torvalds 	return 0;
19631da177e4SLinus Torvalds }
19641da177e4SLinus Torvalds 
19651da177e4SLinus Torvalds /*
1966afddba49SNick Piggin  * block_write_begin takes care of the basic task of block allocation and
1967afddba49SNick Piggin  * bringing partial write blocks uptodate first.
1968afddba49SNick Piggin  *
1969afddba49SNick Piggin  * If *pagep is not NULL, then block_write_begin uses the locked page
1970afddba49SNick Piggin  * at *pagep rather than allocating its own. In this case, the page will
1971afddba49SNick Piggin  * not be unlocked or deallocated on failure.
1972afddba49SNick Piggin  */
1973afddba49SNick Piggin int block_write_begin(struct file *file, struct address_space *mapping,
1974afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
1975afddba49SNick Piggin 			struct page **pagep, void **fsdata,
1976afddba49SNick Piggin 			get_block_t *get_block)
1977afddba49SNick Piggin {
1978afddba49SNick Piggin 	struct inode *inode = mapping->host;
1979afddba49SNick Piggin 	int status = 0;
1980afddba49SNick Piggin 	struct page *page;
1981afddba49SNick Piggin 	pgoff_t index;
1982afddba49SNick Piggin 	unsigned start, end;
1983afddba49SNick Piggin 	int ownpage = 0;
1984afddba49SNick Piggin 
1985afddba49SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
1986afddba49SNick Piggin 	start = pos & (PAGE_CACHE_SIZE - 1);
1987afddba49SNick Piggin 	end = start + len;
1988afddba49SNick Piggin 
1989afddba49SNick Piggin 	page = *pagep;
1990afddba49SNick Piggin 	if (page == NULL) {
1991afddba49SNick Piggin 		ownpage = 1;
1992afddba49SNick Piggin 		page = __grab_cache_page(mapping, index);
1993afddba49SNick Piggin 		if (!page) {
1994afddba49SNick Piggin 			status = -ENOMEM;
1995afddba49SNick Piggin 			goto out;
1996afddba49SNick Piggin 		}
1997afddba49SNick Piggin 		*pagep = page;
1998afddba49SNick Piggin 	} else
1999afddba49SNick Piggin 		BUG_ON(!PageLocked(page));
2000afddba49SNick Piggin 
2001afddba49SNick Piggin 	status = __block_prepare_write(inode, page, start, end, get_block);
2002afddba49SNick Piggin 	if (unlikely(status)) {
2003afddba49SNick Piggin 		ClearPageUptodate(page);
2004afddba49SNick Piggin 
2005afddba49SNick Piggin 		if (ownpage) {
2006afddba49SNick Piggin 			unlock_page(page);
2007afddba49SNick Piggin 			page_cache_release(page);
2008afddba49SNick Piggin 			*pagep = NULL;
2009afddba49SNick Piggin 
2010afddba49SNick Piggin 			/*
2011afddba49SNick Piggin 			 * prepare_write() may have instantiated a few blocks
2012afddba49SNick Piggin 			 * outside i_size.  Trim these off again. Don't need
2013afddba49SNick Piggin 			 * i_size_read because we hold i_mutex.
2014afddba49SNick Piggin 			 */
2015afddba49SNick Piggin 			if (pos + len > inode->i_size)
2016afddba49SNick Piggin 				vmtruncate(inode, inode->i_size);
2017afddba49SNick Piggin 		}
2018afddba49SNick Piggin 		goto out;
2019afddba49SNick Piggin 	}
2020afddba49SNick Piggin 
2021afddba49SNick Piggin out:
2022afddba49SNick Piggin 	return status;
2023afddba49SNick Piggin }
2024afddba49SNick Piggin EXPORT_SYMBOL(block_write_begin);
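
/*
 * Editor's illustration (not part of the original file): a minimal
 * ->write_begin() in the style that ext2 uses, letting
 * block_write_begin() find and lock the page itself.  example_get_block
 * is a hypothetical get_block_t callback.
 */
#if 0
static int example_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;	/* ask block_write_begin() to grab the page */
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, example_get_block);
}
#endif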
2025afddba49SNick Piggin 
2026afddba49SNick Piggin int block_write_end(struct file *file, struct address_space *mapping,
2027afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2028afddba49SNick Piggin 			struct page *page, void *fsdata)
2029afddba49SNick Piggin {
2030afddba49SNick Piggin 	struct inode *inode = mapping->host;
2031afddba49SNick Piggin 	unsigned start;
2032afddba49SNick Piggin 
2033afddba49SNick Piggin 	start = pos & (PAGE_CACHE_SIZE - 1);
2034afddba49SNick Piggin 
2035afddba49SNick Piggin 	if (unlikely(copied < len)) {
2036afddba49SNick Piggin 		/*
2037afddba49SNick Piggin 		 * The buffers that were written will now be uptodate, so we
2038afddba49SNick Piggin 		 * don't have to worry about a readpage reading them and
2039afddba49SNick Piggin 		 * overwriting a partial write. However if we have encountered
2040afddba49SNick Piggin 		 * a short write and only partially written into a buffer, it
2041afddba49SNick Piggin 		 * will not be marked uptodate, so a readpage might come in and
2042afddba49SNick Piggin 		 * destroy our partial write.
2043afddba49SNick Piggin 		 *
2044afddba49SNick Piggin 		 * Do the simplest thing, and just treat any short write to a
2045afddba49SNick Piggin 		 * non uptodate page as a zero-length write, and force the
2046afddba49SNick Piggin 		 * caller to redo the whole thing.
2047afddba49SNick Piggin 		 */
2048afddba49SNick Piggin 		if (!PageUptodate(page))
2049afddba49SNick Piggin 			copied = 0;
2050afddba49SNick Piggin 
2051afddba49SNick Piggin 		page_zero_new_buffers(page, start+copied, start+len);
2052afddba49SNick Piggin 	}
2053afddba49SNick Piggin 	flush_dcache_page(page);
2054afddba49SNick Piggin 
2055afddba49SNick Piggin 	/* This could be a short (even 0-length) commit */
2056afddba49SNick Piggin 	__block_commit_write(inode, page, start, start+copied);
2057afddba49SNick Piggin 
2058afddba49SNick Piggin 	return copied;
2059afddba49SNick Piggin }
2060afddba49SNick Piggin EXPORT_SYMBOL(block_write_end);
2061afddba49SNick Piggin 
2062afddba49SNick Piggin int generic_write_end(struct file *file, struct address_space *mapping,
2063afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2064afddba49SNick Piggin 			struct page *page, void *fsdata)
2065afddba49SNick Piggin {
2066afddba49SNick Piggin 	struct inode *inode = mapping->host;
2067c7d206b3SJan Kara 	int i_size_changed = 0;
2068afddba49SNick Piggin 
2069afddba49SNick Piggin 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2070afddba49SNick Piggin 
2071afddba49SNick Piggin 	/*
2072afddba49SNick Piggin 	 * No need to use i_size_read() here, the i_size
2073afddba49SNick Piggin 	 * cannot change under us because we hold i_mutex.
2074afddba49SNick Piggin 	 *
2075afddba49SNick Piggin 	 * But it's important to update i_size while still holding page lock:
2076afddba49SNick Piggin 	 * page writeout could otherwise come in and zero beyond i_size.
2077afddba49SNick Piggin 	 */
2078afddba49SNick Piggin 	if (pos+copied > inode->i_size) {
2079afddba49SNick Piggin 		i_size_write(inode, pos+copied);
2080c7d206b3SJan Kara 		i_size_changed = 1;
2081afddba49SNick Piggin 	}
2082afddba49SNick Piggin 
2083afddba49SNick Piggin 	unlock_page(page);
2084afddba49SNick Piggin 	page_cache_release(page);
2085afddba49SNick Piggin 
2086c7d206b3SJan Kara 	/*
2087c7d206b3SJan Kara 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
2088c7d206b3SJan Kara 	 * makes the holding time of page lock longer. Second, it forces lock
2089c7d206b3SJan Kara 	 * ordering of page lock and transaction start for journaling
2090c7d206b3SJan Kara 	 * filesystems.
2091c7d206b3SJan Kara 	 */
2092c7d206b3SJan Kara 	if (i_size_changed)
2093c7d206b3SJan Kara 		mark_inode_dirty(inode);
2094c7d206b3SJan Kara 
2095afddba49SNick Piggin 	return copied;
2096afddba49SNick Piggin }
2097afddba49SNick Piggin EXPORT_SYMBOL(generic_write_end);
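
/*
 * Illustrative sketch, not part of this file: a filesystem that uses the
 * generic helpers can point its address_space_operations straight at them.
 * "foo_get_block" and "foo_aops" below are hypothetical names used only
 * for this example.
 *
 *	static int foo_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		*pagep = NULL;
 *		return block_write_begin(file, mapping, pos, len, flags,
 *					pagep, fsdata, foo_get_block);
 *	}
 *
 *	static const struct address_space_operations foo_aops = {
 *		.write_begin	= foo_write_begin,
 *		.write_end	= generic_write_end,
 *	};
 */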
2098afddba49SNick Piggin 
2099afddba49SNick Piggin /*
21001da177e4SLinus Torvalds  * Generic "read page" function for block devices that have the normal
21011da177e4SLinus Torvalds  * get_block functionality. This covers most block device filesystems.
21021da177e4SLinus Torvalds  * Reads the page asynchronously --- the unlock_buffer() and
21031da177e4SLinus Torvalds  * set/clear_buffer_uptodate() functions propagate buffer state into the
21041da177e4SLinus Torvalds  * page struct once IO has completed.
21051da177e4SLinus Torvalds  */
21061da177e4SLinus Torvalds int block_read_full_page(struct page *page, get_block_t *get_block)
21071da177e4SLinus Torvalds {
21081da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
21091da177e4SLinus Torvalds 	sector_t iblock, lblock;
21101da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
21111da177e4SLinus Torvalds 	unsigned int blocksize;
21121da177e4SLinus Torvalds 	int nr, i;
21131da177e4SLinus Torvalds 	int fully_mapped = 1;
21141da177e4SLinus Torvalds 
2115cd7619d6SMatt Mackall 	BUG_ON(!PageLocked(page));
21161da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
21171da177e4SLinus Torvalds 	if (!page_has_buffers(page))
21181da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
21191da177e4SLinus Torvalds 	head = page_buffers(page);
21201da177e4SLinus Torvalds 
21211da177e4SLinus Torvalds 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
21221da177e4SLinus Torvalds 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
21231da177e4SLinus Torvalds 	bh = head;
21241da177e4SLinus Torvalds 	nr = 0;
21251da177e4SLinus Torvalds 	i = 0;
21261da177e4SLinus Torvalds 
21271da177e4SLinus Torvalds 	do {
21281da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
21291da177e4SLinus Torvalds 			continue;
21301da177e4SLinus Torvalds 
21311da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
2132c64610baSAndrew Morton 			int err = 0;
2133c64610baSAndrew Morton 
21341da177e4SLinus Torvalds 			fully_mapped = 0;
21351da177e4SLinus Torvalds 			if (iblock < lblock) {
2136b0cf2321SBadari Pulavarty 				WARN_ON(bh->b_size != blocksize);
2137c64610baSAndrew Morton 				err = get_block(inode, iblock, bh, 0);
2138c64610baSAndrew Morton 				if (err)
21391da177e4SLinus Torvalds 					SetPageError(page);
21401da177e4SLinus Torvalds 			}
21411da177e4SLinus Torvalds 			if (!buffer_mapped(bh)) {
2142eebd2aa3SChristoph Lameter 				zero_user(page, i * blocksize, blocksize);
2143c64610baSAndrew Morton 				if (!err)
21441da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
21451da177e4SLinus Torvalds 				continue;
21461da177e4SLinus Torvalds 			}
21471da177e4SLinus Torvalds 			/*
21481da177e4SLinus Torvalds 			 * get_block() might have updated the buffer
21491da177e4SLinus Torvalds 			 * synchronously
21501da177e4SLinus Torvalds 			 */
21511da177e4SLinus Torvalds 			if (buffer_uptodate(bh))
21521da177e4SLinus Torvalds 				continue;
21531da177e4SLinus Torvalds 		}
21541da177e4SLinus Torvalds 		arr[nr++] = bh;
21551da177e4SLinus Torvalds 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
21561da177e4SLinus Torvalds 
21571da177e4SLinus Torvalds 	if (fully_mapped)
21581da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
21591da177e4SLinus Torvalds 
21601da177e4SLinus Torvalds 	if (!nr) {
21611da177e4SLinus Torvalds 		/*
21621da177e4SLinus Torvalds 		 * All buffers are uptodate - we can set the page uptodate
21631da177e4SLinus Torvalds 		 * as well. But not if get_block() returned an error.
21641da177e4SLinus Torvalds 		 */
21651da177e4SLinus Torvalds 		if (!PageError(page))
21661da177e4SLinus Torvalds 			SetPageUptodate(page);
21671da177e4SLinus Torvalds 		unlock_page(page);
21681da177e4SLinus Torvalds 		return 0;
21691da177e4SLinus Torvalds 	}
21701da177e4SLinus Torvalds 
21711da177e4SLinus Torvalds 	/* Stage two: lock the buffers */
21721da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
21731da177e4SLinus Torvalds 		bh = arr[i];
21741da177e4SLinus Torvalds 		lock_buffer(bh);
21751da177e4SLinus Torvalds 		mark_buffer_async_read(bh);
21761da177e4SLinus Torvalds 	}
21771da177e4SLinus Torvalds 
21781da177e4SLinus Torvalds 	/*
21791da177e4SLinus Torvalds 	 * Stage 3: start the IO.  Check for uptodateness
21801da177e4SLinus Torvalds 	 * inside the buffer lock in case another process reading
21811da177e4SLinus Torvalds 	 * the underlying blockdev brought it uptodate (the sct fix).
21821da177e4SLinus Torvalds 	 */
21831da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
21841da177e4SLinus Torvalds 		bh = arr[i];
21851da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
21861da177e4SLinus Torvalds 			end_buffer_async_read(bh, 1);
21871da177e4SLinus Torvalds 		else
21881da177e4SLinus Torvalds 			submit_bh(READ, bh);
21891da177e4SLinus Torvalds 	}
21901da177e4SLinus Torvalds 	return 0;
21911da177e4SLinus Torvalds }
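
/*
 * Illustrative sketch, not part of this file: block_read_full_page() is
 * normally used as the body of a filesystem's ->readpage method, with
 * "foo_get_block" standing in for that filesystem's get_block_t:
 *
 *	static int foo_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, foo_get_block);
 *	}
 */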
21921da177e4SLinus Torvalds 
21931da177e4SLinus Torvalds /*
21931da177e4SLinus Torvalds  * Utility function for filesystems that need to do work on expanding
219489e10787SNick Piggin  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
21951da177e4SLinus Torvalds  * deal with the hole.
21961da177e4SLinus Torvalds  */
219789e10787SNick Piggin int generic_cont_expand_simple(struct inode *inode, loff_t size)
21981da177e4SLinus Torvalds {
21991da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
22001da177e4SLinus Torvalds 	struct page *page;
220189e10787SNick Piggin 	void *fsdata;
220205eb0b51SOGAWA Hirofumi 	unsigned long limit;
22031da177e4SLinus Torvalds 	int err;
22041da177e4SLinus Torvalds 
22051da177e4SLinus Torvalds 	err = -EFBIG;
22061da177e4SLinus Torvalds 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
22071da177e4SLinus Torvalds 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
22081da177e4SLinus Torvalds 		send_sig(SIGXFSZ, current, 0);
22091da177e4SLinus Torvalds 		goto out;
22101da177e4SLinus Torvalds 	}
22111da177e4SLinus Torvalds 	if (size > inode->i_sb->s_maxbytes)
22121da177e4SLinus Torvalds 		goto out;
22131da177e4SLinus Torvalds 
221489e10787SNick Piggin 	err = pagecache_write_begin(NULL, mapping, size, 0,
221589e10787SNick Piggin 				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
221689e10787SNick Piggin 				&page, &fsdata);
221789e10787SNick Piggin 	if (err)
221805eb0b51SOGAWA Hirofumi 		goto out;
221905eb0b51SOGAWA Hirofumi 
222089e10787SNick Piggin 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
222189e10787SNick Piggin 	BUG_ON(err > 0);
222205eb0b51SOGAWA Hirofumi 
222305eb0b51SOGAWA Hirofumi out:
222405eb0b51SOGAWA Hirofumi 	return err;
222505eb0b51SOGAWA Hirofumi }
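
/*
 * Illustrative sketch, not part of this file: a filesystem's ->setattr
 * might call this helper when a truncate grows the file ("attr" is the
 * usual struct iattr argument; further error handling elided):
 *
 *	if ((attr->ia_valid & ATTR_SIZE) &&
 *	    attr->ia_size > i_size_read(inode)) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}
 */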
222605eb0b51SOGAWA Hirofumi 
2227f1e3af72SAdrian Bunk static int cont_expand_zero(struct file *file, struct address_space *mapping,
222889e10787SNick Piggin 			    loff_t pos, loff_t *bytes)
222905eb0b51SOGAWA Hirofumi {
223089e10787SNick Piggin 	struct inode *inode = mapping->host;
223189e10787SNick Piggin 	unsigned blocksize = 1 << inode->i_blkbits;
223289e10787SNick Piggin 	struct page *page;
223389e10787SNick Piggin 	void *fsdata;
223489e10787SNick Piggin 	pgoff_t index, curidx;
223589e10787SNick Piggin 	loff_t curpos;
223689e10787SNick Piggin 	unsigned zerofrom, offset, len;
223789e10787SNick Piggin 	int err = 0;
223805eb0b51SOGAWA Hirofumi 
223989e10787SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
224089e10787SNick Piggin 	offset = pos & ~PAGE_CACHE_MASK;
224189e10787SNick Piggin 
224289e10787SNick Piggin 	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
224389e10787SNick Piggin 		zerofrom = curpos & ~PAGE_CACHE_MASK;
224489e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
224589e10787SNick Piggin 			*bytes |= (blocksize-1);
224689e10787SNick Piggin 			(*bytes)++;
224789e10787SNick Piggin 		}
224889e10787SNick Piggin 		len = PAGE_CACHE_SIZE - zerofrom;
224989e10787SNick Piggin 
225089e10787SNick Piggin 		err = pagecache_write_begin(file, mapping, curpos, len,
225189e10787SNick Piggin 						AOP_FLAG_UNINTERRUPTIBLE,
225289e10787SNick Piggin 						&page, &fsdata);
225389e10787SNick Piggin 		if (err)
225489e10787SNick Piggin 			goto out;
2255eebd2aa3SChristoph Lameter 		zero_user(page, zerofrom, len);
225689e10787SNick Piggin 		err = pagecache_write_end(file, mapping, curpos, len, len,
225789e10787SNick Piggin 						page, fsdata);
225889e10787SNick Piggin 		if (err < 0)
225989e10787SNick Piggin 			goto out;
226089e10787SNick Piggin 		BUG_ON(err != len);
226189e10787SNick Piggin 		err = 0;
2262061e9746SOGAWA Hirofumi 
2263061e9746SOGAWA Hirofumi 		balance_dirty_pages_ratelimited(mapping);
226489e10787SNick Piggin 	}
226589e10787SNick Piggin 
226689e10787SNick Piggin 	/* page covers the boundary, find the boundary offset */
226789e10787SNick Piggin 	if (index == curidx) {
226889e10787SNick Piggin 		zerofrom = curpos & ~PAGE_CACHE_MASK;
226989e10787SNick Piggin 		/* if we will expand the file, the last block will be filled */
227089e10787SNick Piggin 		if (offset <= zerofrom) {
227189e10787SNick Piggin 			goto out;
227289e10787SNick Piggin 		}
227389e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
227489e10787SNick Piggin 			*bytes |= (blocksize-1);
227589e10787SNick Piggin 			(*bytes)++;
227689e10787SNick Piggin 		}
227789e10787SNick Piggin 		len = offset - zerofrom;
227889e10787SNick Piggin 
227989e10787SNick Piggin 		err = pagecache_write_begin(file, mapping, curpos, len,
228089e10787SNick Piggin 						AOP_FLAG_UNINTERRUPTIBLE,
228189e10787SNick Piggin 						&page, &fsdata);
228289e10787SNick Piggin 		if (err)
228389e10787SNick Piggin 			goto out;
2284eebd2aa3SChristoph Lameter 		zero_user(page, zerofrom, len);
228589e10787SNick Piggin 		err = pagecache_write_end(file, mapping, curpos, len, len,
228689e10787SNick Piggin 						page, fsdata);
228789e10787SNick Piggin 		if (err < 0)
228889e10787SNick Piggin 			goto out;
228989e10787SNick Piggin 		BUG_ON(err != len);
229089e10787SNick Piggin 		err = 0;
229189e10787SNick Piggin 	}
229289e10787SNick Piggin out:
229389e10787SNick Piggin 	return err;
22941da177e4SLinus Torvalds }
22951da177e4SLinus Torvalds 
22961da177e4SLinus Torvalds /*
22971da177e4SLinus Torvalds  * For moronic filesystems that do not allow holes in files.
22981da177e4SLinus Torvalds  * We may have to extend the file.
22991da177e4SLinus Torvalds  */
230089e10787SNick Piggin int cont_write_begin(struct file *file, struct address_space *mapping,
230189e10787SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
230289e10787SNick Piggin 			struct page **pagep, void **fsdata,
230389e10787SNick Piggin 			get_block_t *get_block, loff_t *bytes)
23041da177e4SLinus Torvalds {
23051da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
23061da177e4SLinus Torvalds 	unsigned blocksize = 1 << inode->i_blkbits;
230789e10787SNick Piggin 	unsigned zerofrom;
230889e10787SNick Piggin 	int err;
23091da177e4SLinus Torvalds 
231089e10787SNick Piggin 	err = cont_expand_zero(file, mapping, pos, bytes);
231189e10787SNick Piggin 	if (err)
23121da177e4SLinus Torvalds 		goto out;
23131da177e4SLinus Torvalds 
23141da177e4SLinus Torvalds 	zerofrom = *bytes & ~PAGE_CACHE_MASK;
231589e10787SNick Piggin 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
23161da177e4SLinus Torvalds 		*bytes |= (blocksize-1);
23171da177e4SLinus Torvalds 		(*bytes)++;
23181da177e4SLinus Torvalds 	}
23191da177e4SLinus Torvalds 
232089e10787SNick Piggin 	*pagep = NULL;
232189e10787SNick Piggin 	err = block_write_begin(file, mapping, pos, len,
232289e10787SNick Piggin 				flags, pagep, fsdata, get_block);
23231da177e4SLinus Torvalds out:
232489e10787SNick Piggin 	return err;
23251da177e4SLinus Torvalds }
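
/*
 * Illustrative sketch, not part of this file: a hole-less filesystem wires
 * this in as its ->write_begin, passing a per-inode "zeroed up to here"
 * watermark.  "foo_get_block" and "FOO_I(...)->mmu_private" are
 * hypothetical names used only for this example.
 *
 *	static int foo_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, flags,
 *				pagep, fsdata, foo_get_block,
 *				&FOO_I(mapping->host)->mmu_private);
 *	}
 */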
23261da177e4SLinus Torvalds 
23271da177e4SLinus Torvalds int block_prepare_write(struct page *page, unsigned from, unsigned to,
23281da177e4SLinus Torvalds 			get_block_t *get_block)
23291da177e4SLinus Torvalds {
23301da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
23311da177e4SLinus Torvalds 	int err = __block_prepare_write(inode, page, from, to, get_block);
23321da177e4SLinus Torvalds 	if (err)
23331da177e4SLinus Torvalds 		ClearPageUptodate(page);
23341da177e4SLinus Torvalds 	return err;
23351da177e4SLinus Torvalds }
23361da177e4SLinus Torvalds 
23371da177e4SLinus Torvalds int block_commit_write(struct page *page, unsigned from, unsigned to)
23381da177e4SLinus Torvalds {
23391da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
23401da177e4SLinus Torvalds 	__block_commit_write(inode, page, from, to);
23411da177e4SLinus Torvalds 	return 0;
23421da177e4SLinus Torvalds }
23431da177e4SLinus Torvalds 
234454171690SDavid Chinner /*
234554171690SDavid Chinner  * block_page_mkwrite() is not allowed to change the file size as it gets
234654171690SDavid Chinner  * called from a page fault handler when a page is first dirtied. Hence we must
234754171690SDavid Chinner  * be careful to check for EOF conditions here. We set the page up correctly
234854171690SDavid Chinner  * for a written page which means we get ENOSPC checking when writing into
234954171690SDavid Chinner  * holes and correct delalloc and unwritten extent mapping on filesystems that
235054171690SDavid Chinner  * support these features.
235154171690SDavid Chinner  *
235254171690SDavid Chinner  * We are not allowed to take the i_mutex here so we have to play games to
235354171690SDavid Chinner  * protect against truncate races as the page could now be beyond EOF.  Because
235454171690SDavid Chinner  * vmtruncate() writes the inode size before removing pages, once we have the
235554171690SDavid Chinner  * page lock we can determine safely if the page is beyond EOF. If it is not
235654171690SDavid Chinner  * beyond EOF, then the page is guaranteed safe against truncation until we
235754171690SDavid Chinner  * unlock the page.
235854171690SDavid Chinner  */
235954171690SDavid Chinner int
236054171690SDavid Chinner block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
236154171690SDavid Chinner 		   get_block_t get_block)
236254171690SDavid Chinner {
236354171690SDavid Chinner 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
236454171690SDavid Chinner 	unsigned long end;
236554171690SDavid Chinner 	loff_t size;
236654171690SDavid Chinner 	int ret = -EINVAL;
236754171690SDavid Chinner 
236854171690SDavid Chinner 	lock_page(page);
236954171690SDavid Chinner 	size = i_size_read(inode);
237054171690SDavid Chinner 	if ((page->mapping != inode->i_mapping) ||
237118336338SNick Piggin 	    (page_offset(page) > size)) {
237254171690SDavid Chinner 		/* page got truncated out from underneath us */
237354171690SDavid Chinner 		goto out_unlock;
237454171690SDavid Chinner 	}
237554171690SDavid Chinner 
237654171690SDavid Chinner 	/* page is wholly or partially inside EOF */
237754171690SDavid Chinner 	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
237854171690SDavid Chinner 		end = size & ~PAGE_CACHE_MASK;
237954171690SDavid Chinner 	else
238054171690SDavid Chinner 		end = PAGE_CACHE_SIZE;
238154171690SDavid Chinner 
238254171690SDavid Chinner 	ret = block_prepare_write(page, 0, end, get_block);
238354171690SDavid Chinner 	if (!ret)
238454171690SDavid Chinner 		ret = block_commit_write(page, 0, end);
238554171690SDavid Chinner 
238654171690SDavid Chinner out_unlock:
238754171690SDavid Chinner 	unlock_page(page);
238854171690SDavid Chinner 	return ret;
238954171690SDavid Chinner }
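
/*
 * Illustrative sketch, not part of this file: a filesystem typically
 * exposes block_page_mkwrite() through its vm_operations_struct
 * ("foo_get_block" is a hypothetical get_block_t):
 *
 *	static int foo_page_mkwrite(struct vm_area_struct *vma,
 *					struct page *page)
 *	{
 *		return block_page_mkwrite(vma, page, foo_get_block);
 *	}
 *
 *	static struct vm_operations_struct foo_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= foo_page_mkwrite,
 *	};
 */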
23901da177e4SLinus Torvalds 
23911da177e4SLinus Torvalds /*
239203158cd7SNick Piggin  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
23931da177e4SLinus Torvalds  * immediately, while under the page lock.  So it needs a special end_io
23941da177e4SLinus Torvalds  * handler which does not touch the bh after unlocking it.
23951da177e4SLinus Torvalds  */
23961da177e4SLinus Torvalds static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
23971da177e4SLinus Torvalds {
239868671f35SDmitry Monakhov 	__end_buffer_read_notouch(bh, uptodate);
23991da177e4SLinus Torvalds }
24001da177e4SLinus Torvalds 
24011da177e4SLinus Torvalds /*
240203158cd7SNick Piggin  * Attach the singly-linked list of buffers created by nobh_write_begin to
240303158cd7SNick Piggin  * the page (converting it to a circular linked list and taking care of page
240403158cd7SNick Piggin  * dirty races).
240503158cd7SNick Piggin  */
240603158cd7SNick Piggin static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
240703158cd7SNick Piggin {
240803158cd7SNick Piggin 	struct buffer_head *bh;
240903158cd7SNick Piggin 
241003158cd7SNick Piggin 	BUG_ON(!PageLocked(page));
241103158cd7SNick Piggin 
241203158cd7SNick Piggin 	spin_lock(&page->mapping->private_lock);
241303158cd7SNick Piggin 	bh = head;
241403158cd7SNick Piggin 	do {
241503158cd7SNick Piggin 		if (PageDirty(page))
241603158cd7SNick Piggin 			set_buffer_dirty(bh);
241703158cd7SNick Piggin 		if (!bh->b_this_page)
241803158cd7SNick Piggin 			bh->b_this_page = head;
241903158cd7SNick Piggin 		bh = bh->b_this_page;
242003158cd7SNick Piggin 	} while (bh != head);
242103158cd7SNick Piggin 	attach_page_buffers(page, head);
242203158cd7SNick Piggin 	spin_unlock(&page->mapping->private_lock);
242303158cd7SNick Piggin }
242403158cd7SNick Piggin 
242503158cd7SNick Piggin /*
24261da177e4SLinus Torvalds  * On entry, the page is fully not uptodate.
24271da177e4SLinus Torvalds  * On exit, the page is fully uptodate in the areas outside (from,to).
24281da177e4SLinus Torvalds  */
242903158cd7SNick Piggin int nobh_write_begin(struct file *file, struct address_space *mapping,
243003158cd7SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
243103158cd7SNick Piggin 			struct page **pagep, void **fsdata,
24321da177e4SLinus Torvalds 			get_block_t *get_block)
24331da177e4SLinus Torvalds {
243403158cd7SNick Piggin 	struct inode *inode = mapping->host;
24351da177e4SLinus Torvalds 	const unsigned blkbits = inode->i_blkbits;
24361da177e4SLinus Torvalds 	const unsigned blocksize = 1 << blkbits;
2437a4b0672dSNick Piggin 	struct buffer_head *head, *bh;
243803158cd7SNick Piggin 	struct page *page;
243903158cd7SNick Piggin 	pgoff_t index;
244003158cd7SNick Piggin 	unsigned from, to;
24411da177e4SLinus Torvalds 	unsigned block_in_page;
2442a4b0672dSNick Piggin 	unsigned block_start, block_end;
24431da177e4SLinus Torvalds 	sector_t block_in_file;
24441da177e4SLinus Torvalds 	int nr_reads = 0;
24451da177e4SLinus Torvalds 	int ret = 0;
24461da177e4SLinus Torvalds 	int is_mapped_to_disk = 1;
24471da177e4SLinus Torvalds 
244803158cd7SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
244903158cd7SNick Piggin 	from = pos & (PAGE_CACHE_SIZE - 1);
245003158cd7SNick Piggin 	to = from + len;
245103158cd7SNick Piggin 
245203158cd7SNick Piggin 	page = __grab_cache_page(mapping, index);
245303158cd7SNick Piggin 	if (!page)
245403158cd7SNick Piggin 		return -ENOMEM;
245503158cd7SNick Piggin 	*pagep = page;
245603158cd7SNick Piggin 	*fsdata = NULL;
245703158cd7SNick Piggin 
245803158cd7SNick Piggin 	if (page_has_buffers(page)) {
245903158cd7SNick Piggin 		unlock_page(page);
246003158cd7SNick Piggin 		page_cache_release(page);
246103158cd7SNick Piggin 		*pagep = NULL;
246203158cd7SNick Piggin 		return block_write_begin(file, mapping, pos, len, flags, pagep,
246303158cd7SNick Piggin 					fsdata, get_block);
246403158cd7SNick Piggin 	}
2465a4b0672dSNick Piggin 
24661da177e4SLinus Torvalds 	if (PageMappedToDisk(page))
24671da177e4SLinus Torvalds 		return 0;
24681da177e4SLinus Torvalds 
2469a4b0672dSNick Piggin 	/*
2470a4b0672dSNick Piggin 	 * Allocate buffers so that we can keep track of state, and potentially
2471a4b0672dSNick Piggin 	 * attach them to the page if an error occurs. In the common case of
2472a4b0672dSNick Piggin 	 * no error, they will just be freed again without ever being attached
2473a4b0672dSNick Piggin 	 * to the page (which is all OK, because we're under the page lock).
2474a4b0672dSNick Piggin 	 *
2475a4b0672dSNick Piggin 	 * Be careful: the buffer linked list is a NULL terminated one, rather
2476a4b0672dSNick Piggin 	 * than the circular one we're used to.
2477a4b0672dSNick Piggin 	 */
2478a4b0672dSNick Piggin 	head = alloc_page_buffers(page, blocksize, 0);
247903158cd7SNick Piggin 	if (!head) {
248003158cd7SNick Piggin 		ret = -ENOMEM;
248103158cd7SNick Piggin 		goto out_release;
248203158cd7SNick Piggin 	}
2483a4b0672dSNick Piggin 
24841da177e4SLinus Torvalds 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
24851da177e4SLinus Torvalds 
24861da177e4SLinus Torvalds 	/*
24871da177e4SLinus Torvalds 	 * We loop across all blocks in the page, whether or not they are
24881da177e4SLinus Torvalds 	 * part of the affected region.  This is so we can discover if the
24891da177e4SLinus Torvalds 	 * page is fully mapped-to-disk.
24901da177e4SLinus Torvalds 	 */
2491a4b0672dSNick Piggin 	for (block_start = 0, block_in_page = 0, bh = head;
24921da177e4SLinus Torvalds 		  block_start < PAGE_CACHE_SIZE;
2493a4b0672dSNick Piggin 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
24941da177e4SLinus Torvalds 		int create;
24951da177e4SLinus Torvalds 
2496a4b0672dSNick Piggin 		block_end = block_start + blocksize;
2497a4b0672dSNick Piggin 		bh->b_state = 0;
24981da177e4SLinus Torvalds 		create = 1;
24991da177e4SLinus Torvalds 		if (block_start >= to)
25001da177e4SLinus Torvalds 			create = 0;
25011da177e4SLinus Torvalds 		ret = get_block(inode, block_in_file + block_in_page,
2502a4b0672dSNick Piggin 					bh, create);
25031da177e4SLinus Torvalds 		if (ret)
25041da177e4SLinus Torvalds 			goto failed;
2505a4b0672dSNick Piggin 		if (!buffer_mapped(bh))
25061da177e4SLinus Torvalds 			is_mapped_to_disk = 0;
2507a4b0672dSNick Piggin 		if (buffer_new(bh))
2508a4b0672dSNick Piggin 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2509a4b0672dSNick Piggin 		if (PageUptodate(page)) {
2510a4b0672dSNick Piggin 			set_buffer_uptodate(bh);
25111da177e4SLinus Torvalds 			continue;
2512a4b0672dSNick Piggin 		}
2513a4b0672dSNick Piggin 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2514eebd2aa3SChristoph Lameter 			zero_user_segments(page, block_start, from,
2515eebd2aa3SChristoph Lameter 							to, block_end);
25161da177e4SLinus Torvalds 			continue;
25171da177e4SLinus Torvalds 		}
2518a4b0672dSNick Piggin 		if (buffer_uptodate(bh))
25191da177e4SLinus Torvalds 			continue;	/* reiserfs does this */
25201da177e4SLinus Torvalds 		if (block_start < from || block_end > to) {
2521a4b0672dSNick Piggin 			lock_buffer(bh);
2522a4b0672dSNick Piggin 			bh->b_end_io = end_buffer_read_nobh;
2523a4b0672dSNick Piggin 			submit_bh(READ, bh);
2524a4b0672dSNick Piggin 			nr_reads++;
25251da177e4SLinus Torvalds 		}
25261da177e4SLinus Torvalds 	}
25271da177e4SLinus Torvalds 
25281da177e4SLinus Torvalds 	if (nr_reads) {
25291da177e4SLinus Torvalds 		/*
25301da177e4SLinus Torvalds 		 * The page is locked, so these buffers are protected from
25311da177e4SLinus Torvalds 		 * any VM or truncate activity.  Hence we don't need to care
25321da177e4SLinus Torvalds 		 * for the buffer_head refcounts.
25331da177e4SLinus Torvalds 		 */
2534a4b0672dSNick Piggin 		for (bh = head; bh; bh = bh->b_this_page) {
25351da177e4SLinus Torvalds 			wait_on_buffer(bh);
25361da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
25371da177e4SLinus Torvalds 				ret = -EIO;
25381da177e4SLinus Torvalds 		}
25391da177e4SLinus Torvalds 		if (ret)
25401da177e4SLinus Torvalds 			goto failed;
25411da177e4SLinus Torvalds 	}
25421da177e4SLinus Torvalds 
25431da177e4SLinus Torvalds 	if (is_mapped_to_disk)
25441da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
25451da177e4SLinus Torvalds 
254603158cd7SNick Piggin 	*fsdata = head; /* to be released by nobh_write_end */
2547a4b0672dSNick Piggin 
25481da177e4SLinus Torvalds 	return 0;
25491da177e4SLinus Torvalds 
25501da177e4SLinus Torvalds failed:
255103158cd7SNick Piggin 	BUG_ON(!ret);
25521da177e4SLinus Torvalds 	/*
2553a4b0672dSNick Piggin 	 * Error recovery is a bit difficult. We need to zero out blocks that
2554a4b0672dSNick Piggin 	 * were newly allocated, and dirty them to ensure they get written out.
2555a4b0672dSNick Piggin 	 * Buffers need to be attached to the page at this point, otherwise
2556a4b0672dSNick Piggin 	 * the handling of potential IO errors during writeout would be hard
2557a4b0672dSNick Piggin 	 * (could try doing synchronous writeout, but what if that fails too?)
25581da177e4SLinus Torvalds 	 */
255903158cd7SNick Piggin 	attach_nobh_buffers(page, head);
256003158cd7SNick Piggin 	page_zero_new_buffers(page, from, to);
2561a4b0672dSNick Piggin 
256203158cd7SNick Piggin out_release:
256303158cd7SNick Piggin 	unlock_page(page);
256403158cd7SNick Piggin 	page_cache_release(page);
256503158cd7SNick Piggin 	*pagep = NULL;
2566a4b0672dSNick Piggin 
256703158cd7SNick Piggin 	if (pos + len > inode->i_size)
256803158cd7SNick Piggin 		vmtruncate(inode, inode->i_size);
2569a4b0672dSNick Piggin 
25701da177e4SLinus Torvalds 	return ret;
25711da177e4SLinus Torvalds }
257203158cd7SNick Piggin EXPORT_SYMBOL(nobh_write_begin);
25731da177e4SLinus Torvalds 
257403158cd7SNick Piggin int nobh_write_end(struct file *file, struct address_space *mapping,
257503158cd7SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
257603158cd7SNick Piggin 			struct page *page, void *fsdata)
25771da177e4SLinus Torvalds {
25781da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
2579efdc3131SNick Piggin 	struct buffer_head *head = fsdata;
258003158cd7SNick Piggin 	struct buffer_head *bh;
25815b41e74aSDmitri Monakhov 	BUG_ON(fsdata != NULL && page_has_buffers(page));
25821da177e4SLinus Torvalds 
258303158cd7SNick Piggin 	if (unlikely(copied < len) && !page_has_buffers(page))
258403158cd7SNick Piggin 		attach_nobh_buffers(page, head);
2585a4b0672dSNick Piggin 	if (page_has_buffers(page))
258603158cd7SNick Piggin 		return generic_write_end(file, mapping, pos, len,
258703158cd7SNick Piggin 					copied, page, fsdata);
2588a4b0672dSNick Piggin 
258922c8ca78SNick Piggin 	SetPageUptodate(page);
25901da177e4SLinus Torvalds 	set_page_dirty(page);
259103158cd7SNick Piggin 	if (pos+copied > inode->i_size) {
259203158cd7SNick Piggin 		i_size_write(inode, pos+copied);
25931da177e4SLinus Torvalds 		mark_inode_dirty(inode);
25941da177e4SLinus Torvalds 	}
259503158cd7SNick Piggin 
259603158cd7SNick Piggin 	unlock_page(page);
259703158cd7SNick Piggin 	page_cache_release(page);
259803158cd7SNick Piggin 
259903158cd7SNick Piggin 	while (head) {
260003158cd7SNick Piggin 		bh = head;
260103158cd7SNick Piggin 		head = head->b_this_page;
260203158cd7SNick Piggin 		free_buffer_head(bh);
26031da177e4SLinus Torvalds 	}
260403158cd7SNick Piggin 
260503158cd7SNick Piggin 	return copied;
260603158cd7SNick Piggin }
260703158cd7SNick Piggin EXPORT_SYMBOL(nobh_write_end);
26081da177e4SLinus Torvalds 
26091da177e4SLinus Torvalds /*
26101da177e4SLinus Torvalds  * nobh_writepage() - based on block_write_full_page() except
26111da177e4SLinus Torvalds  * that it tries to operate without attaching bufferheads to
26121da177e4SLinus Torvalds  * the page.
26131da177e4SLinus Torvalds  */
26141da177e4SLinus Torvalds int nobh_writepage(struct page *page, get_block_t *get_block,
26151da177e4SLinus Torvalds 			struct writeback_control *wbc)
26161da177e4SLinus Torvalds {
26171da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
26181da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
26191da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
26201da177e4SLinus Torvalds 	unsigned offset;
26211da177e4SLinus Torvalds 	int ret;
26221da177e4SLinus Torvalds 
26231da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
26241da177e4SLinus Torvalds 	if (page->index < end_index)
26251da177e4SLinus Torvalds 		goto out;
26261da177e4SLinus Torvalds 
26271da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
26281da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
26291da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
26301da177e4SLinus Torvalds 		/*
26311da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
26321da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
26331da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
26341da177e4SLinus Torvalds 		 */
26351da177e4SLinus Torvalds #if 0
26361da177e4SLinus Torvalds 		/* Not really sure about this - do we need this? */
26371da177e4SLinus Torvalds 		if (page->mapping->a_ops->invalidatepage)
26381da177e4SLinus Torvalds 			page->mapping->a_ops->invalidatepage(page, offset);
26391da177e4SLinus Torvalds #endif
26401da177e4SLinus Torvalds 		unlock_page(page);
26411da177e4SLinus Torvalds 		return 0; /* don't care */
26421da177e4SLinus Torvalds 	}
26431da177e4SLinus Torvalds 
26441da177e4SLinus Torvalds 	/*
26451da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
26461da177e4SLinus Torvalds 	 * writepage invocation because it may be mmapped.  "A file is mapped
26471da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
26481da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
26491da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
26501da177e4SLinus Torvalds 	 */
2651eebd2aa3SChristoph Lameter 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
26521da177e4SLinus Torvalds out:
26531da177e4SLinus Torvalds 	ret = mpage_writepage(page, get_block, wbc);
26541da177e4SLinus Torvalds 	if (ret == -EAGAIN)
26551da177e4SLinus Torvalds 		ret = __block_write_full_page(inode, page, get_block, wbc);
26561da177e4SLinus Torvalds 	return ret;
26571da177e4SLinus Torvalds }
26581da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_writepage);
26591da177e4SLinus Torvalds 
266003158cd7SNick Piggin int nobh_truncate_page(struct address_space *mapping,
266103158cd7SNick Piggin 			loff_t from, get_block_t *get_block)
26621da177e4SLinus Torvalds {
26631da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
26641da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
266503158cd7SNick Piggin 	unsigned blocksize;
266603158cd7SNick Piggin 	sector_t iblock;
266703158cd7SNick Piggin 	unsigned length, pos;
266803158cd7SNick Piggin 	struct inode *inode = mapping->host;
26691da177e4SLinus Torvalds 	struct page *page;
267003158cd7SNick Piggin 	struct buffer_head map_bh;
267103158cd7SNick Piggin 	int err;
26721da177e4SLinus Torvalds 
267303158cd7SNick Piggin 	blocksize = 1 << inode->i_blkbits;
267403158cd7SNick Piggin 	length = offset & (blocksize - 1);
26751da177e4SLinus Torvalds 
267603158cd7SNick Piggin 	/* Block boundary? Nothing to do */
267703158cd7SNick Piggin 	if (!length)
267803158cd7SNick Piggin 		return 0;
267903158cd7SNick Piggin 
268003158cd7SNick Piggin 	length = blocksize - length;
268103158cd7SNick Piggin 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
268203158cd7SNick Piggin 
26831da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
268403158cd7SNick Piggin 	err = -ENOMEM;
26851da177e4SLinus Torvalds 	if (!page)
26861da177e4SLinus Torvalds 		goto out;
26871da177e4SLinus Torvalds 
268803158cd7SNick Piggin 	if (page_has_buffers(page)) {
268903158cd7SNick Piggin has_buffers:
269003158cd7SNick Piggin 		unlock_page(page);
269103158cd7SNick Piggin 		page_cache_release(page);
269203158cd7SNick Piggin 		return block_truncate_page(mapping, from, get_block);
26931da177e4SLinus Torvalds 	}
269403158cd7SNick Piggin 
269503158cd7SNick Piggin 	/* Find the buffer that contains "offset" */
269603158cd7SNick Piggin 	pos = blocksize;
269703158cd7SNick Piggin 	while (offset >= pos) {
269803158cd7SNick Piggin 		iblock++;
269903158cd7SNick Piggin 		pos += blocksize;
270003158cd7SNick Piggin 	}
270103158cd7SNick Piggin 
270203158cd7SNick Piggin 	err = get_block(inode, iblock, &map_bh, 0);
270303158cd7SNick Piggin 	if (err)
270403158cd7SNick Piggin 		goto unlock;
270503158cd7SNick Piggin 	/* unmapped? It's a hole - nothing to do */
270603158cd7SNick Piggin 	if (!buffer_mapped(&map_bh))
270703158cd7SNick Piggin 		goto unlock;
270803158cd7SNick Piggin 
270903158cd7SNick Piggin 	/* Ok, it's mapped. Make sure it's up-to-date */
271003158cd7SNick Piggin 	if (!PageUptodate(page)) {
271103158cd7SNick Piggin 		err = mapping->a_ops->readpage(NULL, page);
271203158cd7SNick Piggin 		if (err) {
271303158cd7SNick Piggin 			page_cache_release(page);
271403158cd7SNick Piggin 			goto out;
271503158cd7SNick Piggin 		}
271603158cd7SNick Piggin 		lock_page(page);
271703158cd7SNick Piggin 		if (!PageUptodate(page)) {
271803158cd7SNick Piggin 			err = -EIO;
271903158cd7SNick Piggin 			goto unlock;
272003158cd7SNick Piggin 		}
272103158cd7SNick Piggin 		if (page_has_buffers(page))
272203158cd7SNick Piggin 			goto has_buffers;
272303158cd7SNick Piggin 	}
2724eebd2aa3SChristoph Lameter 	zero_user(page, offset, length);
272503158cd7SNick Piggin 	set_page_dirty(page);
272603158cd7SNick Piggin 	err = 0;
272703158cd7SNick Piggin 
272803158cd7SNick Piggin unlock:
27291da177e4SLinus Torvalds 	unlock_page(page);
27301da177e4SLinus Torvalds 	page_cache_release(page);
27311da177e4SLinus Torvalds out:
273203158cd7SNick Piggin 	return err;
27331da177e4SLinus Torvalds }
27341da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_truncate_page);
27351da177e4SLinus Torvalds 
27361da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping,
27371da177e4SLinus Torvalds 			loff_t from, get_block_t *get_block)
27381da177e4SLinus Torvalds {
27391da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
27401da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
27411da177e4SLinus Torvalds 	unsigned blocksize;
274254b21a79SAndrew Morton 	sector_t iblock;
27431da177e4SLinus Torvalds 	unsigned length, pos;
27441da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
27451da177e4SLinus Torvalds 	struct page *page;
27461da177e4SLinus Torvalds 	struct buffer_head *bh;
27471da177e4SLinus Torvalds 	int err;
27481da177e4SLinus Torvalds 
27491da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
27501da177e4SLinus Torvalds 	length = offset & (blocksize - 1);
27511da177e4SLinus Torvalds 
27521da177e4SLinus Torvalds 	/* Block boundary? Nothing to do */
27531da177e4SLinus Torvalds 	if (!length)
27541da177e4SLinus Torvalds 		return 0;
27551da177e4SLinus Torvalds 
27561da177e4SLinus Torvalds 	length = blocksize - length;
275754b21a79SAndrew Morton 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
27581da177e4SLinus Torvalds 
27591da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
27601da177e4SLinus Torvalds 	err = -ENOMEM;
27611da177e4SLinus Torvalds 	if (!page)
27621da177e4SLinus Torvalds 		goto out;
27631da177e4SLinus Torvalds 
27641da177e4SLinus Torvalds 	if (!page_has_buffers(page))
27651da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
27661da177e4SLinus Torvalds 
27671da177e4SLinus Torvalds 	/* Find the buffer that contains "offset" */
27681da177e4SLinus Torvalds 	bh = page_buffers(page);
27691da177e4SLinus Torvalds 	pos = blocksize;
27701da177e4SLinus Torvalds 	while (offset >= pos) {
27711da177e4SLinus Torvalds 		bh = bh->b_this_page;
27721da177e4SLinus Torvalds 		iblock++;
27731da177e4SLinus Torvalds 		pos += blocksize;
27741da177e4SLinus Torvalds 	}
27751da177e4SLinus Torvalds 
27761da177e4SLinus Torvalds 	err = 0;
27771da177e4SLinus Torvalds 	if (!buffer_mapped(bh)) {
2778b0cf2321SBadari Pulavarty 		WARN_ON(bh->b_size != blocksize);
27791da177e4SLinus Torvalds 		err = get_block(inode, iblock, bh, 0);
27801da177e4SLinus Torvalds 		if (err)
27811da177e4SLinus Torvalds 			goto unlock;
27821da177e4SLinus Torvalds 		/* unmapped? It's a hole - nothing to do */
27831da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
27841da177e4SLinus Torvalds 			goto unlock;
27851da177e4SLinus Torvalds 	}
27861da177e4SLinus Torvalds 
27871da177e4SLinus Torvalds 	/* Ok, it's mapped. Make sure it's up-to-date */
27881da177e4SLinus Torvalds 	if (PageUptodate(page))
27891da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
27901da177e4SLinus Torvalds 
279133a266ddSDavid Chinner 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
27921da177e4SLinus Torvalds 		err = -EIO;
27931da177e4SLinus Torvalds 		ll_rw_block(READ, 1, &bh);
27941da177e4SLinus Torvalds 		wait_on_buffer(bh);
27951da177e4SLinus Torvalds 		/* Uhhuh. Read error. Complain and punt. */
27961da177e4SLinus Torvalds 		if (!buffer_uptodate(bh))
27971da177e4SLinus Torvalds 			goto unlock;
27981da177e4SLinus Torvalds 	}
27991da177e4SLinus Torvalds 
2800eebd2aa3SChristoph Lameter 	zero_user(page, offset, length);
28011da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
28021da177e4SLinus Torvalds 	err = 0;
28031da177e4SLinus Torvalds 
28041da177e4SLinus Torvalds unlock:
28051da177e4SLinus Torvalds 	unlock_page(page);
28061da177e4SLinus Torvalds 	page_cache_release(page);
28071da177e4SLinus Torvalds out:
28081da177e4SLinus Torvalds 	return err;
28091da177e4SLinus Torvalds }
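
/*
 * Illustrative sketch, not part of this file: truncate paths call this to
 * zero out the tail of the (possibly partial) last block, e.g. from a
 * filesystem's truncate routine ("foo_get_block" is hypothetical):
 *
 *	block_truncate_page(inode->i_mapping, inode->i_size, foo_get_block);
 */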
28101da177e4SLinus Torvalds 
28111da177e4SLinus Torvalds /*
28121da177e4SLinus Torvalds  * The generic ->writepage function for buffer-backed address_spaces
28131da177e4SLinus Torvalds  */
28141da177e4SLinus Torvalds int block_write_full_page(struct page *page, get_block_t *get_block,
28151da177e4SLinus Torvalds 			struct writeback_control *wbc)
28161da177e4SLinus Torvalds {
28171da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
28181da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
28191da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
28201da177e4SLinus Torvalds 	unsigned offset;
28211da177e4SLinus Torvalds 
28221da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
28231da177e4SLinus Torvalds 	if (page->index < end_index)
28241da177e4SLinus Torvalds 		return __block_write_full_page(inode, page, get_block, wbc);
28251da177e4SLinus Torvalds 
28261da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
28271da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
28281da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
28291da177e4SLinus Torvalds 		/*
28301da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
28311da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
28321da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
28331da177e4SLinus Torvalds 		 */
2834aaa4059bSJan Kara 		do_invalidatepage(page, 0);
28351da177e4SLinus Torvalds 		unlock_page(page);
28361da177e4SLinus Torvalds 		return 0; /* don't care */
28371da177e4SLinus Torvalds 	}
28381da177e4SLinus Torvalds 
28391da177e4SLinus Torvalds 	/*
28401da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
28411da177e4SLinus Torvalds 	 * writepage invocation because it may be mmapped.  "A file is mapped
28421da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
28431da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
28441da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
28451da177e4SLinus Torvalds 	 */
2846eebd2aa3SChristoph Lameter 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
28471da177e4SLinus Torvalds 	return __block_write_full_page(inode, page, get_block, wbc);
28481da177e4SLinus Torvalds }
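
/*
 * Illustrative sketch, not part of this file: the usual way to use
 * block_write_full_page() as a filesystem's ->writepage
 * ("foo_get_block" is hypothetical):
 *
 *	static int foo_writepage(struct page *page,
 *					struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, foo_get_block, wbc);
 *	}
 */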
28491da177e4SLinus Torvalds 
28501da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
28511da177e4SLinus Torvalds 			    get_block_t *get_block)
28521da177e4SLinus Torvalds {
28531da177e4SLinus Torvalds 	struct buffer_head tmp;
28541da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
28551da177e4SLinus Torvalds 	tmp.b_state = 0;
28561da177e4SLinus Torvalds 	tmp.b_blocknr = 0;
2857b0cf2321SBadari Pulavarty 	tmp.b_size = 1 << inode->i_blkbits;
28581da177e4SLinus Torvalds 	get_block(inode, block, &tmp, 0);
28591da177e4SLinus Torvalds 	return tmp.b_blocknr;
28601da177e4SLinus Torvalds }
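
/*
 * Illustrative sketch, not part of this file: generic_block_bmap() is
 * normally wrapped as the ->bmap address_space operation
 * ("foo_get_block" is hypothetical):
 *
 *	static sector_t foo_bmap(struct address_space *mapping,
 *					sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, foo_get_block);
 *	}
 */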
28611da177e4SLinus Torvalds 
28626712ecf8SNeilBrown static void end_bio_bh_io_sync(struct bio *bio, int err)
28631da177e4SLinus Torvalds {
28641da177e4SLinus Torvalds 	struct buffer_head *bh = bio->bi_private;
28651da177e4SLinus Torvalds 
28661da177e4SLinus Torvalds 	if (err == -EOPNOTSUPP) {
28671da177e4SLinus Torvalds 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
28681da177e4SLinus Torvalds 		set_bit(BH_Eopnotsupp, &bh->b_state);
28691da177e4SLinus Torvalds 	}
28701da177e4SLinus Torvalds 
28711da177e4SLinus Torvalds 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
28721da177e4SLinus Torvalds 	bio_put(bio);
28731da177e4SLinus Torvalds }
28741da177e4SLinus Torvalds 
28751da177e4SLinus Torvalds int submit_bh(int rw, struct buffer_head * bh)
28761da177e4SLinus Torvalds {
28771da177e4SLinus Torvalds 	struct bio *bio;
28781da177e4SLinus Torvalds 	int ret = 0;
28791da177e4SLinus Torvalds 
28801da177e4SLinus Torvalds 	BUG_ON(!buffer_locked(bh));
28811da177e4SLinus Torvalds 	BUG_ON(!buffer_mapped(bh));
28821da177e4SLinus Torvalds 	BUG_ON(!bh->b_end_io);
28831da177e4SLinus Torvalds 
28841da177e4SLinus Torvalds 	if (buffer_ordered(bh) && (rw == WRITE))
28851da177e4SLinus Torvalds 		rw = WRITE_BARRIER;
28861da177e4SLinus Torvalds 
28871da177e4SLinus Torvalds 	/*
28881da177e4SLinus Torvalds 	 * Only clear out a write error when rewriting; should this
28891da177e4SLinus Torvalds 	 * include WRITE_SYNC as well?
28901da177e4SLinus Torvalds 	 */
28911da177e4SLinus Torvalds 	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
28921da177e4SLinus Torvalds 		clear_buffer_write_io_error(bh);
28931da177e4SLinus Torvalds 
28941da177e4SLinus Torvalds 	/*
28951da177e4SLinus Torvalds 	 * from here on down, it's all bio -- do the initial mapping,
28961da177e4SLinus Torvalds 	 * submit_bio -> generic_make_request may further map this bio around
28971da177e4SLinus Torvalds 	 */
28981da177e4SLinus Torvalds 	bio = bio_alloc(GFP_NOIO, 1);
28991da177e4SLinus Torvalds 
29001da177e4SLinus Torvalds 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
29011da177e4SLinus Torvalds 	bio->bi_bdev = bh->b_bdev;
29021da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_page = bh->b_page;
29031da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_len = bh->b_size;
29041da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
29051da177e4SLinus Torvalds 
29061da177e4SLinus Torvalds 	bio->bi_vcnt = 1;
29071da177e4SLinus Torvalds 	bio->bi_idx = 0;
29081da177e4SLinus Torvalds 	bio->bi_size = bh->b_size;
29091da177e4SLinus Torvalds 
29101da177e4SLinus Torvalds 	bio->bi_end_io = end_bio_bh_io_sync;
29111da177e4SLinus Torvalds 	bio->bi_private = bh;
29121da177e4SLinus Torvalds 
29131da177e4SLinus Torvalds 	bio_get(bio);
29141da177e4SLinus Torvalds 	submit_bio(rw, bio);
29151da177e4SLinus Torvalds 
29161da177e4SLinus Torvalds 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
29171da177e4SLinus Torvalds 		ret = -EOPNOTSUPP;
29181da177e4SLinus Torvalds 
29191da177e4SLinus Torvalds 	bio_put(bio);
29201da177e4SLinus Torvalds 	return ret;
29211da177e4SLinus Torvalds }
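
/*
 * Illustrative sketch, not part of this file: the canonical pattern for
 * reading one buffer synchronously through submit_bh(), assuming "bh" is
 * a mapped buffer_head the caller holds a reference on.  The
 * end_buffer_read_sync completion handler unlocks the buffer and drops
 * the extra reference taken here.
 *
 *	lock_buffer(bh);
 *	if (!buffer_uptodate(bh)) {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_read_sync;
 *		submit_bh(READ, bh);
 *		wait_on_buffer(bh);
 *	} else
 *		unlock_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		return -EIO;
 */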
29221da177e4SLinus Torvalds 
29231da177e4SLinus Torvalds /**
29241da177e4SLinus Torvalds  * ll_rw_block: low-level access to block devices (DEPRECATED)
2925a7662236SJan Kara  * @rw: whether to %READ, %WRITE, %SWRITE or %READA (readahead)
29261da177e4SLinus Torvalds  * @nr: number of &struct buffer_heads in the array
29271da177e4SLinus Torvalds  * @bhs: array of pointers to &struct buffer_head
29281da177e4SLinus Torvalds  *
2929a7662236SJan Kara  * ll_rw_block() takes an array of pointers to &struct buffer_head, and
2930a7662236SJan Kara  * requests an I/O operation on them, either a %READ or a %WRITE.  %SWRITE
2931a7662236SJan Kara  * is like %WRITE, except that it makes sure the *current* data in the
2932a7662236SJan Kara  * buffers is sent to disk. The %READA (readahead) option is described in
2933a7662236SJan Kara  * the documentation for generic_make_request(), which ll_rw_block() calls.
29341da177e4SLinus Torvalds  *
29351da177e4SLinus Torvalds  * This function drops any buffer that it cannot get a lock on (with the
2936a7662236SJan Kara  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2937a7662236SJan Kara  * clean when doing a write request, and any buffer that appears to be
2938a7662236SJan Kara  * up-to-date when doing a read request.  Further, it marks as clean any
2939a7662236SJan Kara  * buffers that are processed for writing (the buffer cache won't assume
2940a7662236SJan Kara  * that they are actually clean until the buffer gets unlocked).
29411da177e4SLinus Torvalds  *
29421da177e4SLinus Torvalds  * ll_rw_block sets b_end_io to a simple completion handler that marks
29431da177e4SLinus Torvalds  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
29441da177e4SLinus Torvalds  * any waiters.
29451da177e4SLinus Torvalds  *
29461da177e4SLinus Torvalds  * All of the buffers must be for the same device, and must also be a
29471da177e4SLinus Torvalds  * multiple of the current approved size for the device.
29481da177e4SLinus Torvalds  */
29491da177e4SLinus Torvalds void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
29501da177e4SLinus Torvalds {
29511da177e4SLinus Torvalds 	int i;
29521da177e4SLinus Torvalds 
29531da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
29541da177e4SLinus Torvalds 		struct buffer_head *bh = bhs[i];
29551da177e4SLinus Torvalds 
295618ce3751SJens Axboe 		if (rw == SWRITE || rw == SWRITE_SYNC)
2957a7662236SJan Kara 			lock_buffer(bh);
2958a7662236SJan Kara 		else if (test_set_buffer_locked(bh))
29591da177e4SLinus Torvalds 			continue;
29601da177e4SLinus Torvalds 
296118ce3751SJens Axboe 		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
29621da177e4SLinus Torvalds 			if (test_clear_buffer_dirty(bh)) {
296376c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_write_sync;
2964e60e5c50SOGAWA Hirofumi 				get_bh(bh);
296518ce3751SJens Axboe 				if (rw == SWRITE_SYNC)
296618ce3751SJens Axboe 					submit_bh(WRITE_SYNC, bh);
296718ce3751SJens Axboe 				else
29681da177e4SLinus Torvalds 					submit_bh(WRITE, bh);
29691da177e4SLinus Torvalds 				continue;
29701da177e4SLinus Torvalds 			}
29711da177e4SLinus Torvalds 		} else {
29721da177e4SLinus Torvalds 			if (!buffer_uptodate(bh)) {
297376c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_read_sync;
2974e60e5c50SOGAWA Hirofumi 				get_bh(bh);
29751da177e4SLinus Torvalds 				submit_bh(rw, bh);
29761da177e4SLinus Torvalds 				continue;
29771da177e4SLinus Torvalds 			}
29781da177e4SLinus Torvalds 		}
29791da177e4SLinus Torvalds 		unlock_buffer(bh);
29801da177e4SLinus Torvalds 	}
29811da177e4SLinus Torvalds }
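
/*
 * Illustrative sketch, not part of this file: callers typically fire off
 * the batch of reads and then wait on each buffer, checking the result:
 *
 *	ll_rw_block(READ, nr, bhs);
 *	for (i = 0; i < nr; i++) {
 *		wait_on_buffer(bhs[i]);
 *		if (!buffer_uptodate(bhs[i]))
 *			return -EIO;
 *	}
 */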
29821da177e4SLinus Torvalds 
29831da177e4SLinus Torvalds /*
29841da177e4SLinus Torvalds  * For a data-integrity writeout, we need to wait upon any in-progress I/O
29851da177e4SLinus Torvalds  * and then start new I/O and then wait upon it.  The caller must have a ref on
29861da177e4SLinus Torvalds  * the buffer_head.
29871da177e4SLinus Torvalds  */
29881da177e4SLinus Torvalds int sync_dirty_buffer(struct buffer_head *bh)
29891da177e4SLinus Torvalds {
29901da177e4SLinus Torvalds 	int ret = 0;
29911da177e4SLinus Torvalds 
29921da177e4SLinus Torvalds 	WARN_ON(atomic_read(&bh->b_count) < 1);
29931da177e4SLinus Torvalds 	lock_buffer(bh);
29941da177e4SLinus Torvalds 	if (test_clear_buffer_dirty(bh)) {
29951da177e4SLinus Torvalds 		get_bh(bh);
29961da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_write_sync;
299718ce3751SJens Axboe 		ret = submit_bh(WRITE_SYNC, bh);
29981da177e4SLinus Torvalds 		wait_on_buffer(bh);
29991da177e4SLinus Torvalds 		if (buffer_eopnotsupp(bh)) {
30001da177e4SLinus Torvalds 			clear_buffer_eopnotsupp(bh);
30011da177e4SLinus Torvalds 			ret = -EOPNOTSUPP;
30021da177e4SLinus Torvalds 		}
30031da177e4SLinus Torvalds 		if (!ret && !buffer_uptodate(bh))
30041da177e4SLinus Torvalds 			ret = -EIO;
30051da177e4SLinus Torvalds 	} else {
30061da177e4SLinus Torvalds 		unlock_buffer(bh);
30071da177e4SLinus Torvalds 	}
30081da177e4SLinus Torvalds 	return ret;
30091da177e4SLinus Torvalds }
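
/*
 * Illustrative sketch, not part of this file: a typical metadata update
 * that must be on disk before the caller proceeds.  "bh", "offset", "src"
 * and "size" are hypothetical (bh would come from e.g. sb_bread()); the
 * sync_dirty_buffer() call blocks until the write completes.
 *
 *	memcpy(bh->b_data + offset, src, size);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	if (err)
 *		return err;
 */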
30101da177e4SLinus Torvalds 
30111da177e4SLinus Torvalds /*
30121da177e4SLinus Torvalds  * try_to_free_buffers() checks if all the buffers on this particular page
30131da177e4SLinus Torvalds  * are unused, and releases them if so.
30141da177e4SLinus Torvalds  *
30151da177e4SLinus Torvalds  * Exclusion against try_to_free_buffers may be obtained by either
30161da177e4SLinus Torvalds  * locking the page or by holding its mapping's private_lock.
30171da177e4SLinus Torvalds  *
30181da177e4SLinus Torvalds  * If the page is dirty but all the buffers are clean then we need to
30191da177e4SLinus Torvalds  * be sure to mark the page clean as well.  This is because the page
30201da177e4SLinus Torvalds  * may be against a block device, and a later reattachment of buffers
30211da177e4SLinus Torvalds  * to a dirty page will set *all* buffers dirty, which would corrupt
30221da177e4SLinus Torvalds  * filesystem data on the same device.
30231da177e4SLinus Torvalds  *
30241da177e4SLinus Torvalds  * The same applies to regular filesystem pages: if all the buffers are
30251da177e4SLinus Torvalds  * clean then we set the page clean and proceed.  To do that, we require
30261da177e4SLinus Torvalds  * total exclusion from __set_page_dirty_buffers().  That is obtained with
30271da177e4SLinus Torvalds  * private_lock.
30281da177e4SLinus Torvalds  *
30291da177e4SLinus Torvalds  * try_to_free_buffers() is non-blocking.
30301da177e4SLinus Torvalds  */
30311da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh)
30321da177e4SLinus Torvalds {
30331da177e4SLinus Torvalds 	return atomic_read(&bh->b_count) |
30341da177e4SLinus Torvalds 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
30351da177e4SLinus Torvalds }
30361da177e4SLinus Torvalds 
30371da177e4SLinus Torvalds static int
30381da177e4SLinus Torvalds drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
30391da177e4SLinus Torvalds {
30401da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);
30411da177e4SLinus Torvalds 	struct buffer_head *bh;
30421da177e4SLinus Torvalds 
30431da177e4SLinus Torvalds 	bh = head;
30441da177e4SLinus Torvalds 	do {
3045de7d5a3bSakpm@osdl.org 		if (buffer_write_io_error(bh) && page->mapping)
30461da177e4SLinus Torvalds 			set_bit(AS_EIO, &page->mapping->flags);
30471da177e4SLinus Torvalds 		if (buffer_busy(bh))
30481da177e4SLinus Torvalds 			goto failed;
30491da177e4SLinus Torvalds 		bh = bh->b_this_page;
30501da177e4SLinus Torvalds 	} while (bh != head);
30511da177e4SLinus Torvalds 
30521da177e4SLinus Torvalds 	do {
30531da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
30541da177e4SLinus Torvalds 
3055535ee2fbSJan Kara 		if (bh->b_assoc_map)
30561da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
30571da177e4SLinus Torvalds 		bh = next;
30581da177e4SLinus Torvalds 	} while (bh != head);
30591da177e4SLinus Torvalds 	*buffers_to_free = head;
30601da177e4SLinus Torvalds 	__clear_page_buffers(page);
30611da177e4SLinus Torvalds 	return 1;
30621da177e4SLinus Torvalds failed:
30631da177e4SLinus Torvalds 	return 0;
30641da177e4SLinus Torvalds }
30651da177e4SLinus Torvalds 
30661da177e4SLinus Torvalds int try_to_free_buffers(struct page *page)
30671da177e4SLinus Torvalds {
30681da177e4SLinus Torvalds 	struct address_space * const mapping = page->mapping;
30691da177e4SLinus Torvalds 	struct buffer_head *buffers_to_free = NULL;
30701da177e4SLinus Torvalds 	int ret = 0;
30711da177e4SLinus Torvalds 
30721da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
3073ecdfc978SLinus Torvalds 	if (PageWriteback(page))
30741da177e4SLinus Torvalds 		return 0;
30751da177e4SLinus Torvalds 
30761da177e4SLinus Torvalds 	if (mapping == NULL) {		/* can this still happen? */
30771da177e4SLinus Torvalds 		ret = drop_buffers(page, &buffers_to_free);
30781da177e4SLinus Torvalds 		goto out;
30791da177e4SLinus Torvalds 	}
30801da177e4SLinus Torvalds 
30811da177e4SLinus Torvalds 	spin_lock(&mapping->private_lock);
30821da177e4SLinus Torvalds 	ret = drop_buffers(page, &buffers_to_free);
3083ecdfc978SLinus Torvalds 
3084ecdfc978SLinus Torvalds 	/*
3085ecdfc978SLinus Torvalds 	 * If the filesystem writes its buffers by hand (eg ext3)
3086ecdfc978SLinus Torvalds 	 * then we can have clean buffers against a dirty page.  We
3087ecdfc978SLinus Torvalds 	 * clean the page here; otherwise the VM will never notice
3088ecdfc978SLinus Torvalds 	 * that the filesystem did any IO at all.
3089ecdfc978SLinus Torvalds 	 *
3090ecdfc978SLinus Torvalds 	 * Also, during truncate, discard_buffer will have marked all
3091ecdfc978SLinus Torvalds 	 * the page's buffers clean.  We discover that here and clean
3092ecdfc978SLinus Torvalds 	 * the page also.
309387df7241SNick Piggin 	 *
309487df7241SNick Piggin 	 * private_lock must be held over this entire operation in order
309587df7241SNick Piggin 	 * to synchronise against __set_page_dirty_buffers and prevent the
309687df7241SNick Piggin 	 * dirty bit from being lost.
3097ecdfc978SLinus Torvalds 	 */
3098ecdfc978SLinus Torvalds 	if (ret)
3099ecdfc978SLinus Torvalds 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
310087df7241SNick Piggin 	spin_unlock(&mapping->private_lock);
31011da177e4SLinus Torvalds out:
31021da177e4SLinus Torvalds 	if (buffers_to_free) {
31031da177e4SLinus Torvalds 		struct buffer_head *bh = buffers_to_free;
31041da177e4SLinus Torvalds 
31051da177e4SLinus Torvalds 		do {
31061da177e4SLinus Torvalds 			struct buffer_head *next = bh->b_this_page;
31071da177e4SLinus Torvalds 			free_buffer_head(bh);
31081da177e4SLinus Torvalds 			bh = next;
31091da177e4SLinus Torvalds 		} while (bh != buffers_to_free);
31101da177e4SLinus Torvalds 	}
31111da177e4SLinus Torvalds 	return ret;
31121da177e4SLinus Torvalds }
31131da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers);
31141da177e4SLinus Torvalds 
31153978d717SNeilBrown void block_sync_page(struct page *page)
31161da177e4SLinus Torvalds {
31171da177e4SLinus Torvalds 	struct address_space *mapping;
31181da177e4SLinus Torvalds 
31191da177e4SLinus Torvalds 	smp_mb();
31201da177e4SLinus Torvalds 	mapping = page_mapping(page);
31211da177e4SLinus Torvalds 	if (mapping)
31221da177e4SLinus Torvalds 		blk_run_backing_dev(mapping->backing_dev_info, page);
31231da177e4SLinus Torvalds }
31241da177e4SLinus Torvalds 
31251da177e4SLinus Torvalds /*
31261da177e4SLinus Torvalds  * There are no bdflush tunables left.  But distributions are
31271da177e4SLinus Torvalds  * still running obsolete flush daemons, so we terminate them here.
31281da177e4SLinus Torvalds  *
31291da177e4SLinus Torvalds  * Use of bdflush() is deprecated and will be removed in a future kernel.
31301da177e4SLinus Torvalds  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
31311da177e4SLinus Torvalds  */
31321da177e4SLinus Torvalds asmlinkage long sys_bdflush(int func, long data)
31331da177e4SLinus Torvalds {
31341da177e4SLinus Torvalds 	static int msg_count;
31351da177e4SLinus Torvalds 
31361da177e4SLinus Torvalds 	if (!capable(CAP_SYS_ADMIN))
31371da177e4SLinus Torvalds 		return -EPERM;
31381da177e4SLinus Torvalds 
31391da177e4SLinus Torvalds 	if (msg_count < 5) {
31401da177e4SLinus Torvalds 		msg_count++;
31411da177e4SLinus Torvalds 		printk(KERN_INFO
31421da177e4SLinus Torvalds 			"warning: process `%s' used the obsolete bdflush"
31431da177e4SLinus Torvalds 			" system call\n", current->comm);
31441da177e4SLinus Torvalds 		printk(KERN_INFO "Fix your initscripts?\n");
31451da177e4SLinus Torvalds 	}
31461da177e4SLinus Torvalds 
31471da177e4SLinus Torvalds 	if (func == 1)
31481da177e4SLinus Torvalds 		do_exit(0);
31491da177e4SLinus Torvalds 	return 0;
31501da177e4SLinus Torvalds }
31511da177e4SLinus Torvalds 
31521da177e4SLinus Torvalds /*
31531da177e4SLinus Torvalds  * Buffer-head allocation
31541da177e4SLinus Torvalds  */
3155e18b890bSChristoph Lameter static struct kmem_cache *bh_cachep;
31561da177e4SLinus Torvalds 
31571da177e4SLinus Torvalds /*
31581da177e4SLinus Torvalds  * Once the number of bh's in the machine exceeds this level, we start
31591da177e4SLinus Torvalds  * stripping them in writeback.
31601da177e4SLinus Torvalds  */
31611da177e4SLinus Torvalds static int max_buffer_heads;
31621da177e4SLinus Torvalds 
31631da177e4SLinus Torvalds int buffer_heads_over_limit;
31641da177e4SLinus Torvalds 
31651da177e4SLinus Torvalds struct bh_accounting {
31661da177e4SLinus Torvalds 	int nr;			/* Number of live bh's */
31671da177e4SLinus Torvalds 	int ratelimit;		/* Limit cacheline bouncing */
31681da177e4SLinus Torvalds };
31691da177e4SLinus Torvalds 
31701da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
31711da177e4SLinus Torvalds 
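/*
 * Fold the per-CPU bh counts into a global total and update
 * buffer_heads_over_limit.  The ratelimit means each CPU only pays
 * for the cross-CPU summation once per 4096 allocations or frees.
 */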
31721da177e4SLinus Torvalds static void recalc_bh_state(void)
31731da177e4SLinus Torvalds {
31741da177e4SLinus Torvalds 	int i;
31751da177e4SLinus Torvalds 	int tot = 0;
31761da177e4SLinus Torvalds 
31771da177e4SLinus Torvalds 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
31781da177e4SLinus Torvalds 		return;
31791da177e4SLinus Torvalds 	__get_cpu_var(bh_accounting).ratelimit = 0;
31808a143426SEric Dumazet 	for_each_online_cpu(i)
31811da177e4SLinus Torvalds 		tot += per_cpu(bh_accounting, i).nr;
31821da177e4SLinus Torvalds 	buffer_heads_over_limit = (tot > max_buffer_heads);
31831da177e4SLinus Torvalds }
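/*
 * A hedged sketch of how the flag computed above is consumed: page
 * reclaim (mm/vmscan.c) checks it while scanning and tries to strip
 * the buffers from pages it visits, roughly:
 *
 *	if (buffer_heads_over_limit && page_has_buffers(page))
 *		try_to_release_page(page, 0);	// page must be locked
 */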
31841da177e4SLinus Torvalds 
3185dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
31861da177e4SLinus Torvalds {
3187488514d1SChristoph Lameter 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
31881da177e4SLinus Torvalds 	if (ret) {
3189a35afb83SChristoph Lameter 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3190736c7b80SCoywolf Qi Hunt 		get_cpu_var(bh_accounting).nr++;
31911da177e4SLinus Torvalds 		recalc_bh_state();
3192736c7b80SCoywolf Qi Hunt 		put_cpu_var(bh_accounting);
31931da177e4SLinus Torvalds 	}
31941da177e4SLinus Torvalds 	return ret;
31951da177e4SLinus Torvalds }
31961da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head);
31971da177e4SLinus Torvalds 
31981da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh)
31991da177e4SLinus Torvalds {
32001da177e4SLinus Torvalds 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
32011da177e4SLinus Torvalds 	kmem_cache_free(bh_cachep, bh);
3202736c7b80SCoywolf Qi Hunt 	get_cpu_var(bh_accounting).nr--;
32031da177e4SLinus Torvalds 	recalc_bh_state();
3204736c7b80SCoywolf Qi Hunt 	put_cpu_var(bh_accounting);
32051da177e4SLinus Torvalds }
32061da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head);
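/*
 * Illustration only (hypothetical direct caller): most users reach
 * these helpers via alloc_page_buffers(), but they always pair up:
 *
 *	struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
 *	if (!bh)
 *		return -ENOMEM;
 *	...
 *	free_buffer_head(bh);	// b_assoc_buffers must be empty again
 */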
32071da177e4SLinus Torvalds 
32081da177e4SLinus Torvalds static void buffer_exit_cpu(int cpu)
32091da177e4SLinus Torvalds {
32101da177e4SLinus Torvalds 	int i;
32111da177e4SLinus Torvalds 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
32121da177e4SLinus Torvalds 
32131da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
32141da177e4SLinus Torvalds 		brelse(b->bhs[i]);
32151da177e4SLinus Torvalds 		b->bhs[i] = NULL;
32161da177e4SLinus Torvalds 	}
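	/* Fold the dead CPU's live-bh count into this CPU's counter */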
32178a143426SEric Dumazet 	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
32188a143426SEric Dumazet 	per_cpu(bh_accounting, cpu).nr = 0;
32198a143426SEric Dumazet 	put_cpu_var(bh_accounting);
32201da177e4SLinus Torvalds }
32211da177e4SLinus Torvalds 
32221da177e4SLinus Torvalds static int buffer_cpu_notify(struct notifier_block *self,
32231da177e4SLinus Torvalds 			      unsigned long action, void *hcpu)
32241da177e4SLinus Torvalds {
32258bb78442SRafael J. Wysocki 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
32261da177e4SLinus Torvalds 		buffer_exit_cpu((unsigned long)hcpu);
32271da177e4SLinus Torvalds 	return NOTIFY_OK;
32281da177e4SLinus Torvalds }
32291da177e4SLinus Torvalds 
3230389d1b08SAneesh Kumar K.V /**
3231a6b91919SRandy Dunlap  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3232389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head
3233389d1b08SAneesh Kumar K.V  *
3234389d1b08SAneesh Kumar K.V  * Return 1 if the buffer is up to date, or 0 with the buffer locked
3235389d1b08SAneesh Kumar K.V  * if it is not, so that the caller can submit the read itself.
3236389d1b08SAneesh Kumar K.V  */
3237389d1b08SAneesh Kumar K.V int bh_uptodate_or_lock(struct buffer_head *bh)
3238389d1b08SAneesh Kumar K.V {
3239389d1b08SAneesh Kumar K.V 	if (!buffer_uptodate(bh)) {
3240389d1b08SAneesh Kumar K.V 		lock_buffer(bh);
3241389d1b08SAneesh Kumar K.V 		if (!buffer_uptodate(bh))
3242389d1b08SAneesh Kumar K.V 			return 0;
3243389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3244389d1b08SAneesh Kumar K.V 	}
3245389d1b08SAneesh Kumar K.V 	return 1;
3246389d1b08SAneesh Kumar K.V }
3247389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_uptodate_or_lock);
3248389d1b08SAneesh Kumar K.V 
3249389d1b08SAneesh Kumar K.V /**
3250a6b91919SRandy Dunlap  * bh_submit_read - Submit a locked buffer for reading
3251389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head
3252389d1b08SAneesh Kumar K.V  *
3253389d1b08SAneesh Kumar K.V  * The buffer must be locked on entry; it is unlocked by the time
 * this returns.  Returns zero on success and -EIO on error.
3254389d1b08SAneesh Kumar K.V  */
3255389d1b08SAneesh Kumar K.V int bh_submit_read(struct buffer_head *bh)
3256389d1b08SAneesh Kumar K.V {
3257389d1b08SAneesh Kumar K.V 	BUG_ON(!buffer_locked(bh));
3258389d1b08SAneesh Kumar K.V 
3259389d1b08SAneesh Kumar K.V 	if (buffer_uptodate(bh)) {
3260389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3261389d1b08SAneesh Kumar K.V 		return 0;
3262389d1b08SAneesh Kumar K.V 	}
3263389d1b08SAneesh Kumar K.V 
3264389d1b08SAneesh Kumar K.V 	get_bh(bh);
3265389d1b08SAneesh Kumar K.V 	bh->b_end_io = end_buffer_read_sync;
3266389d1b08SAneesh Kumar K.V 	submit_bh(READ, bh);
3267389d1b08SAneesh Kumar K.V 	wait_on_buffer(bh);
3268389d1b08SAneesh Kumar K.V 	if (buffer_uptodate(bh))
3269389d1b08SAneesh Kumar K.V 		return 0;
3270389d1b08SAneesh Kumar K.V 	return -EIO;
3271389d1b08SAneesh Kumar K.V }
3272389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_submit_read);
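/*
 * Hedged sketch of the caller pattern the two helpers above are
 * designed for (ext4-style metadata reads take this shape):
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		if (bh_submit_read(bh)) {	// bh comes back unlocked
 *			brelse(bh);
 *			return -EIO;
 *		}
 *	}
 */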
3273389d1b08SAneesh Kumar K.V 
3274b98938c3SChristoph Lameter static void
3275*51cc5068SAlexey Dobriyan init_buffer_head(void *data)
3276b98938c3SChristoph Lameter {
3277b98938c3SChristoph Lameter 	struct buffer_head *bh = data;
3278b98938c3SChristoph Lameter 
3279b98938c3SChristoph Lameter 	memset(bh, 0, sizeof(*bh));
3280b98938c3SChristoph Lameter 	INIT_LIST_HEAD(&bh->b_assoc_buffers);
3281b98938c3SChristoph Lameter }
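/*
 * Note that as a slab constructor init_buffer_head() runs only when a
 * slab page is populated with new objects, not on every allocation;
 * objects must therefore be freed in this constructed state, which is
 * what the BUG_ON() in free_buffer_head() enforces.
 */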
3282b98938c3SChristoph Lameter 
32831da177e4SLinus Torvalds void __init buffer_init(void)
32841da177e4SLinus Torvalds {
32851da177e4SLinus Torvalds 	int nrpages;
32861da177e4SLinus Torvalds 
3287b98938c3SChristoph Lameter 	bh_cachep = kmem_cache_create("buffer_head",
3288b98938c3SChristoph Lameter 			sizeof(struct buffer_head), 0,
3289b98938c3SChristoph Lameter 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3290b98938c3SChristoph Lameter 				SLAB_MEM_SPREAD),
3291b98938c3SChristoph Lameter 				init_buffer_head);
32921da177e4SLinus Torvalds 
32931da177e4SLinus Torvalds 	/*
32941da177e4SLinus Torvalds 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
32951da177e4SLinus Torvalds 	 */
32961da177e4SLinus Torvalds 	nrpages = (nr_free_buffer_pages() * 10) / 100;
32971da177e4SLinus Torvalds 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
32981da177e4SLinus Torvalds 	hotcpu_notifier(buffer_cpu_notify, 0);
32991da177e4SLinus Torvalds }
33001da177e4SLinus Torvalds 
33011da177e4SLinus Torvalds EXPORT_SYMBOL(__bforget);
33021da177e4SLinus Torvalds EXPORT_SYMBOL(__brelse);
33031da177e4SLinus Torvalds EXPORT_SYMBOL(__wait_on_buffer);
33041da177e4SLinus Torvalds EXPORT_SYMBOL(block_commit_write);
33051da177e4SLinus Torvalds EXPORT_SYMBOL(block_prepare_write);
330654171690SDavid Chinner EXPORT_SYMBOL(block_page_mkwrite);
33071da177e4SLinus Torvalds EXPORT_SYMBOL(block_read_full_page);
33081da177e4SLinus Torvalds EXPORT_SYMBOL(block_sync_page);
33091da177e4SLinus Torvalds EXPORT_SYMBOL(block_truncate_page);
33101da177e4SLinus Torvalds EXPORT_SYMBOL(block_write_full_page);
331189e10787SNick Piggin EXPORT_SYMBOL(cont_write_begin);
33121da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_read_sync);
33131da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_write_sync);
33141da177e4SLinus Torvalds EXPORT_SYMBOL(file_fsync);
33151da177e4SLinus Torvalds EXPORT_SYMBOL(fsync_bdev);
33161da177e4SLinus Torvalds EXPORT_SYMBOL(generic_block_bmap);
331705eb0b51SOGAWA Hirofumi EXPORT_SYMBOL(generic_cont_expand_simple);
33181da177e4SLinus Torvalds EXPORT_SYMBOL(init_buffer);
33191da177e4SLinus Torvalds EXPORT_SYMBOL(invalidate_bdev);
33201da177e4SLinus Torvalds EXPORT_SYMBOL(ll_rw_block);
33211da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_dirty);
33221da177e4SLinus Torvalds EXPORT_SYMBOL(submit_bh);
33231da177e4SLinus Torvalds EXPORT_SYMBOL(sync_dirty_buffer);
33241da177e4SLinus Torvalds EXPORT_SYMBOL(unlock_buffer);
3325