/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void invalidate_bh_lrus(void);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
        bh->b_end_io = handler;
        bh->b_private = private;
}

static int sync_buffer(void *word)
{
        struct block_device *bd;
        struct buffer_head *bh
                = container_of(word, struct buffer_head, b_state);

        smp_mb();
        bd = bh->b_bdev;
        if (bd)
                blk_run_address_space(bd->bd_inode->i_mapping);
        io_schedule();
        return 0;
}

void fastcall __lock_buffer(struct buffer_head *bh)
{
        wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
                                                        TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void fastcall unlock_buffer(struct buffer_head *bh)
{
        smp_mb__before_clear_bit();
        clear_buffer_locked(bh);
        smp_mb__after_clear_bit();
        wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
        wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}

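/*
 * Illustrative sketch, not part of the original file: because waiting
 * alone gives no guarantee that the buffer is still unlocked by the
 * time the caller looks at it, callers who need a stable view take the
 * lock themselves:
 *
 *	lock_buffer(bh);	- sleeps in __lock_buffer() if contended
 *	... examine or modify the buffer ...
 *	unlock_buffer(bh);
 *
 * wait_on_buffer(bh) on its own only guarantees that the buffer *was*
 * unlocked at some instant after the call began.
 */
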
static void
__clear_page_buffers(struct page *page)
{
        ClearPagePrivate(page);
        set_page_private(page, 0);
        page_cache_release(page);
}

static void buffer_io_error(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];

        printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
                        bdevname(bh->b_bdev, b),
                        (unsigned long long)bh->b_blocknr);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                /* This happens due to failed READA attempts. */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];

        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                                       bdevname(bh->b_bdev, b));
                }
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}
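
/*
 * Illustrative sketch, not part of the original file: the synchronous
 * read pattern which end_buffer_read_sync() supports.  A caller that
 * wants to read one buffer and wait for it does roughly:
 *
 *	lock_buffer(bh);
 *	get_bh(bh);		- the completion handler does put_bh()
 *	bh->b_end_io = end_buffer_read_sync;
 *	submit_bh(READ, bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		... the read failed ...
 *
 * __bread() ultimately follows this shape elsewhere in this file.
 */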

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
        int ret = 0;

        if (bdev)
                ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
        return ret;
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
        struct super_block *sb = get_super(bdev);
        if (sb) {
                int res = fsync_super(sb);
                drop_super(sb);
                return res;
        }
        return sync_blockdev(bdev);
}

/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
        struct super_block *sb;

        down(&bdev->bd_mount_sem);
        sb = get_super(bdev);
        if (sb && !(sb->s_flags & MS_RDONLY)) {
                sb->s_frozen = SB_FREEZE_WRITE;
                smp_wmb();

                __fsync_super(sb);

                sb->s_frozen = SB_FREEZE_TRANS;
                smp_wmb();

                sync_blockdev(sb->s_bdev);

                if (sb->s_op->write_super_lockfs)
                        sb->s_op->write_super_lockfs(sb);
        }

        sync_blockdev(bdev);
        return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
void thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
        if (sb) {
                BUG_ON(sb->s_bdev != bdev);

                if (sb->s_op->unlockfs)
                        sb->s_op->unlockfs(sb);
                sb->s_frozen = SB_UNFROZEN;
                smp_wmb();
                wake_up(&sb->s_wait_unfrozen);
                drop_super(sb);
        }

        up(&bdev->bd_mount_sem);
}
EXPORT_SYMBOL(thaw_bdev);
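
/*
 * Illustrative sketch, not part of the original file: a snapshot
 * driver brackets its work with the pair above, passing whatever
 * freeze_bdev() returned (possibly NULL) back to thaw_bdev() so that
 * the superblock reference and bd_mount_sem are released:
 *
 *	struct super_block *sb = freeze_bdev(bdev);
 *	... take the device snapshot ...
 *	thaw_bdev(bdev, sb);
 */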

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
        struct buffer_head *ret = NULL;
        pgoff_t index;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct page *page;
        int all_mapped = 1;

        index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
        page = find_get_page(bd_mapping, index);
        if (!page)
                goto out;

        spin_lock(&bd_mapping->private_lock);
        if (!page_has_buffers(page))
                goto out_unlock;
        head = page_buffers(page);
        bh = head;
        do {
                if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                if (!buffer_mapped(bh))
                        all_mapped = 0;
                bh = bh->b_this_page;
        } while (bh != head);

        /* we might be here because some of the buffers on this page are
         * not mapped.  This is due to various races between
         * file io on the block device and getblk.  It gets dealt with
         * elsewhere, don't buffer_error if we had some unmapped buffers
         */
        if (all_mapped) {
                printk("__find_get_block_slow() failed. "
                        "block=%llu, b_blocknr=%llu\n",
                        (unsigned long long)block,
                        (unsigned long long)bh->b_blocknr);
                printk("b_state=0x%08lx, b_size=%zu\n",
                        bh->b_state, bh->b_size);
                printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
        }
out_unlock:
        spin_unlock(&bd_mapping->private_lock);
        page_cache_release(page);
out:
        return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on.  Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers.  For example, ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: in the case where the user removed a removable-media disk while
   there was still dirty data not synced to disk (due to a bug in the
   device driver or an error by the user), by not destroying the dirty
   buffers we could generate corruption on the next media inserted as
   well.  Thus a parameter is necessary to handle this case in the safest
   way possible (trying not to corrupt the newly inserted disk with data
   belonging to the old, now-corrupted disk).  Also, for a ramdisk the
   natural way to release the ramdisk memory is to destroy dirty buffers.

   These are two special cases.  Normal usage implies that the device
   driver issues a sync on the device (without waiting for I/O completion)
   and then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced.  It is needed to re-read from disk any pinned
   buffer.  NOTE: re-reading from disk is destructive, so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping->nrpages == 0)
                return;

        invalidate_bh_lrus();
        /*
         * FIXME: what about destroy_dirty_buffers?
         * We really want to use invalidate_inode_pages2() for
         * that, but not until that's cleaned up.
         */
        invalidate_mapping_pages(mapping, 0, -1);
}
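
/*
 * Illustrative sketch, not part of the original file: the "normal
 * usage" described above, as a driver might perform it on a media
 * change event:
 *
 *	sync_blockdev(bdev);		- push dirty pagecache to disk
 *	invalidate_bdev(bdev, 0);	- then drop the cached pages
 *
 * destroy_dirty_buffers == 1 is reserved for the special removable
 * media and ramdisk cases discussed above (and per the FIXME, this
 * implementation does not yet honour the flag).
 */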

/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
        struct zone **zones;
        pg_data_t *pgdat;

        wakeup_pdflush(1024);
        yield();

        for_each_online_pgdat(pgdat) {
                zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
                if (*zones)
                        try_to_free_pages(zones, GFP_NOFS);
        }
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;
        int page_uptodate = 1;

        BUG_ON(!buffer_async_read(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
                if (printk_ratelimit())
                        buffer_io_error(bh);
                SetPageError(page);
        }

        /*
         * Be _very_ careful from here on. Bad things can happen if
         * two buffer heads end IO at almost the same time and both
         * decide that the page is now completely done.
         */
        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
        do {
                if (!buffer_uptodate(tmp))
                        page_uptodate = 0;
                if (buffer_async_read(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);

        /*
         * If none of the buffers had errors and they are all
         * uptodate then we can set the page uptodate.
         */
        if (page_uptodate && !PageError(page))
                SetPageUptodate(page);
        unlock_page(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;

        BUG_ON(!buffer_async_write(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (printk_ratelimit()) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                               bdevname(bh->b_bdev, b));
                }
                set_bit(AS_EIO, &page->mapping->flags);
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
                SetPageError(page);
        }

        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

        clear_buffer_async_write(bh);
        unlock_buffer(bh);
        tmp = bh->b_this_page;
        while (tmp != bh) {
                if (buffer_async_write(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        }
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        end_page_writeback(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_read;
        set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_write;
        set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);
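
/*
 * Illustrative sketch, not part of the original file: a writepage
 * implementation typically marks every buffer it is about to write
 * before submitting any of them, so the completion handler sees a
 * consistent set:
 *
 *	do {
 *		lock_buffer(bh);
 *		mark_buffer_async_write(bh);
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 *	... then submit_bh(WRITE, bh) for each marked buffer ...
 *
 * which is the shape __block_write_full_page() uses later in this file.
 */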


/*
 * fs/buffer.c contains helper functions for buffer-backed address spaces'
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static inline void __remove_assoc_queue(struct buffer_head *bh)
{
        list_del_init(&bh->b_assoc_buffers);
        WARN_ON(!bh->b_assoc_map);
        if (buffer_write_io_error(bh))
                set_bit(AS_EIO, &bh->b_assoc_map->flags);
        bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
        return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head *p;
        int err = 0;

        spin_lock(lock);
repeat:
        list_for_each_prev(p, list) {
                bh = BH_ENTRY(p);
                if (buffer_locked(bh)) {
                        get_bh(bh);
                        spin_unlock(lock);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                err = -EIO;
                        brelse(bh);
                        spin_lock(lock);
                        goto repeat;
                }
        }
        spin_unlock(lock);
        return err;
}
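
/*
 * Illustrative sketch, not part of the original file: the O_SYNC
 * pattern described above, under the caller's list and lock
 * conventions:
 *
 *	mark_buffer_dirty(bh);
 *	ll_rw_block(WRITE, 1, &bh);		- queue the write
 *	...
 *	err = osync_buffers_list(lock, list);	- wait for completion
 *
 * Buffers dirtied after the ll_rw_block() call are deliberately not
 * waited for.
 */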

/**
 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
 *                        buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
        struct address_space *buffer_mapping = mapping->assoc_mapping;

        if (buffer_mapping == NULL || list_empty(&mapping->private_list))
                return 0;

        return fsync_buffers_list(&buffer_mapping->private_lock,
                                        &mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize)
{
        struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
        if (bh) {
                if (buffer_dirty(bh))
                        ll_rw_block(WRITE, 1, &bh);
                put_bh(bh);
        }
}
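
/*
 * Illustrative sketch, not part of the original file: a caller that
 * has just written a buffer_boundary() block would follow up with:
 *
 *	if (buffer_boundary(bh))
 *		write_boundary_block(bh->b_bdev, bh->b_blocknr,
 *					bh->b_size);
 *
 * so that the probably-dirty indirect block at bblock + 1 is queued
 * while the disk head is still nearby.
 */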

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct address_space *buffer_mapping = bh->b_page->mapping;

        mark_buffer_dirty(bh);
        if (!mapping->assoc_mapping) {
                mapping->assoc_mapping = buffer_mapping;
        } else {
                BUG_ON(mapping->assoc_mapping != buffer_mapping);
        }
        if (list_empty(&bh->b_assoc_buffers)) {
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->private_list);
                bh->b_assoc_map = mapping;
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
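
/*
 * Illustrative sketch, not part of the original file: a simple
 * filesystem built on these helpers queues its dependent metadata with
 * mark_buffer_dirty_inode() as it dirties it, after which its ->fsync()
 * reduces to little more than (the names here are hypothetical):
 *
 *	int example_fsync(struct file *file, struct dentry *dentry,
 *				int datasync)
 *	{
 *		return sync_mapping_buffers(dentry->d_inode->i_mapping);
 *	}
 *
 * ext2's fsync path follows roughly this shape.
 */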

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
        struct address_space * const mapping = page_mapping(page);

        if (unlikely(!mapping))
                return !TestSetPageDirty(page);

        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                do {
                        set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        spin_unlock(&mapping->private_lock);

        if (TestSetPageDirty(page))
                return 0;

        write_lock_irq(&mapping->tree_lock);
        if (page->mapping) {	/* Race with truncate? */
                if (mapping_cap_account_dirty(mapping)) {
                        __inc_zone_page_state(page, NR_FILE_DIRTY);
                        task_io_account_write(PAGE_CACHE_SIZE);
                }
                radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
        }
        write_unlock_irq(&mapping->tree_lock);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
        return 1;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head tmp;
        int err = 0, err2;

        INIT_LIST_HEAD(&tmp);

        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
                __remove_assoc_queue(bh);
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
                                /*
                                 * Ensure any pending I/O completes so that
                                 * ll_rw_block() actually writes the current
                                 * contents - it is a noop if I/O is still in
                                 * flight on potentially older contents.
                                 */
                                ll_rw_block(SWRITE, 1, &bh);
                                brelse(bh);
                                spin_lock(lock);
                        }
                }
        }

        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
                list_del_init(&bh->b_assoc_buffers);
                get_bh(bh);
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        err = -EIO;
                brelse(bh);
                spin_lock(lock);
        }

        spin_unlock(lock);
        err2 = osync_buffers_list(lock, list);
        if (err)
                return err;
        else
                return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->assoc_mapping;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list))
                        __remove_assoc_queue(BH_ENTRY(list->next));
                spin_unlock(&buffer_mapping->private_lock);
        }
}

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
        int ret = 1;

        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->assoc_mapping;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list)) {
                        struct buffer_head *bh = BH_ENTRY(list->next);
                        if (buffer_dirty(bh)) {
                                ret = 0;
                                break;
                        }
                        __remove_assoc_queue(bh);
                }
                spin_unlock(&buffer_mapping->private_lock);
        }
        return ret;
}

/*
 * Create the appropriate buffers when given a page for the data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping),
 * which may not fail, from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                int retry)
{
        struct buffer_head *bh, *head;
        long offset;

try_again:
        head = NULL;
        offset = PAGE_SIZE;
        while ((offset -= size) >= 0) {
                bh = alloc_buffer_head(GFP_NOFS);
                if (!bh)
                        goto no_grow;

                bh->b_bdev = NULL;
                bh->b_this_page = head;
                bh->b_blocknr = -1;
                head = bh;

                bh->b_state = 0;
                atomic_set(&bh->b_count, 0);
                bh->b_private = NULL;
                bh->b_size = size;

                /* Link the buffer to its page */
                set_bh_page(bh, page, offset);

                init_buffer(bh, NULL, NULL);
        }
        return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        if (head) {
                do {
                        bh = head;
                        head = head->b_this_page;
                        free_buffer_head(bh);
                } while (head);
        }

        /*
         * Return failure for non-async IO requests.  Async IO requests
         * are not allowed to fail, so we have to wait until buffer heads
         * become available.  But we don't want tasks sleeping with
         * partially complete buffers, so all were released above.
         */
        if (!retry)
                return NULL;

        /* We're _really_ low on memory. Now we just
         * wait for old buffer heads to become free due to
         * finishing IO.  Since this is an async request and
         * the reserve list is empty, we're sure there are
         * async buffer heads in use.
         */
        free_more_memory();
        goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);
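
/*
 * Illustrative sketch, not part of the original file: the usual call
 * sequence for this allocator and the helpers that follow, as
 * grow_dev_page() below demonstrates:
 *
 *	head = alloc_page_buffers(page, size, retry);
 *	spin_lock(&mapping->private_lock);
 *	link_dev_buffers(page, head);
 *	init_page_buffers(page, bdev, block, size);
 *	spin_unlock(&mapping->private_lock);
 */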

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
        struct buffer_head *bh, *tail;

        bh = head;
        do {
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;
        attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
                        sector_t block, int size)
{
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;
        int uptodate = PageUptodate(page);

        do {
                if (!buffer_mapped(bh)) {
                        init_buffer(bh, NULL, NULL);
                        bh->b_bdev = bdev;
                        bh->b_blocknr = block;
                        if (uptodate)
                                set_buffer_uptodate(bh);
                        set_buffer_mapped(bh);
                }
                block++;
                bh = bh->b_this_page;
        } while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
                pgoff_t index, int size)
{
        struct inode *inode = bdev->bd_inode;
        struct page *page;
        struct buffer_head *bh;

        page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
        if (!page)
                return NULL;

        BUG_ON(!PageLocked(page));

        if (page_has_buffers(page)) {
                bh = page_buffers(page);
                if (bh->b_size == size) {
                        init_page_buffers(page, bdev, block, size);
                        return page;
                }
                if (!try_to_free_buffers(page))
                        goto failed;
        }

        /*
         * Allocate some buffers for this page
         */
        bh = alloc_page_buffers(page, size, 0);
        if (!bh)
                goto failed;

        /*
         * Link the page to the buffers and initialise them.  Take the
         * lock to be atomic wrt __find_get_block(), which does not
         * run under the page lock.
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
        init_page_buffers(page, bdev, block, size);
        spin_unlock(&inode->i_mapping->private_lock);
        return page;

failed:
        BUG();
        unlock_page(page);
        page_cache_release(page);
        return NULL;
}
10311da177e4SLinus Torvalds 
10321da177e4SLinus Torvalds /*
10331da177e4SLinus Torvalds  * Create buffers for the page that holds the specified block device
10341da177e4SLinus Torvalds  * block.  If that page was dirty, the buffers are set dirty also.
10351da177e4SLinus Torvalds  *
10361da177e4SLinus Torvalds  * Except that's a bug.  Attaching dirty buffers to a dirty
10371da177e4SLinus Torvalds  * blockdev's page can result in filesystem corruption, because
10381da177e4SLinus Torvalds  * some of those buffers may be aliases of filesystem data.
10391da177e4SLinus Torvalds  * grow_dev_page() will go BUG() if this happens.
10401da177e4SLinus Torvalds  */
1041858119e1SArjan van de Ven static int
10421da177e4SLinus Torvalds grow_buffers(struct block_device *bdev, sector_t block, int size)
10431da177e4SLinus Torvalds {
10441da177e4SLinus Torvalds 	struct page *page;
10451da177e4SLinus Torvalds 	pgoff_t index;
10461da177e4SLinus Torvalds 	int sizebits;
10471da177e4SLinus Torvalds 
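	/*
	 * Work out how many blocks fit in a page, expressed as a shift:
	 * for the power-of-two block sizes used in practice this makes
	 * sizebits == log2(PAGE_SIZE / size), so the pagecache index of
	 * the page containing @block is simply block >> sizebits.
	 */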
10481da177e4SLinus Torvalds 	sizebits = -1;
10491da177e4SLinus Torvalds 	do {
10501da177e4SLinus Torvalds 		sizebits++;
10511da177e4SLinus Torvalds 	} while ((size << sizebits) < PAGE_SIZE);
10521da177e4SLinus Torvalds 
10531da177e4SLinus Torvalds 	index = block >> sizebits;
10541da177e4SLinus Torvalds 
1055e5657933SAndrew Morton 	/*
1056e5657933SAndrew Morton 	 * Check for a block which wants to lie outside our maximum possible
1057e5657933SAndrew Morton 	 * pagecache index.  (this comparison is done using sector_t types).
1058e5657933SAndrew Morton 	 */
1059e5657933SAndrew Morton 	if (unlikely(index != block >> sizebits)) {
1060e5657933SAndrew Morton 		char b[BDEVNAME_SIZE];
1061e5657933SAndrew Morton 
1062e5657933SAndrew Morton 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1063e5657933SAndrew Morton 			"device %s\n",
1064e5657933SAndrew Morton 			__FUNCTION__, (unsigned long long)block,
1065e5657933SAndrew Morton 			bdevname(bdev, b));
1066e5657933SAndrew Morton 		return -EIO;
1067e5657933SAndrew Morton 	}
1068e5657933SAndrew Morton 	block = index << sizebits;
10691da177e4SLinus Torvalds 	/* Create a page with the proper size buffers.. */
10701da177e4SLinus Torvalds 	page = grow_dev_page(bdev, block, index, size);
10711da177e4SLinus Torvalds 	if (!page)
10721da177e4SLinus Torvalds 		return 0;
10731da177e4SLinus Torvalds 	unlock_page(page);
10741da177e4SLinus Torvalds 	page_cache_release(page);
10751da177e4SLinus Torvalds 	return 1;
10761da177e4SLinus Torvalds }
10771da177e4SLinus Torvalds 
107875c96f85SAdrian Bunk static struct buffer_head *
10791da177e4SLinus Torvalds __getblk_slow(struct block_device *bdev, sector_t block, int size)
10801da177e4SLinus Torvalds {
10811da177e4SLinus Torvalds 	/* Size must be a multiple of the hard sector size */
10821da177e4SLinus Torvalds 	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
10831da177e4SLinus Torvalds 			(size < 512 || size > PAGE_SIZE))) {
10841da177e4SLinus Torvalds 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
10851da177e4SLinus Torvalds 					size);
10861da177e4SLinus Torvalds 		printk(KERN_ERR "hardsect size: %d\n",
10871da177e4SLinus Torvalds 					bdev_hardsect_size(bdev));
10881da177e4SLinus Torvalds 
10891da177e4SLinus Torvalds 		dump_stack();
10901da177e4SLinus Torvalds 		return NULL;
10911da177e4SLinus Torvalds 	}
10921da177e4SLinus Torvalds 
10931da177e4SLinus Torvalds 	for (;;) {
10941da177e4SLinus Torvalds 		struct buffer_head * bh;
1095e5657933SAndrew Morton 		int ret;
10961da177e4SLinus Torvalds 
10971da177e4SLinus Torvalds 		bh = __find_get_block(bdev, block, size);
10981da177e4SLinus Torvalds 		if (bh)
10991da177e4SLinus Torvalds 			return bh;
11001da177e4SLinus Torvalds 
1101e5657933SAndrew Morton 		ret = grow_buffers(bdev, block, size);
1102e5657933SAndrew Morton 		if (ret < 0)
1103e5657933SAndrew Morton 			return NULL;
1104e5657933SAndrew Morton 		if (ret == 0)
11051da177e4SLinus Torvalds 			free_more_memory();
11061da177e4SLinus Torvalds 	}
11071da177e4SLinus Torvalds }
11081da177e4SLinus Torvalds 
11091da177e4SLinus Torvalds /*
11101da177e4SLinus Torvalds  * The relationship between dirty buffers and dirty pages:
11111da177e4SLinus Torvalds  *
11121da177e4SLinus Torvalds  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
11131da177e4SLinus Torvalds  * the page is tagged dirty in its radix tree.
11141da177e4SLinus Torvalds  *
11151da177e4SLinus Torvalds  * At all times, the dirtiness of the buffers represents the dirtiness of
11161da177e4SLinus Torvalds  * subsections of the page.  If the page has buffers, the page dirty bit is
11171da177e4SLinus Torvalds  * merely a hint about the true dirty state.
11181da177e4SLinus Torvalds  *
11191da177e4SLinus Torvalds  * When a page is set dirty in its entirety, all its buffers are marked dirty
11201da177e4SLinus Torvalds  * (if the page has buffers).
11211da177e4SLinus Torvalds  *
11221da177e4SLinus Torvalds  * When a buffer is marked dirty, its page is dirtied, but the page's other
11231da177e4SLinus Torvalds  * buffers are not.
11241da177e4SLinus Torvalds  *
11251da177e4SLinus Torvalds  * Also.  When blockdev buffers are explicitly read with bread(), they
11261da177e4SLinus Torvalds  * individually become uptodate.  But their backing page remains not
11271da177e4SLinus Torvalds  * uptodate - even if all of its buffers are uptodate.  A subsequent
11281da177e4SLinus Torvalds  * block_read_full_page() against that page will discover all the uptodate
11291da177e4SLinus Torvalds  * buffers, will set the page uptodate and will perform no I/O.
11301da177e4SLinus Torvalds  */
11311da177e4SLinus Torvalds 
11321da177e4SLinus Torvalds /**
11331da177e4SLinus Torvalds  * mark_buffer_dirty - mark a buffer_head as needing writeout
113467be2dd1SMartin Waitz  * @bh: the buffer_head to mark dirty
11351da177e4SLinus Torvalds  *
11361da177e4SLinus Torvalds  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
11371da177e4SLinus Torvalds  * backing page dirty, then tag the page as dirty in its address_space's radix
11381da177e4SLinus Torvalds  * tree and then attach the address_space's inode to its superblock's dirty
11391da177e4SLinus Torvalds  * inode list.
11401da177e4SLinus Torvalds  *
11411da177e4SLinus Torvalds  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
11421da177e4SLinus Torvalds  * mapping->tree_lock and the global inode_lock.
11431da177e4SLinus Torvalds  */
11441da177e4SLinus Torvalds void fastcall mark_buffer_dirty(struct buffer_head *bh)
11451da177e4SLinus Torvalds {
11461da177e4SLinus Torvalds 	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
11471da177e4SLinus Torvalds 		__set_page_dirty_nobuffers(bh->b_page);
11481da177e4SLinus Torvalds }
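
/*
 * A minimal usage sketch (illustrative, not part of this file): after
 * modifying the data behind a buffer_head, callers pair the update with
 * mark_buffer_dirty() so that writeback later picks the block up.  The
 * names "off", "src" and "len" below are assumed to come from the caller:
 *
 *	memcpy(bh->b_data + off, src, len);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */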
11491da177e4SLinus Torvalds 
11501da177e4SLinus Torvalds /*
11511da177e4SLinus Torvalds  * Decrement a buffer_head's reference count.  If all buffers against a page
11521da177e4SLinus Torvalds  * have zero reference count, are clean and unlocked, and if the page is clean
11531da177e4SLinus Torvalds  * and unlocked then try_to_free_buffers() may strip the buffers from the page
11541da177e4SLinus Torvalds  * in preparation for freeing it (sometimes, rarely, buffers are removed from
11551da177e4SLinus Torvalds  * a page but it ends up not being freed, and buffers may later be reattached).
11561da177e4SLinus Torvalds  */
11571da177e4SLinus Torvalds void __brelse(struct buffer_head * buf)
11581da177e4SLinus Torvalds {
11591da177e4SLinus Torvalds 	if (atomic_read(&buf->b_count)) {
11601da177e4SLinus Torvalds 		put_bh(buf);
11611da177e4SLinus Torvalds 		return;
11621da177e4SLinus Torvalds 	}
11631da177e4SLinus Torvalds 	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
11641da177e4SLinus Torvalds 	WARN_ON(1);
11651da177e4SLinus Torvalds }
11661da177e4SLinus Torvalds 
11671da177e4SLinus Torvalds /*
11681da177e4SLinus Torvalds  * bforget() is like brelse(), except it discards any
11691da177e4SLinus Torvalds  * potentially dirty data.
11701da177e4SLinus Torvalds  */
11711da177e4SLinus Torvalds void __bforget(struct buffer_head *bh)
11721da177e4SLinus Torvalds {
11731da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
11741da177e4SLinus Torvalds 	if (!list_empty(&bh->b_assoc_buffers)) {
11751da177e4SLinus Torvalds 		struct address_space *buffer_mapping = bh->b_page->mapping;
11761da177e4SLinus Torvalds 
11771da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
11781da177e4SLinus Torvalds 		list_del_init(&bh->b_assoc_buffers);
117958ff407bSJan Kara 		bh->b_assoc_map = NULL;
11801da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
11811da177e4SLinus Torvalds 	}
11821da177e4SLinus Torvalds 	__brelse(bh);
11831da177e4SLinus Torvalds }
11841da177e4SLinus Torvalds 
11851da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh)
11861da177e4SLinus Torvalds {
11871da177e4SLinus Torvalds 	lock_buffer(bh);
11881da177e4SLinus Torvalds 	if (buffer_uptodate(bh)) {
11891da177e4SLinus Torvalds 		unlock_buffer(bh);
11901da177e4SLinus Torvalds 		return bh;
11911da177e4SLinus Torvalds 	} else {
11921da177e4SLinus Torvalds 		get_bh(bh);
11931da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_read_sync;
11941da177e4SLinus Torvalds 		submit_bh(READ, bh);
11951da177e4SLinus Torvalds 		wait_on_buffer(bh);
11961da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
11971da177e4SLinus Torvalds 			return bh;
11981da177e4SLinus Torvalds 	}
11991da177e4SLinus Torvalds 	brelse(bh);
12001da177e4SLinus Torvalds 	return NULL;
12011da177e4SLinus Torvalds }
12021da177e4SLinus Torvalds 
12031da177e4SLinus Torvalds /*
12041da177e4SLinus Torvalds  * Per-cpu buffer LRU implementation, used to reduce the cost of __find_get_block().
12051da177e4SLinus Torvalds  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
12061da177e4SLinus Torvalds  * refcount elevated by one when they're in an LRU.  A buffer can only appear
12071da177e4SLinus Torvalds  * once in a particular CPU's LRU.  A single buffer can be present in multiple
12081da177e4SLinus Torvalds  * CPU's LRUs at the same time.
12091da177e4SLinus Torvalds  *
12101da177e4SLinus Torvalds  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
12111da177e4SLinus Torvalds  * sb_find_get_block().
12121da177e4SLinus Torvalds  *
12131da177e4SLinus Torvalds  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
12141da177e4SLinus Torvalds  * a local interrupt disable for that.
12151da177e4SLinus Torvalds  */
12161da177e4SLinus Torvalds 
12171da177e4SLinus Torvalds #define BH_LRU_SIZE	8
12181da177e4SLinus Torvalds 
12191da177e4SLinus Torvalds struct bh_lru {
12201da177e4SLinus Torvalds 	struct buffer_head *bhs[BH_LRU_SIZE];
12211da177e4SLinus Torvalds };
12221da177e4SLinus Torvalds 
12231da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
12241da177e4SLinus Torvalds 
12251da177e4SLinus Torvalds #ifdef CONFIG_SMP
12261da177e4SLinus Torvalds #define bh_lru_lock()	local_irq_disable()
12271da177e4SLinus Torvalds #define bh_lru_unlock()	local_irq_enable()
12281da177e4SLinus Torvalds #else
12291da177e4SLinus Torvalds #define bh_lru_lock()	preempt_disable()
12301da177e4SLinus Torvalds #define bh_lru_unlock()	preempt_enable()
12311da177e4SLinus Torvalds #endif
12321da177e4SLinus Torvalds 
12331da177e4SLinus Torvalds static inline void check_irqs_on(void)
12341da177e4SLinus Torvalds {
12351da177e4SLinus Torvalds #ifdef irqs_disabled
12361da177e4SLinus Torvalds 	BUG_ON(irqs_disabled());
12371da177e4SLinus Torvalds #endif
12381da177e4SLinus Torvalds }
12391da177e4SLinus Torvalds 
12401da177e4SLinus Torvalds /*
12411da177e4SLinus Torvalds  * The LRU management algorithm is dopey-but-simple.  Sorry.
12421da177e4SLinus Torvalds  */
12431da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh)
12441da177e4SLinus Torvalds {
12451da177e4SLinus Torvalds 	struct buffer_head *evictee = NULL;
12461da177e4SLinus Torvalds 	struct bh_lru *lru;
12471da177e4SLinus Torvalds 
12481da177e4SLinus Torvalds 	check_irqs_on();
12491da177e4SLinus Torvalds 	bh_lru_lock();
12501da177e4SLinus Torvalds 	lru = &__get_cpu_var(bh_lrus);
12511da177e4SLinus Torvalds 	if (lru->bhs[0] != bh) {
12521da177e4SLinus Torvalds 		struct buffer_head *bhs[BH_LRU_SIZE];
12531da177e4SLinus Torvalds 		int in;
12541da177e4SLinus Torvalds 		int out = 0;
12551da177e4SLinus Torvalds 
12561da177e4SLinus Torvalds 		get_bh(bh);
12571da177e4SLinus Torvalds 		bhs[out++] = bh;
12581da177e4SLinus Torvalds 		for (in = 0; in < BH_LRU_SIZE; in++) {
12591da177e4SLinus Torvalds 			struct buffer_head *bh2 = lru->bhs[in];
12601da177e4SLinus Torvalds 
12611da177e4SLinus Torvalds 			if (bh2 == bh) {
12621da177e4SLinus Torvalds 				__brelse(bh2);
12631da177e4SLinus Torvalds 			} else {
12641da177e4SLinus Torvalds 				if (out >= BH_LRU_SIZE) {
12651da177e4SLinus Torvalds 					BUG_ON(evictee != NULL);
12661da177e4SLinus Torvalds 					evictee = bh2;
12671da177e4SLinus Torvalds 				} else {
12681da177e4SLinus Torvalds 					bhs[out++] = bh2;
12691da177e4SLinus Torvalds 				}
12701da177e4SLinus Torvalds 			}
12711da177e4SLinus Torvalds 		}
12721da177e4SLinus Torvalds 		while (out < BH_LRU_SIZE)
12731da177e4SLinus Torvalds 			bhs[out++] = NULL;
12741da177e4SLinus Torvalds 		memcpy(lru->bhs, bhs, sizeof(bhs));
12751da177e4SLinus Torvalds 	}
12761da177e4SLinus Torvalds 	bh_lru_unlock();
12771da177e4SLinus Torvalds 
12781da177e4SLinus Torvalds 	if (evictee)
12791da177e4SLinus Torvalds 		__brelse(evictee);
12801da177e4SLinus Torvalds }
12811da177e4SLinus Torvalds 
12821da177e4SLinus Torvalds /*
12831da177e4SLinus Torvalds  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
12841da177e4SLinus Torvalds  */
1285858119e1SArjan van de Ven static struct buffer_head *
1286*3991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
12871da177e4SLinus Torvalds {
12881da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
12891da177e4SLinus Torvalds 	struct bh_lru *lru;
1290*3991d3bdSTomasz Kvarsin 	unsigned int i;
12911da177e4SLinus Torvalds 
12921da177e4SLinus Torvalds 	check_irqs_on();
12931da177e4SLinus Torvalds 	bh_lru_lock();
12941da177e4SLinus Torvalds 	lru = &__get_cpu_var(bh_lrus);
12951da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
12961da177e4SLinus Torvalds 		struct buffer_head *bh = lru->bhs[i];
12971da177e4SLinus Torvalds 
12981da177e4SLinus Torvalds 		if (bh && bh->b_bdev == bdev &&
12991da177e4SLinus Torvalds 				bh->b_blocknr == block && bh->b_size == size) {
13001da177e4SLinus Torvalds 			if (i) {
13011da177e4SLinus Torvalds 				while (i) {
13021da177e4SLinus Torvalds 					lru->bhs[i] = lru->bhs[i - 1];
13031da177e4SLinus Torvalds 					i--;
13041da177e4SLinus Torvalds 				}
13051da177e4SLinus Torvalds 				lru->bhs[0] = bh;
13061da177e4SLinus Torvalds 			}
13071da177e4SLinus Torvalds 			get_bh(bh);
13081da177e4SLinus Torvalds 			ret = bh;
13091da177e4SLinus Torvalds 			break;
13101da177e4SLinus Torvalds 		}
13111da177e4SLinus Torvalds 	}
13121da177e4SLinus Torvalds 	bh_lru_unlock();
13131da177e4SLinus Torvalds 	return ret;
13141da177e4SLinus Torvalds }
13151da177e4SLinus Torvalds 
13161da177e4SLinus Torvalds /*
13171da177e4SLinus Torvalds  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
13181da177e4SLinus Torvalds  * it in the LRU and mark it as accessed.  If it is not present then return
13191da177e4SLinus Torvalds  * NULL.
13201da177e4SLinus Torvalds  */
13211da177e4SLinus Torvalds struct buffer_head *
1322*3991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
13231da177e4SLinus Torvalds {
13241da177e4SLinus Torvalds 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
13251da177e4SLinus Torvalds 
13261da177e4SLinus Torvalds 	if (bh == NULL) {
1327385fd4c5SCoywolf Qi Hunt 		bh = __find_get_block_slow(bdev, block);
13281da177e4SLinus Torvalds 		if (bh)
13291da177e4SLinus Torvalds 			bh_lru_install(bh);
13301da177e4SLinus Torvalds 	}
13311da177e4SLinus Torvalds 	if (bh)
13321da177e4SLinus Torvalds 		touch_buffer(bh);
13331da177e4SLinus Torvalds 	return bh;
13341da177e4SLinus Torvalds }
13351da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block);
13361da177e4SLinus Torvalds 
13371da177e4SLinus Torvalds /*
13381da177e4SLinus Torvalds  * __getblk will locate (and, if necessary, create) the buffer_head
13391da177e4SLinus Torvalds  * which corresponds to the passed block_device, block and size. The
13401da177e4SLinus Torvalds  * returned buffer has its reference count incremented.
13411da177e4SLinus Torvalds  *
13421da177e4SLinus Torvalds  * __getblk() cannot fail - it just keeps trying.  If you pass it an
13431da177e4SLinus Torvalds  * illegal block number, __getblk() will happily return a buffer_head
13441da177e4SLinus Torvalds  * which represents the non-existent block.  Very weird.
13451da177e4SLinus Torvalds  *
13461da177e4SLinus Torvalds  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
13471da177e4SLinus Torvalds  * attempt is failing.  FIXME, perhaps?
13481da177e4SLinus Torvalds  */
13491da177e4SLinus Torvalds struct buffer_head *
1350*3991d3bdSTomasz Kvarsin __getblk(struct block_device *bdev, sector_t block, unsigned size)
13511da177e4SLinus Torvalds {
13521da177e4SLinus Torvalds 	struct buffer_head *bh = __find_get_block(bdev, block, size);
13531da177e4SLinus Torvalds 
13541da177e4SLinus Torvalds 	might_sleep();
13551da177e4SLinus Torvalds 	if (bh == NULL)
13561da177e4SLinus Torvalds 		bh = __getblk_slow(bdev, block, size);
13571da177e4SLinus Torvalds 	return bh;
13581da177e4SLinus Torvalds }
13591da177e4SLinus Torvalds EXPORT_SYMBOL(__getblk);
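
/*
 * Sketch of the common "about to overwrite the whole block" pattern
 * (illustrative only; bdev/block/size are assumed caller-supplied).
 * Because no read is needed, the buffer is filled and marked uptodate
 * by hand before being dirtied:
 *
 *	bh = __getblk(bdev, block, size);
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */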
13601da177e4SLinus Torvalds 
13611da177e4SLinus Torvalds /*
13621da177e4SLinus Torvalds  * Do async read-ahead on a buffer.
13631da177e4SLinus Torvalds  */
1364*3991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
13651da177e4SLinus Torvalds {
13661da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
1367a3e713b5SAndrew Morton 	if (likely(bh)) {
13681da177e4SLinus Torvalds 		ll_rw_block(READA, 1, &bh);
13691da177e4SLinus Torvalds 		brelse(bh);
13701da177e4SLinus Torvalds 	}
1371a3e713b5SAndrew Morton }
13721da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead);
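
/*
 * Illustrative sketch: a caller that knows it will soon need several
 * consecutive blocks can hint all but the first and then read the
 * first synchronously, letting the readahead I/O overlap with it:
 *
 *	for (i = 1; i < nr; i++)
 *		__breadahead(bdev, block + i, size);
 *	bh = __bread(bdev, block, size);
 */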
13731da177e4SLinus Torvalds 
13741da177e4SLinus Torvalds /**
13751da177e4SLinus Torvalds  *  __bread() - reads a specified block and returns the bh
137667be2dd1SMartin Waitz  *  @bdev: the block_device to read from
13771da177e4SLinus Torvalds  *  @block: number of block
13781da177e4SLinus Torvalds  *  @size: size (in bytes) to read
13791da177e4SLinus Torvalds  *
13801da177e4SLinus Torvalds  *  Reads a specified block, and returns buffer head that contains it.
13811da177e4SLinus Torvalds  *  It returns NULL if the block was unreadable.
13821da177e4SLinus Torvalds  */
13831da177e4SLinus Torvalds struct buffer_head *
1384*3991d3bdSTomasz Kvarsin __bread(struct block_device *bdev, sector_t block, unsigned size)
13851da177e4SLinus Torvalds {
13861da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
13871da177e4SLinus Torvalds 
1388a3e713b5SAndrew Morton 	if (likely(bh) && !buffer_uptodate(bh))
13891da177e4SLinus Torvalds 		bh = __bread_slow(bh);
13901da177e4SLinus Torvalds 	return bh;
13911da177e4SLinus Torvalds }
13921da177e4SLinus Torvalds EXPORT_SYMBOL(__bread);
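
/*
 * The typical read path, as a sketch (error handling is the caller's
 * choice; -EIO is just an example):
 *
 *	bh = __bread(bdev, block, size);
 *	if (!bh)
 *		return -EIO;
 *	... use bh->b_data ...
 *	brelse(bh);
 *
 * Filesystems normally reach this through the sb_bread() wrapper, which
 * supplies the superblock's block device and blocksize.
 */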
13931da177e4SLinus Torvalds 
13941da177e4SLinus Torvalds /*
13951da177e4SLinus Torvalds  * invalidate_bh_lrus() is called rarely - but not only at unmount.
13961da177e4SLinus Torvalds  * This doesn't race because, on each cpu, it runs either in irq context
13971da177e4SLinus Torvalds  * or with preemption disabled.
13981da177e4SLinus Torvalds  */
13991da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg)
14001da177e4SLinus Torvalds {
14011da177e4SLinus Torvalds 	struct bh_lru *b = &get_cpu_var(bh_lrus);
14021da177e4SLinus Torvalds 	int i;
14031da177e4SLinus Torvalds 
14041da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
14051da177e4SLinus Torvalds 		brelse(b->bhs[i]);
14061da177e4SLinus Torvalds 		b->bhs[i] = NULL;
14071da177e4SLinus Torvalds 	}
14081da177e4SLinus Torvalds 	put_cpu_var(bh_lrus);
14091da177e4SLinus Torvalds }
14101da177e4SLinus Torvalds 
14111da177e4SLinus Torvalds static void invalidate_bh_lrus(void)
14121da177e4SLinus Torvalds {
14131da177e4SLinus Torvalds 	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
14141da177e4SLinus Torvalds }
14151da177e4SLinus Torvalds 
14161da177e4SLinus Torvalds void set_bh_page(struct buffer_head *bh,
14171da177e4SLinus Torvalds 		struct page *page, unsigned long offset)
14181da177e4SLinus Torvalds {
14191da177e4SLinus Torvalds 	bh->b_page = page;
1420e827f923SEric Sesterhenn 	BUG_ON(offset >= PAGE_SIZE);
14211da177e4SLinus Torvalds 	if (PageHighMem(page))
14221da177e4SLinus Torvalds 		/*
14231da177e4SLinus Torvalds 		 * This catches illegal uses and preserves the offset:
14241da177e4SLinus Torvalds 		 */
14251da177e4SLinus Torvalds 		bh->b_data = (char *)(0 + offset);
14261da177e4SLinus Torvalds 	else
14271da177e4SLinus Torvalds 		bh->b_data = page_address(page) + offset;
14281da177e4SLinus Torvalds }
14291da177e4SLinus Torvalds EXPORT_SYMBOL(set_bh_page);
14301da177e4SLinus Torvalds 
14311da177e4SLinus Torvalds /*
14321da177e4SLinus Torvalds  * Called when truncating a buffer on a page completely.
14331da177e4SLinus Torvalds  */
1434858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh)
14351da177e4SLinus Torvalds {
14361da177e4SLinus Torvalds 	lock_buffer(bh);
14371da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
14381da177e4SLinus Torvalds 	bh->b_bdev = NULL;
14391da177e4SLinus Torvalds 	clear_buffer_mapped(bh);
14401da177e4SLinus Torvalds 	clear_buffer_req(bh);
14411da177e4SLinus Torvalds 	clear_buffer_new(bh);
14421da177e4SLinus Torvalds 	clear_buffer_delay(bh);
144333a266ddSDavid Chinner 	clear_buffer_unwritten(bh);
14441da177e4SLinus Torvalds 	unlock_buffer(bh);
14451da177e4SLinus Torvalds }
14461da177e4SLinus Torvalds 
14471da177e4SLinus Torvalds /**
14481da177e4SLinus Torvalds  * block_invalidatepage - invalidate part or all of a buffer-backed page
14491da177e4SLinus Torvalds  *
14501da177e4SLinus Torvalds  * @page: the page which is affected
14511da177e4SLinus Torvalds  * @offset: the index of the truncation point
14521da177e4SLinus Torvalds  *
14531da177e4SLinus Torvalds  * block_invalidatepage() is called when all or part of the page has become
14541da177e4SLinus Torvalds  * invalidated by a truncate operation.
14551da177e4SLinus Torvalds  *
14561da177e4SLinus Torvalds  * block_invalidatepage() does not have to release all buffers, but it must
14571da177e4SLinus Torvalds  * ensure that no dirty buffer is left outside @offset and that no I/O
14581da177e4SLinus Torvalds  * is underway against any of the blocks which are outside the truncation
14591da177e4SLinus Torvalds  * point, because the caller is about to free (and possibly reuse) those
14601da177e4SLinus Torvalds  * blocks on-disk.
14611da177e4SLinus Torvalds  */
14622ff28e22SNeilBrown void block_invalidatepage(struct page *page, unsigned long offset)
14631da177e4SLinus Torvalds {
14641da177e4SLinus Torvalds 	struct buffer_head *head, *bh, *next;
14651da177e4SLinus Torvalds 	unsigned int curr_off = 0;
14661da177e4SLinus Torvalds 
14671da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
14681da177e4SLinus Torvalds 	if (!page_has_buffers(page))
14691da177e4SLinus Torvalds 		goto out;
14701da177e4SLinus Torvalds 
14711da177e4SLinus Torvalds 	head = page_buffers(page);
14721da177e4SLinus Torvalds 	bh = head;
14731da177e4SLinus Torvalds 	do {
14741da177e4SLinus Torvalds 		unsigned int next_off = curr_off + bh->b_size;
14751da177e4SLinus Torvalds 		next = bh->b_this_page;
14761da177e4SLinus Torvalds 
14771da177e4SLinus Torvalds 		/*
14781da177e4SLinus Torvalds 		 * is this block fully invalidated?
14791da177e4SLinus Torvalds 		 */
14801da177e4SLinus Torvalds 		if (offset <= curr_off)
14811da177e4SLinus Torvalds 			discard_buffer(bh);
14821da177e4SLinus Torvalds 		curr_off = next_off;
14831da177e4SLinus Torvalds 		bh = next;
14841da177e4SLinus Torvalds 	} while (bh != head);
14851da177e4SLinus Torvalds 
14861da177e4SLinus Torvalds 	/*
14871da177e4SLinus Torvalds 	 * We release buffers only if the entire page is being invalidated.
14881da177e4SLinus Torvalds 	 * The get_block cached value has been unconditionally invalidated,
14891da177e4SLinus Torvalds 	 * so real IO is not possible anymore.
14901da177e4SLinus Torvalds 	 */
14911da177e4SLinus Torvalds 	if (offset == 0)
14922ff28e22SNeilBrown 		try_to_release_page(page, 0);
14931da177e4SLinus Torvalds out:
14942ff28e22SNeilBrown 	return;
14951da177e4SLinus Torvalds }
14961da177e4SLinus Torvalds EXPORT_SYMBOL(block_invalidatepage);
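
/*
 * Sketch only: a buffer-backed filesystem typically either points its
 * address_space_operations at this helper directly or calls it from its
 * own hook ("foo_aops" is a hypothetical name):
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.invalidatepage	= block_invalidatepage,
 *	};
 */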
14971da177e4SLinus Torvalds 
14981da177e4SLinus Torvalds /*
14991da177e4SLinus Torvalds  * We attach and possibly dirty the buffers atomically wrt
15001da177e4SLinus Torvalds  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
15011da177e4SLinus Torvalds  * is already excluded via the page lock.
15021da177e4SLinus Torvalds  */
15031da177e4SLinus Torvalds void create_empty_buffers(struct page *page,
15041da177e4SLinus Torvalds 			unsigned long blocksize, unsigned long b_state)
15051da177e4SLinus Torvalds {
15061da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *tail;
15071da177e4SLinus Torvalds 
15081da177e4SLinus Torvalds 	head = alloc_page_buffers(page, blocksize, 1);
15091da177e4SLinus Torvalds 	bh = head;
15101da177e4SLinus Torvalds 	do {
15111da177e4SLinus Torvalds 		bh->b_state |= b_state;
15121da177e4SLinus Torvalds 		tail = bh;
15131da177e4SLinus Torvalds 		bh = bh->b_this_page;
15141da177e4SLinus Torvalds 	} while (bh);
15151da177e4SLinus Torvalds 	tail->b_this_page = head;
15161da177e4SLinus Torvalds 
15171da177e4SLinus Torvalds 	spin_lock(&page->mapping->private_lock);
15181da177e4SLinus Torvalds 	if (PageUptodate(page) || PageDirty(page)) {
15191da177e4SLinus Torvalds 		bh = head;
15201da177e4SLinus Torvalds 		do {
15211da177e4SLinus Torvalds 			if (PageDirty(page))
15221da177e4SLinus Torvalds 				set_buffer_dirty(bh);
15231da177e4SLinus Torvalds 			if (PageUptodate(page))
15241da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
15251da177e4SLinus Torvalds 			bh = bh->b_this_page;
15261da177e4SLinus Torvalds 		} while (bh != head);
15271da177e4SLinus Torvalds 	}
15281da177e4SLinus Torvalds 	attach_page_buffers(page, head);
15291da177e4SLinus Torvalds 	spin_unlock(&page->mapping->private_lock);
15301da177e4SLinus Torvalds }
15311da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers);
15321da177e4SLinus Torvalds 
15331da177e4SLinus Torvalds /*
15341da177e4SLinus Torvalds  * We are taking a block for data and we don't want any output from any
15351da177e4SLinus Torvalds  * buffer-cache aliases from the moment this function returns until
15361da177e4SLinus Torvalds  * something explicitly marks the buffer dirty (hopefully that will not
15371da177e4SLinus Torvalds  * happen until we free that block ;-)
15381da177e4SLinus Torvalds  * We don't even need to mark it not-uptodate - nobody can expect
15391da177e4SLinus Torvalds  * anything from a newly allocated buffer anyway.  We used to use
15401da177e4SLinus Torvalds  * unmap_buffer() for such invalidation, but that was wrong.  We definitely
15411da177e4SLinus Torvalds  * don't want to mark the alias unmapped, for example - it would confuse
15421da177e4SLinus Torvalds  * anyone who might pick it with bread() afterwards...
15431da177e4SLinus Torvalds  *
15441da177e4SLinus Torvalds  * Also..  Note that bforget() doesn't lock the buffer.  So there can
15451da177e4SLinus Torvalds  * be writeout I/O going on against recently-freed buffers.  We don't
15461da177e4SLinus Torvalds  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
15471da177e4SLinus Torvalds  * only if we really need to.  That happens here.
15481da177e4SLinus Torvalds  */
15491da177e4SLinus Torvalds void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
15501da177e4SLinus Torvalds {
15511da177e4SLinus Torvalds 	struct buffer_head *old_bh;
15521da177e4SLinus Torvalds 
15531da177e4SLinus Torvalds 	might_sleep();
15541da177e4SLinus Torvalds 
1555385fd4c5SCoywolf Qi Hunt 	old_bh = __find_get_block_slow(bdev, block);
15561da177e4SLinus Torvalds 	if (old_bh) {
15571da177e4SLinus Torvalds 		clear_buffer_dirty(old_bh);
15581da177e4SLinus Torvalds 		wait_on_buffer(old_bh);
15591da177e4SLinus Torvalds 		clear_buffer_req(old_bh);
15601da177e4SLinus Torvalds 		__brelse(old_bh);
15611da177e4SLinus Torvalds 	}
15621da177e4SLinus Torvalds }
15631da177e4SLinus Torvalds EXPORT_SYMBOL(unmap_underlying_metadata);
15641da177e4SLinus Torvalds 
15651da177e4SLinus Torvalds /*
15661da177e4SLinus Torvalds  * NOTE! All mapped/uptodate combinations are valid:
15671da177e4SLinus Torvalds  *
15681da177e4SLinus Torvalds  *	Mapped	Uptodate	Meaning
15691da177e4SLinus Torvalds  *
15701da177e4SLinus Torvalds  *	No	No		"unknown" - must do get_block()
15711da177e4SLinus Torvalds  *	No	Yes		"hole" - zero-filled
15721da177e4SLinus Torvalds  *	Yes	No		"allocated" - allocated on disk, not read in
15731da177e4SLinus Torvalds  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
15741da177e4SLinus Torvalds  *
15751da177e4SLinus Torvalds  * "Dirty" is valid only with the last case (mapped+uptodate).
15761da177e4SLinus Torvalds  */
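
/*
 * As a sketch, a read path consumes those states like this (this is
 * essentially what block_read_full_page() below does per buffer):
 *
 *	if (!buffer_uptodate(bh)) {
 *		if (!buffer_mapped(bh))
 *			get_block(inode, block, bh, 0);
 *		if (buffer_mapped(bh))
 *			... submit a read for the buffer ...
 *		else
 *			... a hole: zero-fill and set uptodate ...
 *	}
 */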
15771da177e4SLinus Torvalds 
15781da177e4SLinus Torvalds /*
15791da177e4SLinus Torvalds  * While block_write_full_page is writing back the dirty buffers under
15801da177e4SLinus Torvalds  * the page lock, whoever dirtied the buffers may decide to clean them
15811da177e4SLinus Torvalds  * again at any time.  We handle that by only looking at the buffer
15821da177e4SLinus Torvalds  * state inside lock_buffer().
15831da177e4SLinus Torvalds  *
15841da177e4SLinus Torvalds  * If block_write_full_page() is called for regular writeback
15851da177e4SLinus Torvalds  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
15861da177e4SLinus Torvalds  * locked buffer.   This only can happen if someone has written the buffer
15871da177e4SLinus Torvalds  * directly, with submit_bh().  At the address_space level PageWriteback
15881da177e4SLinus Torvalds  * prevents this contention from occurring.
15891da177e4SLinus Torvalds  */
15901da177e4SLinus Torvalds static int __block_write_full_page(struct inode *inode, struct page *page,
15911da177e4SLinus Torvalds 			get_block_t *get_block, struct writeback_control *wbc)
15921da177e4SLinus Torvalds {
15931da177e4SLinus Torvalds 	int err;
15941da177e4SLinus Torvalds 	sector_t block;
15951da177e4SLinus Torvalds 	sector_t last_block;
1596f0fbd5fcSAndrew Morton 	struct buffer_head *bh, *head;
1597b0cf2321SBadari Pulavarty 	const unsigned blocksize = 1 << inode->i_blkbits;
15981da177e4SLinus Torvalds 	int nr_underway = 0;
15991da177e4SLinus Torvalds 
16001da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
16011da177e4SLinus Torvalds 
16021da177e4SLinus Torvalds 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
16031da177e4SLinus Torvalds 
16041da177e4SLinus Torvalds 	if (!page_has_buffers(page)) {
1605b0cf2321SBadari Pulavarty 		create_empty_buffers(page, blocksize,
16061da177e4SLinus Torvalds 					(1 << BH_Dirty)|(1 << BH_Uptodate));
16071da177e4SLinus Torvalds 	}
16081da177e4SLinus Torvalds 
16091da177e4SLinus Torvalds 	/*
16101da177e4SLinus Torvalds 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
16111da177e4SLinus Torvalds 	 * here, and the (potentially unmapped) buffers may become dirty at
16121da177e4SLinus Torvalds 	 * any time.  If a buffer becomes dirty here after we've inspected it
16131da177e4SLinus Torvalds 	 * then we just miss that fact, and the page stays dirty.
16141da177e4SLinus Torvalds 	 *
16151da177e4SLinus Torvalds 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
16161da177e4SLinus Torvalds 	 * handle that here by just cleaning them.
16171da177e4SLinus Torvalds 	 */
16181da177e4SLinus Torvalds 
161954b21a79SAndrew Morton 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
16201da177e4SLinus Torvalds 	head = page_buffers(page);
16211da177e4SLinus Torvalds 	bh = head;
16221da177e4SLinus Torvalds 
16231da177e4SLinus Torvalds 	/*
16241da177e4SLinus Torvalds 	 * Get all the dirty buffers mapped to disk addresses and
16251da177e4SLinus Torvalds 	 * handle any aliases from the underlying blockdev's mapping.
16261da177e4SLinus Torvalds 	 */
16271da177e4SLinus Torvalds 	do {
16281da177e4SLinus Torvalds 		if (block > last_block) {
16291da177e4SLinus Torvalds 			/*
16301da177e4SLinus Torvalds 			 * mapped buffers outside i_size will occur, because
16311da177e4SLinus Torvalds 			 * this page can be outside i_size when there is a
16321da177e4SLinus Torvalds 			 * truncate in progress.
16331da177e4SLinus Torvalds 			 */
16341da177e4SLinus Torvalds 			/*
16351da177e4SLinus Torvalds 			 * The buffer was zeroed by block_write_full_page()
16361da177e4SLinus Torvalds 			 */
16371da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
16381da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
16391da177e4SLinus Torvalds 		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1640b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
16411da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
16421da177e4SLinus Torvalds 			if (err)
16431da177e4SLinus Torvalds 				goto recover;
16441da177e4SLinus Torvalds 			if (buffer_new(bh)) {
16451da177e4SLinus Torvalds 				/* blockdev mappings never come here */
16461da177e4SLinus Torvalds 				clear_buffer_new(bh);
16471da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
16481da177e4SLinus Torvalds 							bh->b_blocknr);
16491da177e4SLinus Torvalds 			}
16501da177e4SLinus Torvalds 		}
16511da177e4SLinus Torvalds 		bh = bh->b_this_page;
16521da177e4SLinus Torvalds 		block++;
16531da177e4SLinus Torvalds 	} while (bh != head);
16541da177e4SLinus Torvalds 
16551da177e4SLinus Torvalds 	do {
16561da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
16571da177e4SLinus Torvalds 			continue;
16581da177e4SLinus Torvalds 		/*
16591da177e4SLinus Torvalds 		 * If it's a fully non-blocking write attempt and we cannot
16601da177e4SLinus Torvalds 		 * lock the buffer then redirty the page.  Note that this can
16611da177e4SLinus Torvalds 		 * potentially cause a busy-wait loop from pdflush and kswapd
16621da177e4SLinus Torvalds 		 * activity, but those code paths have their own higher-level
16631da177e4SLinus Torvalds 		 * throttling.
16641da177e4SLinus Torvalds 		 */
16651da177e4SLinus Torvalds 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
16661da177e4SLinus Torvalds 			lock_buffer(bh);
16671da177e4SLinus Torvalds 		} else if (test_set_buffer_locked(bh)) {
16681da177e4SLinus Torvalds 			redirty_page_for_writepage(wbc, page);
16691da177e4SLinus Torvalds 			continue;
16701da177e4SLinus Torvalds 		}
16711da177e4SLinus Torvalds 		if (test_clear_buffer_dirty(bh)) {
16721da177e4SLinus Torvalds 			mark_buffer_async_write(bh);
16731da177e4SLinus Torvalds 		} else {
16741da177e4SLinus Torvalds 			unlock_buffer(bh);
16751da177e4SLinus Torvalds 		}
16761da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
16771da177e4SLinus Torvalds 
16781da177e4SLinus Torvalds 	/*
16791da177e4SLinus Torvalds 	 * The page and its buffers are protected by PageWriteback(), so we can
16801da177e4SLinus Torvalds 	 * drop the bh refcounts early.
16811da177e4SLinus Torvalds 	 */
16821da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
16831da177e4SLinus Torvalds 	set_page_writeback(page);
16841da177e4SLinus Torvalds 
16851da177e4SLinus Torvalds 	do {
16861da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
16871da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
16881da177e4SLinus Torvalds 			submit_bh(WRITE, bh);
16891da177e4SLinus Torvalds 			nr_underway++;
1690ad576e63SNick Piggin 		}
16911da177e4SLinus Torvalds 		bh = next;
16921da177e4SLinus Torvalds 	} while (bh != head);
169305937baaSAndrew Morton 	unlock_page(page);
16941da177e4SLinus Torvalds 
16951da177e4SLinus Torvalds 	err = 0;
16961da177e4SLinus Torvalds done:
16971da177e4SLinus Torvalds 	if (nr_underway == 0) {
16981da177e4SLinus Torvalds 		/*
16991da177e4SLinus Torvalds 		 * The page was marked dirty, but the buffers were
17001da177e4SLinus Torvalds 		 * clean.  Someone wrote them back by hand with
17011da177e4SLinus Torvalds 		 * ll_rw_block/submit_bh.  A rare case.
17021da177e4SLinus Torvalds 		 */
17031da177e4SLinus Torvalds 		int uptodate = 1;
17041da177e4SLinus Torvalds 		do {
17051da177e4SLinus Torvalds 			if (!buffer_uptodate(bh)) {
17061da177e4SLinus Torvalds 				uptodate = 0;
17071da177e4SLinus Torvalds 				break;
17081da177e4SLinus Torvalds 			}
17091da177e4SLinus Torvalds 			bh = bh->b_this_page;
17101da177e4SLinus Torvalds 		} while (bh != head);
17111da177e4SLinus Torvalds 		if (uptodate)
17121da177e4SLinus Torvalds 			SetPageUptodate(page);
17131da177e4SLinus Torvalds 		end_page_writeback(page);
17141da177e4SLinus Torvalds 		/*
17151da177e4SLinus Torvalds 		 * The page and buffer_heads can be released at any time from
17161da177e4SLinus Torvalds 		 * here on.
17171da177e4SLinus Torvalds 		 */
17181da177e4SLinus Torvalds 		wbc->pages_skipped++;	/* We didn't write this page */
17191da177e4SLinus Torvalds 	}
17201da177e4SLinus Torvalds 	return err;
17211da177e4SLinus Torvalds 
17221da177e4SLinus Torvalds recover:
17231da177e4SLinus Torvalds 	/*
17241da177e4SLinus Torvalds 	 * ENOSPC, or some other error.  We may already have added some
17251da177e4SLinus Torvalds 	 * blocks to the file, so we need to write these out to avoid
17261da177e4SLinus Torvalds 	 * exposing stale data.
17271da177e4SLinus Torvalds 	 * The page is currently locked and not marked for writeback
17281da177e4SLinus Torvalds 	 */
17291da177e4SLinus Torvalds 	bh = head;
17301da177e4SLinus Torvalds 	/* Recovery: lock and submit the mapped buffers */
17311da177e4SLinus Torvalds 	do {
17321da177e4SLinus Torvalds 		if (buffer_mapped(bh) && buffer_dirty(bh)) {
17331da177e4SLinus Torvalds 			lock_buffer(bh);
17341da177e4SLinus Torvalds 			mark_buffer_async_write(bh);
17351da177e4SLinus Torvalds 		} else {
17361da177e4SLinus Torvalds 			/*
17371da177e4SLinus Torvalds 			 * The buffer may have been set dirty during
17381da177e4SLinus Torvalds 			 * attachment to a dirty page.
17391da177e4SLinus Torvalds 			 */
17401da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
17411da177e4SLinus Torvalds 		}
17421da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
17431da177e4SLinus Torvalds 	SetPageError(page);
17441da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
17451da177e4SLinus Torvalds 	set_page_writeback(page);
17461da177e4SLinus Torvalds 	unlock_page(page);
17471da177e4SLinus Torvalds 	do {
17481da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
17491da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
17501da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
17511da177e4SLinus Torvalds 			submit_bh(WRITE, bh);
17521da177e4SLinus Torvalds 			nr_underway++;
1753ad576e63SNick Piggin 		}
17541da177e4SLinus Torvalds 		bh = next;
17551da177e4SLinus Torvalds 	} while (bh != head);
17561da177e4SLinus Torvalds 	goto done;
17571da177e4SLinus Torvalds }
17581da177e4SLinus Torvalds 
17591da177e4SLinus Torvalds static int __block_prepare_write(struct inode *inode, struct page *page,
17601da177e4SLinus Torvalds 		unsigned from, unsigned to, get_block_t *get_block)
17611da177e4SLinus Torvalds {
17621da177e4SLinus Torvalds 	unsigned block_start, block_end;
17631da177e4SLinus Torvalds 	sector_t block;
17641da177e4SLinus Torvalds 	int err = 0;
17651da177e4SLinus Torvalds 	unsigned blocksize, bbits;
17661da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
17671da177e4SLinus Torvalds 
17681da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
17691da177e4SLinus Torvalds 	BUG_ON(from > PAGE_CACHE_SIZE);
17701da177e4SLinus Torvalds 	BUG_ON(to > PAGE_CACHE_SIZE);
17711da177e4SLinus Torvalds 	BUG_ON(from > to);
17721da177e4SLinus Torvalds 
17731da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
17741da177e4SLinus Torvalds 	if (!page_has_buffers(page))
17751da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
17761da177e4SLinus Torvalds 	head = page_buffers(page);
17771da177e4SLinus Torvalds 
17781da177e4SLinus Torvalds 	bbits = inode->i_blkbits;
17791da177e4SLinus Torvalds 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
17801da177e4SLinus Torvalds 
17811da177e4SLinus Torvalds 	for(bh = head, block_start = 0; bh != head || !block_start;
17821da177e4SLinus Torvalds 	    block++, block_start=block_end, bh = bh->b_this_page) {
17831da177e4SLinus Torvalds 		block_end = block_start + blocksize;
17841da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
17851da177e4SLinus Torvalds 			if (PageUptodate(page)) {
17861da177e4SLinus Torvalds 				if (!buffer_uptodate(bh))
17871da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
17881da177e4SLinus Torvalds 			}
17891da177e4SLinus Torvalds 			continue;
17901da177e4SLinus Torvalds 		}
17911da177e4SLinus Torvalds 		if (buffer_new(bh))
17921da177e4SLinus Torvalds 			clear_buffer_new(bh);
17931da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
1794b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
17951da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
17961da177e4SLinus Torvalds 			if (err)
1797f3ddbdc6SNick Piggin 				break;
17981da177e4SLinus Torvalds 			if (buffer_new(bh)) {
17991da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
18001da177e4SLinus Torvalds 							bh->b_blocknr);
18011da177e4SLinus Torvalds 				if (PageUptodate(page)) {
18021da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
18031da177e4SLinus Torvalds 					continue;
18041da177e4SLinus Torvalds 				}
18051da177e4SLinus Torvalds 				if (block_end > to || block_start < from) {
18061da177e4SLinus Torvalds 					void *kaddr;
18071da177e4SLinus Torvalds 
18081da177e4SLinus Torvalds 					kaddr = kmap_atomic(page, KM_USER0);
18091da177e4SLinus Torvalds 					if (block_end > to)
18101da177e4SLinus Torvalds 						memset(kaddr+to, 0,
18111da177e4SLinus Torvalds 							block_end-to);
18121da177e4SLinus Torvalds 					if (block_start < from)
18131da177e4SLinus Torvalds 						memset(kaddr+block_start,
18141da177e4SLinus Torvalds 							0, from-block_start);
18151da177e4SLinus Torvalds 					flush_dcache_page(page);
18161da177e4SLinus Torvalds 					kunmap_atomic(kaddr, KM_USER0);
18171da177e4SLinus Torvalds 				}
18181da177e4SLinus Torvalds 				continue;
18191da177e4SLinus Torvalds 			}
18201da177e4SLinus Torvalds 		}
18211da177e4SLinus Torvalds 		if (PageUptodate(page)) {
18221da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
18231da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
18241da177e4SLinus Torvalds 			continue;
18251da177e4SLinus Torvalds 		}
18261da177e4SLinus Torvalds 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
182733a266ddSDavid Chinner 		    !buffer_unwritten(bh) &&
18281da177e4SLinus Torvalds 		     (block_start < from || block_end > to)) {
18291da177e4SLinus Torvalds 			ll_rw_block(READ, 1, &bh);
18301da177e4SLinus Torvalds 			*wait_bh++=bh;
18311da177e4SLinus Torvalds 		}
18321da177e4SLinus Torvalds 	}
18331da177e4SLinus Torvalds 	/*
18341da177e4SLinus Torvalds 	 * If we issued read requests - let them complete.
18351da177e4SLinus Torvalds 	 */
18361da177e4SLinus Torvalds 	while(wait_bh > wait) {
18371da177e4SLinus Torvalds 		wait_on_buffer(*--wait_bh);
18381da177e4SLinus Torvalds 		if (!buffer_uptodate(*wait_bh))
1839f3ddbdc6SNick Piggin 			err = -EIO;
18401da177e4SLinus Torvalds 	}
1841152becd2SAnton Altaparmakov 	if (!err) {
1842152becd2SAnton Altaparmakov 		bh = head;
1843152becd2SAnton Altaparmakov 		do {
1844152becd2SAnton Altaparmakov 			if (buffer_new(bh))
1845152becd2SAnton Altaparmakov 				clear_buffer_new(bh);
1846152becd2SAnton Altaparmakov 		} while ((bh = bh->b_this_page) != head);
1847152becd2SAnton Altaparmakov 		return 0;
1848152becd2SAnton Altaparmakov 	}
1849f3ddbdc6SNick Piggin 	/* Error case: */
18501da177e4SLinus Torvalds 	/*
18511da177e4SLinus Torvalds 	 * Zero out any newly allocated blocks to avoid exposing stale
18521da177e4SLinus Torvalds 	 * data.  If BH_New is set, we know that the block was newly
18531da177e4SLinus Torvalds 	 * allocated in the above loop.
18541da177e4SLinus Torvalds 	 */
18551da177e4SLinus Torvalds 	bh = head;
18561da177e4SLinus Torvalds 	block_start = 0;
18571da177e4SLinus Torvalds 	do {
18581da177e4SLinus Torvalds 		block_end = block_start+blocksize;
18591da177e4SLinus Torvalds 		if (block_end <= from)
18601da177e4SLinus Torvalds 			goto next_bh;
18611da177e4SLinus Torvalds 		if (block_start >= to)
18621da177e4SLinus Torvalds 			break;
18631da177e4SLinus Torvalds 		if (buffer_new(bh)) {
18641da177e4SLinus Torvalds 			void *kaddr;
18651da177e4SLinus Torvalds 
18661da177e4SLinus Torvalds 			clear_buffer_new(bh);
18671da177e4SLinus Torvalds 			kaddr = kmap_atomic(page, KM_USER0);
18681da177e4SLinus Torvalds 			memset(kaddr+block_start, 0, bh->b_size);
18698c581651SMonakhov Dmitriy 			flush_dcache_page(page);
18701da177e4SLinus Torvalds 			kunmap_atomic(kaddr, KM_USER0);
18711da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
18721da177e4SLinus Torvalds 			mark_buffer_dirty(bh);
18731da177e4SLinus Torvalds 		}
18741da177e4SLinus Torvalds next_bh:
18751da177e4SLinus Torvalds 		block_start = block_end;
18761da177e4SLinus Torvalds 		bh = bh->b_this_page;
18771da177e4SLinus Torvalds 	} while (bh != head);
18781da177e4SLinus Torvalds 	return err;
18791da177e4SLinus Torvalds }
18801da177e4SLinus Torvalds 
18811da177e4SLinus Torvalds static int __block_commit_write(struct inode *inode, struct page *page,
18821da177e4SLinus Torvalds 		unsigned from, unsigned to)
18831da177e4SLinus Torvalds {
18841da177e4SLinus Torvalds 	unsigned block_start, block_end;
18851da177e4SLinus Torvalds 	int partial = 0;
18861da177e4SLinus Torvalds 	unsigned blocksize;
18871da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
18881da177e4SLinus Torvalds 
18891da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
18901da177e4SLinus Torvalds 
18911da177e4SLinus Torvalds 	for(bh = head = page_buffers(page), block_start = 0;
18921da177e4SLinus Torvalds 	    bh != head || !block_start;
18931da177e4SLinus Torvalds 	    block_start=block_end, bh = bh->b_this_page) {
18941da177e4SLinus Torvalds 		block_end = block_start + blocksize;
18951da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
18961da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
18971da177e4SLinus Torvalds 				partial = 1;
18981da177e4SLinus Torvalds 		} else {
18991da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
19001da177e4SLinus Torvalds 			mark_buffer_dirty(bh);
19011da177e4SLinus Torvalds 		}
19021da177e4SLinus Torvalds 	}
19031da177e4SLinus Torvalds 
19041da177e4SLinus Torvalds 	/*
19051da177e4SLinus Torvalds 	 * If this is a partial write which happened to make all buffers
19061da177e4SLinus Torvalds 	 * uptodate then we can optimize away a bogus readpage() for
19071da177e4SLinus Torvalds 	 * the next read(). Here we 'discover' whether the page went
19081da177e4SLinus Torvalds 	 * uptodate as a result of this (potentially partial) write.
19091da177e4SLinus Torvalds 	 */
19101da177e4SLinus Torvalds 	if (!partial)
19111da177e4SLinus Torvalds 		SetPageUptodate(page);
19121da177e4SLinus Torvalds 	return 0;
19131da177e4SLinus Torvalds }
19141da177e4SLinus Torvalds 
19151da177e4SLinus Torvalds /*
19161da177e4SLinus Torvalds  * Generic "read page" function for block devices that have the normal
19171da177e4SLinus Torvalds  * get_block functionality. This is most of the block device filesystems.
19181da177e4SLinus Torvalds  * Reads the page asynchronously --- the unlock_buffer() and
19191da177e4SLinus Torvalds  * set/clear_buffer_uptodate() functions propagate buffer state into the
19201da177e4SLinus Torvalds  * page struct once IO has completed.
19211da177e4SLinus Torvalds  */
19221da177e4SLinus Torvalds int block_read_full_page(struct page *page, get_block_t *get_block)
19231da177e4SLinus Torvalds {
19241da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
19251da177e4SLinus Torvalds 	sector_t iblock, lblock;
19261da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
19271da177e4SLinus Torvalds 	unsigned int blocksize;
19281da177e4SLinus Torvalds 	int nr, i;
19291da177e4SLinus Torvalds 	int fully_mapped = 1;
19301da177e4SLinus Torvalds 
1931cd7619d6SMatt Mackall 	BUG_ON(!PageLocked(page));
19321da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
19331da177e4SLinus Torvalds 	if (!page_has_buffers(page))
19341da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
19351da177e4SLinus Torvalds 	head = page_buffers(page);
19361da177e4SLinus Torvalds 
19371da177e4SLinus Torvalds 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
19381da177e4SLinus Torvalds 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
19391da177e4SLinus Torvalds 	bh = head;
19401da177e4SLinus Torvalds 	nr = 0;
19411da177e4SLinus Torvalds 	i = 0;
19421da177e4SLinus Torvalds 
19431da177e4SLinus Torvalds 	do {
19441da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
19451da177e4SLinus Torvalds 			continue;
19461da177e4SLinus Torvalds 
19471da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
1948c64610baSAndrew Morton 			int err = 0;
1949c64610baSAndrew Morton 
19501da177e4SLinus Torvalds 			fully_mapped = 0;
19511da177e4SLinus Torvalds 			if (iblock < lblock) {
1952b0cf2321SBadari Pulavarty 				WARN_ON(bh->b_size != blocksize);
1953c64610baSAndrew Morton 				err = get_block(inode, iblock, bh, 0);
1954c64610baSAndrew Morton 				if (err)
19551da177e4SLinus Torvalds 					SetPageError(page);
19561da177e4SLinus Torvalds 			}
19571da177e4SLinus Torvalds 			if (!buffer_mapped(bh)) {
19581da177e4SLinus Torvalds 				void *kaddr = kmap_atomic(page, KM_USER0);
19591da177e4SLinus Torvalds 				memset(kaddr + i * blocksize, 0, blocksize);
19601da177e4SLinus Torvalds 				flush_dcache_page(page);
19611da177e4SLinus Torvalds 				kunmap_atomic(kaddr, KM_USER0);
1962c64610baSAndrew Morton 				if (!err)
19631da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
19641da177e4SLinus Torvalds 				continue;
19651da177e4SLinus Torvalds 			}
19661da177e4SLinus Torvalds 			/*
19671da177e4SLinus Torvalds 			 * get_block() might have updated the buffer
19681da177e4SLinus Torvalds 			 * synchronously
19691da177e4SLinus Torvalds 			 */
19701da177e4SLinus Torvalds 			if (buffer_uptodate(bh))
19711da177e4SLinus Torvalds 				continue;
19721da177e4SLinus Torvalds 		}
19731da177e4SLinus Torvalds 		arr[nr++] = bh;
19741da177e4SLinus Torvalds 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
19751da177e4SLinus Torvalds 
19761da177e4SLinus Torvalds 	if (fully_mapped)
19771da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
19781da177e4SLinus Torvalds 
19791da177e4SLinus Torvalds 	if (!nr) {
19801da177e4SLinus Torvalds 		/*
19811da177e4SLinus Torvalds 		 * All buffers are uptodate - we can set the page uptodate
19821da177e4SLinus Torvalds 		 * as well. But not if get_block() returned an error.
19831da177e4SLinus Torvalds 		 */
19841da177e4SLinus Torvalds 		if (!PageError(page))
19851da177e4SLinus Torvalds 			SetPageUptodate(page);
19861da177e4SLinus Torvalds 		unlock_page(page);
19871da177e4SLinus Torvalds 		return 0;
19881da177e4SLinus Torvalds 	}
19891da177e4SLinus Torvalds 
19901da177e4SLinus Torvalds 	/* Stage two: lock the buffers */
19911da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
19921da177e4SLinus Torvalds 		bh = arr[i];
19931da177e4SLinus Torvalds 		lock_buffer(bh);
19941da177e4SLinus Torvalds 		mark_buffer_async_read(bh);
19951da177e4SLinus Torvalds 	}
19961da177e4SLinus Torvalds 
19971da177e4SLinus Torvalds 	/*
19981da177e4SLinus Torvalds 	 * Stage 3: start the IO.  Check for uptodateness
19991da177e4SLinus Torvalds 	 * inside the buffer lock in case another process reading
20001da177e4SLinus Torvalds 	 * the underlying blockdev brought it uptodate (the sct fix).
20011da177e4SLinus Torvalds 	 */
20021da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
20031da177e4SLinus Torvalds 		bh = arr[i];
20041da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
20051da177e4SLinus Torvalds 			end_buffer_async_read(bh, 1);
20061da177e4SLinus Torvalds 		else
20071da177e4SLinus Torvalds 			submit_bh(READ, bh);
20081da177e4SLinus Torvalds 	}
20091da177e4SLinus Torvalds 	return 0;
20101da177e4SLinus Torvalds }
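
/*
 * Simple filesystems wire this up as their ->readpage with a one-line
 * wrapper supplying the fs-specific get_block ("foo" is a placeholder):
 *
 *	static int foo_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, foo_get_block);
 *	}
 */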
20111da177e4SLinus Torvalds 
20121da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding
20131da177e4SLinus Torvalds  * truncates.  Uses prepare/commit_write to allow the filesystem to
20141da177e4SLinus Torvalds  * deal with the hole.
20151da177e4SLinus Torvalds  */
201605eb0b51SOGAWA Hirofumi static int __generic_cont_expand(struct inode *inode, loff_t size,
201705eb0b51SOGAWA Hirofumi 				 pgoff_t index, unsigned int offset)
20181da177e4SLinus Torvalds {
20191da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
20201da177e4SLinus Torvalds 	struct page *page;
202105eb0b51SOGAWA Hirofumi 	unsigned long limit;
20221da177e4SLinus Torvalds 	int err;
20231da177e4SLinus Torvalds 
20241da177e4SLinus Torvalds 	err = -EFBIG;
20251da177e4SLinus Torvalds 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
20261da177e4SLinus Torvalds 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
20271da177e4SLinus Torvalds 		send_sig(SIGXFSZ, current, 0);
20281da177e4SLinus Torvalds 		goto out;
20291da177e4SLinus Torvalds 	}
20301da177e4SLinus Torvalds 	if (size > inode->i_sb->s_maxbytes)
20311da177e4SLinus Torvalds 		goto out;
20321da177e4SLinus Torvalds 
203305eb0b51SOGAWA Hirofumi 	err = -ENOMEM;
203405eb0b51SOGAWA Hirofumi 	page = grab_cache_page(mapping, index);
203505eb0b51SOGAWA Hirofumi 	if (!page)
203605eb0b51SOGAWA Hirofumi 		goto out;
203705eb0b51SOGAWA Hirofumi 	err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
203805eb0b51SOGAWA Hirofumi 	if (err) {
203905eb0b51SOGAWA Hirofumi 		/*
204005eb0b51SOGAWA Hirofumi 		 * ->prepare_write() may have instantiated a few blocks
204105eb0b51SOGAWA Hirofumi 		 * outside i_size.  Trim these off again.
204205eb0b51SOGAWA Hirofumi 		 */
204305eb0b51SOGAWA Hirofumi 		unlock_page(page);
204405eb0b51SOGAWA Hirofumi 		page_cache_release(page);
204505eb0b51SOGAWA Hirofumi 		vmtruncate(inode, inode->i_size);
204605eb0b51SOGAWA Hirofumi 		goto out;
204705eb0b51SOGAWA Hirofumi 	}
204805eb0b51SOGAWA Hirofumi 
204905eb0b51SOGAWA Hirofumi 	err = mapping->a_ops->commit_write(NULL, page, offset, offset);
205005eb0b51SOGAWA Hirofumi 
205105eb0b51SOGAWA Hirofumi 	unlock_page(page);
205205eb0b51SOGAWA Hirofumi 	page_cache_release(page);
205305eb0b51SOGAWA Hirofumi 	if (err > 0)
205405eb0b51SOGAWA Hirofumi 		err = 0;
205505eb0b51SOGAWA Hirofumi out:
205605eb0b51SOGAWA Hirofumi 	return err;
205705eb0b51SOGAWA Hirofumi }
205805eb0b51SOGAWA Hirofumi 
205905eb0b51SOGAWA Hirofumi int generic_cont_expand(struct inode *inode, loff_t size)
206005eb0b51SOGAWA Hirofumi {
206105eb0b51SOGAWA Hirofumi 	pgoff_t index;
206205eb0b51SOGAWA Hirofumi 	unsigned int offset;
206305eb0b51SOGAWA Hirofumi 
20641da177e4SLinus Torvalds 	offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
20651da177e4SLinus Torvalds 
20661da177e4SLinus Torvalds 	/* ugh.  in prepare/commit_write, if from==to==start of block, we
20671da177e4SLinus Torvalds 	 * skip the prepare.  make sure we never send an offset for the start
20681da177e4SLinus Torvalds 	 * of a block.
20691da177e4SLinus Torvalds 	 */
20701da177e4SLinus Torvalds 	if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
207105eb0b51SOGAWA Hirofumi 		/* caller must handle this extra byte. */
20721da177e4SLinus Torvalds 		offset++;
20731da177e4SLinus Torvalds 	}
20741da177e4SLinus Torvalds 	index = size >> PAGE_CACHE_SHIFT;
207505eb0b51SOGAWA Hirofumi 
207605eb0b51SOGAWA Hirofumi 	return __generic_cont_expand(inode, size, index, offset);
20771da177e4SLinus Torvalds }
207805eb0b51SOGAWA Hirofumi 
207905eb0b51SOGAWA Hirofumi int generic_cont_expand_simple(struct inode *inode, loff_t size)
208005eb0b51SOGAWA Hirofumi {
208105eb0b51SOGAWA Hirofumi 	loff_t pos = size - 1;
208205eb0b51SOGAWA Hirofumi 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
208305eb0b51SOGAWA Hirofumi 	unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
208405eb0b51SOGAWA Hirofumi 
208505eb0b51SOGAWA Hirofumi 	/* prepare/commit_write can handle even if from==to==start of block. */
208605eb0b51SOGAWA Hirofumi 	return __generic_cont_expand(inode, size, index, offset);
20871da177e4SLinus Torvalds }
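
/*
 * Example (a minimal sketch; the myfs_* names are hypothetical): a
 * typical caller extends the file from its ->setattr() path before
 * i_size is updated.
 */
#if 0
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err = 0;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size)
		err = generic_cont_expand_simple(inode, attr->ia_size);
	if (err)
		return err;
	return inode_setattr(inode, attr);
}
#endif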
20881da177e4SLinus Torvalds 
20891da177e4SLinus Torvalds /*
20901da177e4SLinus Torvalds  * For moronic filesystems that do not allow holes in files.
20911da177e4SLinus Torvalds  * We may have to extend the file.
20921da177e4SLinus Torvalds  */
20931da177e4SLinus Torvalds 
20941da177e4SLinus Torvalds int cont_prepare_write(struct page *page, unsigned offset,
20951da177e4SLinus Torvalds 		unsigned to, get_block_t *get_block, loff_t *bytes)
20961da177e4SLinus Torvalds {
20971da177e4SLinus Torvalds 	struct address_space *mapping = page->mapping;
20981da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
20991da177e4SLinus Torvalds 	struct page *new_page;
21001da177e4SLinus Torvalds 	pgoff_t pgpos;
21011da177e4SLinus Torvalds 	long status;
21021da177e4SLinus Torvalds 	unsigned zerofrom;
21031da177e4SLinus Torvalds 	unsigned blocksize = 1 << inode->i_blkbits;
21041da177e4SLinus Torvalds 	void *kaddr;
21051da177e4SLinus Torvalds 
21061da177e4SLinus Torvalds 	while (page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
21071da177e4SLinus Torvalds 		status = -ENOMEM;
21081da177e4SLinus Torvalds 		new_page = grab_cache_page(mapping, pgpos);
21091da177e4SLinus Torvalds 		if (!new_page)
21101da177e4SLinus Torvalds 			goto out;
21111da177e4SLinus Torvalds 		/* we might sleep */
21121da177e4SLinus Torvalds 		if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
21131da177e4SLinus Torvalds 			unlock_page(new_page);
21141da177e4SLinus Torvalds 			page_cache_release(new_page);
21151da177e4SLinus Torvalds 			continue;
21161da177e4SLinus Torvalds 		}
21171da177e4SLinus Torvalds 		zerofrom = *bytes & ~PAGE_CACHE_MASK;
21181da177e4SLinus Torvalds 		if (zerofrom & (blocksize-1)) {
21191da177e4SLinus Torvalds 			*bytes |= (blocksize-1);
21201da177e4SLinus Torvalds 			(*bytes)++;
21211da177e4SLinus Torvalds 		}
21221da177e4SLinus Torvalds 		status = __block_prepare_write(inode, new_page, zerofrom,
21231da177e4SLinus Torvalds 						PAGE_CACHE_SIZE, get_block);
21241da177e4SLinus Torvalds 		if (status)
21251da177e4SLinus Torvalds 			goto out_unmap;
21261da177e4SLinus Torvalds 		kaddr = kmap_atomic(new_page, KM_USER0);
21271da177e4SLinus Torvalds 		memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
21281da177e4SLinus Torvalds 		flush_dcache_page(new_page);
21291da177e4SLinus Torvalds 		kunmap_atomic(kaddr, KM_USER0);
21301da177e4SLinus Torvalds 		generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
21311da177e4SLinus Torvalds 		unlock_page(new_page);
21321da177e4SLinus Torvalds 		page_cache_release(new_page);
21331da177e4SLinus Torvalds 	}
21341da177e4SLinus Torvalds 
21351da177e4SLinus Torvalds 	if (page->index < pgpos) {
21361da177e4SLinus Torvalds 		/* completely inside the area */
21371da177e4SLinus Torvalds 		zerofrom = offset;
21381da177e4SLinus Torvalds 	} else {
21391da177e4SLinus Torvalds 		/* page covers the boundary, find the boundary offset */
21401da177e4SLinus Torvalds 		zerofrom = *bytes & ~PAGE_CACHE_MASK;
21411da177e4SLinus Torvalds 
21421da177e4SLinus Torvalds 		/* if we will expand the file, the last block will be filled */
21431da177e4SLinus Torvalds 		if (to > zerofrom && (zerofrom & (blocksize-1))) {
21441da177e4SLinus Torvalds 			*bytes |= (blocksize-1);
21451da177e4SLinus Torvalds 			(*bytes)++;
21461da177e4SLinus Torvalds 		}
21471da177e4SLinus Torvalds 
21481da177e4SLinus Torvalds 		/* starting below the boundary? Nothing to zero out */
21491da177e4SLinus Torvalds 		if (offset <= zerofrom)
21501da177e4SLinus Torvalds 			zerofrom = offset;
21511da177e4SLinus Torvalds 	}
21521da177e4SLinus Torvalds 	status = __block_prepare_write(inode, page, zerofrom, to, get_block);
21531da177e4SLinus Torvalds 	if (status)
21541da177e4SLinus Torvalds 		goto out1;
21551da177e4SLinus Torvalds 	if (zerofrom < offset) {
21561da177e4SLinus Torvalds 		kaddr = kmap_atomic(page, KM_USER0);
21571da177e4SLinus Torvalds 		memset(kaddr+zerofrom, 0, offset-zerofrom);
21581da177e4SLinus Torvalds 		flush_dcache_page(page);
21591da177e4SLinus Torvalds 		kunmap_atomic(kaddr, KM_USER0);
21601da177e4SLinus Torvalds 		__block_commit_write(inode, page, zerofrom, offset);
21611da177e4SLinus Torvalds 	}
21621da177e4SLinus Torvalds 	return 0;
21631da177e4SLinus Torvalds out1:
21641da177e4SLinus Torvalds 	ClearPageUptodate(page);
21651da177e4SLinus Torvalds 	return status;
21661da177e4SLinus Torvalds 
21671da177e4SLinus Torvalds out_unmap:
21681da177e4SLinus Torvalds 	ClearPageUptodate(new_page);
21691da177e4SLinus Torvalds 	unlock_page(new_page);
21701da177e4SLinus Torvalds 	page_cache_release(new_page);
21711da177e4SLinus Torvalds out:
21721da177e4SLinus Torvalds 	return status;
21731da177e4SLinus Torvalds }
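
/*
 * Example (a minimal sketch; myfs_* and MYFS_I() are hypothetical): a
 * hole-less filesystem routes ->prepare_write() through
 * cont_prepare_write(), passing a per-inode count of bytes allocated so
 * far, much as FAT does with MSDOS_I(inode)->mmu_private.
 */
#if 0
static int myfs_prepare_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;

	return cont_prepare_write(page, from, to, myfs_get_block,
				  &MYFS_I(inode)->mmu_private);
}
#endif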
21741da177e4SLinus Torvalds 
21751da177e4SLinus Torvalds int block_prepare_write(struct page *page, unsigned from, unsigned to,
21761da177e4SLinus Torvalds 			get_block_t *get_block)
21771da177e4SLinus Torvalds {
21781da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
21791da177e4SLinus Torvalds 	int err = __block_prepare_write(inode, page, from, to, get_block);
21801da177e4SLinus Torvalds 	if (err)
21811da177e4SLinus Torvalds 		ClearPageUptodate(page);
21821da177e4SLinus Torvalds 	return err;
21831da177e4SLinus Torvalds }
21841da177e4SLinus Torvalds 
21851da177e4SLinus Torvalds int block_commit_write(struct page *page, unsigned from, unsigned to)
21861da177e4SLinus Torvalds {
21871da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
21881da177e4SLinus Torvalds 	__block_commit_write(inode, page, from, to);
21891da177e4SLinus Torvalds 	return 0;
21901da177e4SLinus Torvalds }
21911da177e4SLinus Torvalds 
21921da177e4SLinus Torvalds int generic_commit_write(struct file *file, struct page *page,
21931da177e4SLinus Torvalds 		unsigned from, unsigned to)
21941da177e4SLinus Torvalds {
21951da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
21961da177e4SLinus Torvalds 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
21971da177e4SLinus Torvalds 	__block_commit_write(inode, page, from, to);
21981da177e4SLinus Torvalds 	/*
21991da177e4SLinus Torvalds 	 * No need to use i_size_read() here, the i_size
22001b1dcc1bSJes Sorensen 	 * cannot change under us because we hold i_mutex.
22011da177e4SLinus Torvalds 	 */
22021da177e4SLinus Torvalds 	if (pos > inode->i_size) {
22031da177e4SLinus Torvalds 		i_size_write(inode, pos);
22041da177e4SLinus Torvalds 		mark_inode_dirty(inode);
22051da177e4SLinus Torvalds 	}
22061da177e4SLinus Torvalds 	return 0;
22071da177e4SLinus Torvalds }
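
/*
 * Example (a minimal sketch; the myfs_* names are hypothetical): for
 * filesystems that do support holes (contrast with the
 * cont_prepare_write() sketch above), the common pairing is a thin
 * ->prepare_write() wrapper around block_prepare_write() together with
 * generic_commit_write(), much as ext2 wires its aops.
 */
#if 0
static int myfs_prepare_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_writepage,
	.prepare_write	= myfs_prepare_write,
	.commit_write	= generic_commit_write,
	.sync_page	= block_sync_page,
};
#endif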
22081da177e4SLinus Torvalds 
22091da177e4SLinus Torvalds 
22101da177e4SLinus Torvalds /*
22111da177e4SLinus Torvalds  * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
22121da177e4SLinus Torvalds  * immediately, while under the page lock.  So it needs a special end_io
22131da177e4SLinus Torvalds  * handler which does not touch the bh after unlocking it.
22141da177e4SLinus Torvalds  *
22151da177e4SLinus Torvalds  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
22161da177e4SLinus Torvalds  * a race there is benign: unlock_buffer() only uses the bh's address for
22171da177e4SLinus Torvalds  * hashing after unlocking the buffer, so it doesn't actually touch the bh
22181da177e4SLinus Torvalds  * itself.
22191da177e4SLinus Torvalds  */
22201da177e4SLinus Torvalds static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
22211da177e4SLinus Torvalds {
22221da177e4SLinus Torvalds 	if (uptodate) {
22231da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
22241da177e4SLinus Torvalds 	} else {
22251da177e4SLinus Torvalds 		/* This happens due to failed READA attempts. */
22261da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
22271da177e4SLinus Torvalds 	}
22281da177e4SLinus Torvalds 	unlock_buffer(bh);
22291da177e4SLinus Torvalds }
22301da177e4SLinus Torvalds 
22311da177e4SLinus Torvalds /*
22321da177e4SLinus Torvalds  * On entry, the page is not uptodate at all.
22331da177e4SLinus Torvalds  * On exit, the page is fully uptodate in the areas outside (from,to).
22341da177e4SLinus Torvalds  */
22351da177e4SLinus Torvalds int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
22361da177e4SLinus Torvalds 			get_block_t *get_block)
22371da177e4SLinus Torvalds {
22381da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
22391da177e4SLinus Torvalds 	const unsigned blkbits = inode->i_blkbits;
22401da177e4SLinus Torvalds 	const unsigned blocksize = 1 << blkbits;
22411da177e4SLinus Torvalds 	struct buffer_head map_bh;
22421da177e4SLinus Torvalds 	struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
22431da177e4SLinus Torvalds 	unsigned block_in_page;
22441da177e4SLinus Torvalds 	unsigned block_start;
22451da177e4SLinus Torvalds 	sector_t block_in_file;
22461da177e4SLinus Torvalds 	char *kaddr;
22471da177e4SLinus Torvalds 	int nr_reads = 0;
22481da177e4SLinus Torvalds 	int i;
22491da177e4SLinus Torvalds 	int ret = 0;
22501da177e4SLinus Torvalds 	int is_mapped_to_disk = 1;
22511da177e4SLinus Torvalds 	int dirtied_it = 0;
22521da177e4SLinus Torvalds 
22531da177e4SLinus Torvalds 	if (PageMappedToDisk(page))
22541da177e4SLinus Torvalds 		return 0;
22551da177e4SLinus Torvalds 
22561da177e4SLinus Torvalds 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
22571da177e4SLinus Torvalds 	map_bh.b_page = page;
22581da177e4SLinus Torvalds 
22591da177e4SLinus Torvalds 	/*
22601da177e4SLinus Torvalds 	 * We loop across all blocks in the page, whether or not they are
22611da177e4SLinus Torvalds 	 * part of the affected region.  This is so we can discover if the
22621da177e4SLinus Torvalds 	 * page is fully mapped-to-disk.
22631da177e4SLinus Torvalds 	 */
22641da177e4SLinus Torvalds 	for (block_start = 0, block_in_page = 0;
22651da177e4SLinus Torvalds 		  block_start < PAGE_CACHE_SIZE;
22661da177e4SLinus Torvalds 		  block_in_page++, block_start += blocksize) {
22671da177e4SLinus Torvalds 		unsigned block_end = block_start + blocksize;
22681da177e4SLinus Torvalds 		int create;
22691da177e4SLinus Torvalds 
22701da177e4SLinus Torvalds 		map_bh.b_state = 0;
22711da177e4SLinus Torvalds 		create = 1;
22721da177e4SLinus Torvalds 		if (block_start >= to)
22731da177e4SLinus Torvalds 			create = 0;
2274b0cf2321SBadari Pulavarty 		map_bh.b_size = blocksize;
22751da177e4SLinus Torvalds 		ret = get_block(inode, block_in_file + block_in_page,
22761da177e4SLinus Torvalds 					&map_bh, create);
22771da177e4SLinus Torvalds 		if (ret)
22781da177e4SLinus Torvalds 			goto failed;
22791da177e4SLinus Torvalds 		if (!buffer_mapped(&map_bh))
22801da177e4SLinus Torvalds 			is_mapped_to_disk = 0;
22811da177e4SLinus Torvalds 		if (buffer_new(&map_bh))
22821da177e4SLinus Torvalds 			unmap_underlying_metadata(map_bh.b_bdev,
22831da177e4SLinus Torvalds 							map_bh.b_blocknr);
22841da177e4SLinus Torvalds 		if (PageUptodate(page))
22851da177e4SLinus Torvalds 			continue;
22861da177e4SLinus Torvalds 		if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
22871da177e4SLinus Torvalds 			kaddr = kmap_atomic(page, KM_USER0);
22881da177e4SLinus Torvalds 			if (block_start < from) {
22891da177e4SLinus Torvalds 				memset(kaddr+block_start, 0, from-block_start);
22901da177e4SLinus Torvalds 				dirtied_it = 1;
22911da177e4SLinus Torvalds 			}
22921da177e4SLinus Torvalds 			if (block_end > to) {
22931da177e4SLinus Torvalds 				memset(kaddr + to, 0, block_end - to);
22941da177e4SLinus Torvalds 				dirtied_it = 1;
22951da177e4SLinus Torvalds 			}
22961da177e4SLinus Torvalds 			flush_dcache_page(page);
22971da177e4SLinus Torvalds 			kunmap_atomic(kaddr, KM_USER0);
22981da177e4SLinus Torvalds 			continue;
22991da177e4SLinus Torvalds 		}
23001da177e4SLinus Torvalds 		if (buffer_uptodate(&map_bh))
23011da177e4SLinus Torvalds 			continue;	/* reiserfs does this */
23021da177e4SLinus Torvalds 		if (block_start < from || block_end > to) {
23031da177e4SLinus Torvalds 			struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
23041da177e4SLinus Torvalds 
23051da177e4SLinus Torvalds 			if (!bh) {
23061da177e4SLinus Torvalds 				ret = -ENOMEM;
23071da177e4SLinus Torvalds 				goto failed;
23081da177e4SLinus Torvalds 			}
23091da177e4SLinus Torvalds 			bh->b_state = map_bh.b_state;
23101da177e4SLinus Torvalds 			atomic_set(&bh->b_count, 0);
23111da177e4SLinus Torvalds 			bh->b_this_page = NULL;
23121da177e4SLinus Torvalds 			bh->b_page = page;
23131da177e4SLinus Torvalds 			bh->b_blocknr = map_bh.b_blocknr;
23141da177e4SLinus Torvalds 			bh->b_size = blocksize;
23151da177e4SLinus Torvalds 			bh->b_data = (char *)(long)block_start;
23161da177e4SLinus Torvalds 			bh->b_bdev = map_bh.b_bdev;
23171da177e4SLinus Torvalds 			bh->b_private = NULL;
23181da177e4SLinus Torvalds 			read_bh[nr_reads++] = bh;
23191da177e4SLinus Torvalds 		}
23201da177e4SLinus Torvalds 	}
23211da177e4SLinus Torvalds 
23221da177e4SLinus Torvalds 	if (nr_reads) {
23231da177e4SLinus Torvalds 		struct buffer_head *bh;
23241da177e4SLinus Torvalds 
23251da177e4SLinus Torvalds 		/*
23261da177e4SLinus Torvalds 		 * The page is locked, so these buffers are protected from
23271da177e4SLinus Torvalds 		 * any VM or truncate activity.  Hence we don't need to care
23281da177e4SLinus Torvalds 		 * for the buffer_head refcounts.
23291da177e4SLinus Torvalds 		 */
23301da177e4SLinus Torvalds 		for (i = 0; i < nr_reads; i++) {
23311da177e4SLinus Torvalds 			bh = read_bh[i];
23321da177e4SLinus Torvalds 			lock_buffer(bh);
23331da177e4SLinus Torvalds 			bh->b_end_io = end_buffer_read_nobh;
23341da177e4SLinus Torvalds 			submit_bh(READ, bh);
23351da177e4SLinus Torvalds 		}
23361da177e4SLinus Torvalds 		for (i = 0; i < nr_reads; i++) {
23371da177e4SLinus Torvalds 			bh = read_bh[i];
23381da177e4SLinus Torvalds 			wait_on_buffer(bh);
23391da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
23401da177e4SLinus Torvalds 				ret = -EIO;
23411da177e4SLinus Torvalds 			free_buffer_head(bh);
23421da177e4SLinus Torvalds 			read_bh[i] = NULL;
23431da177e4SLinus Torvalds 		}
23441da177e4SLinus Torvalds 		if (ret)
23451da177e4SLinus Torvalds 			goto failed;
23461da177e4SLinus Torvalds 	}
23471da177e4SLinus Torvalds 
23481da177e4SLinus Torvalds 	if (is_mapped_to_disk)
23491da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
23501da177e4SLinus Torvalds 	SetPageUptodate(page);
23511da177e4SLinus Torvalds 
23521da177e4SLinus Torvalds 	/*
23531da177e4SLinus Torvalds 	 * Setting the page dirty here isn't necessary for the prepare_write
23541da177e4SLinus Torvalds 	 * function - commit_write will do that.  But if/when this function is
23551da177e4SLinus Torvalds 	 * used within the pagefault handler to ensure that all mmapped pages
23561da177e4SLinus Torvalds 	 * have backing space in the filesystem, we will need to dirty the page
23571da177e4SLinus Torvalds 	 * if its contents were altered.
23581da177e4SLinus Torvalds 	 */
23591da177e4SLinus Torvalds 	if (dirtied_it)
23601da177e4SLinus Torvalds 		set_page_dirty(page);
23611da177e4SLinus Torvalds 
23621da177e4SLinus Torvalds 	return 0;
23631da177e4SLinus Torvalds 
23641da177e4SLinus Torvalds failed:
23651da177e4SLinus Torvalds 	for (i = 0; i < nr_reads; i++) {
23661da177e4SLinus Torvalds 		if (read_bh[i])
23671da177e4SLinus Torvalds 			free_buffer_head(read_bh[i]);
23681da177e4SLinus Torvalds 	}
23691da177e4SLinus Torvalds 
23701da177e4SLinus Torvalds 	/*
23711da177e4SLinus Torvalds 	 * Error recovery is pretty slack.  Clear the page and mark it dirty
23721da177e4SLinus Torvalds 	 * so we'll later zero out any blocks which _were_ allocated.
23731da177e4SLinus Torvalds 	 */
23741da177e4SLinus Torvalds 	kaddr = kmap_atomic(page, KM_USER0);
23751da177e4SLinus Torvalds 	memset(kaddr, 0, PAGE_CACHE_SIZE);
23768c581651SMonakhov Dmitriy 	flush_dcache_page(page);
23771da177e4SLinus Torvalds 	kunmap_atomic(kaddr, KM_USER0);
23781da177e4SLinus Torvalds 	SetPageUptodate(page);
23791da177e4SLinus Torvalds 	set_page_dirty(page);
23801da177e4SLinus Torvalds 	return ret;
23811da177e4SLinus Torvalds }
23821da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_prepare_write);
23831da177e4SLinus Torvalds 
23841da177e4SLinus Torvalds int nobh_commit_write(struct file *file, struct page *page,
23851da177e4SLinus Torvalds 		unsigned from, unsigned to)
23861da177e4SLinus Torvalds {
23871da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
23881da177e4SLinus Torvalds 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
23891da177e4SLinus Torvalds 
23901da177e4SLinus Torvalds 	set_page_dirty(page);
23911da177e4SLinus Torvalds 	if (pos > inode->i_size) {
23921da177e4SLinus Torvalds 		i_size_write(inode, pos);
23931da177e4SLinus Torvalds 		mark_inode_dirty(inode);
23941da177e4SLinus Torvalds 	}
23951da177e4SLinus Torvalds 	return 0;
23961da177e4SLinus Torvalds }
23971da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_commit_write);
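
/*
 * Example (a minimal sketch; the myfs_* names are hypothetical): a
 * filesystem avoiding buffer_heads for data (like ext2's "nobh" mount
 * option) pairs the nobh helpers in its address_space_operations.
 */
#if 0
static int myfs_nobh_prepare_write(struct file *file, struct page *page,
				   unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, myfs_get_block);
}

static const struct address_space_operations myfs_nobh_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_nobh_writepage,
	.prepare_write	= myfs_nobh_prepare_write,
	.commit_write	= nobh_commit_write,
};
#endif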
23981da177e4SLinus Torvalds 
23991da177e4SLinus Torvalds /*
24001da177e4SLinus Torvalds  * nobh_writepage() - based on block_write_full_page() except
24011da177e4SLinus Torvalds  * that it tries to operate without attaching bufferheads to
24021da177e4SLinus Torvalds  * the page.
24031da177e4SLinus Torvalds  */
24041da177e4SLinus Torvalds int nobh_writepage(struct page *page, get_block_t *get_block,
24051da177e4SLinus Torvalds 			struct writeback_control *wbc)
24061da177e4SLinus Torvalds {
24071da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
24081da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
24091da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
24101da177e4SLinus Torvalds 	unsigned offset;
24111da177e4SLinus Torvalds 	void *kaddr;
24121da177e4SLinus Torvalds 	int ret;
24131da177e4SLinus Torvalds 
24141da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
24151da177e4SLinus Torvalds 	if (page->index < end_index)
24161da177e4SLinus Torvalds 		goto out;
24171da177e4SLinus Torvalds 
24181da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
24191da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
24201da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
24211da177e4SLinus Torvalds 		/*
24221da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
24231da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
24241da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
24251da177e4SLinus Torvalds 		 */
24261da177e4SLinus Torvalds #if 0
24271da177e4SLinus Torvalds 		/* Not really sure about this - do we need it? */
24281da177e4SLinus Torvalds 		if (page->mapping->a_ops->invalidatepage)
24291da177e4SLinus Torvalds 			page->mapping->a_ops->invalidatepage(page, offset);
24301da177e4SLinus Torvalds #endif
24311da177e4SLinus Torvalds 		unlock_page(page);
24321da177e4SLinus Torvalds 		return 0; /* don't care */
24331da177e4SLinus Torvalds 	}
24341da177e4SLinus Torvalds 
24351da177e4SLinus Torvalds 	/*
24361da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
24371da177e4SLinus Torvalds 	 * writepage invocation because it may be mmapped.  "A file is mapped
24381da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
24391da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
24401da177e4SLinus Torvalds  * the page size, the remaining memory is zeroed when mapped, and
24411da177e4SLinus Torvalds 	 */
24421da177e4SLinus Torvalds 	kaddr = kmap_atomic(page, KM_USER0);
24431da177e4SLinus Torvalds 	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
24441da177e4SLinus Torvalds 	flush_dcache_page(page);
24451da177e4SLinus Torvalds 	kunmap_atomic(kaddr, KM_USER0);
24461da177e4SLinus Torvalds out:
24471da177e4SLinus Torvalds 	ret = mpage_writepage(page, get_block, wbc);
24481da177e4SLinus Torvalds 	if (ret == -EAGAIN)
24491da177e4SLinus Torvalds 		ret = __block_write_full_page(inode, page, get_block, wbc);
24501da177e4SLinus Torvalds 	return ret;
24511da177e4SLinus Torvalds }
24521da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_writepage);
24531da177e4SLinus Torvalds 
24541da177e4SLinus Torvalds /*
24551da177e4SLinus Torvalds  * This function assumes that ->prepare_write() uses nobh_prepare_write().
24561da177e4SLinus Torvalds  */
24571da177e4SLinus Torvalds int nobh_truncate_page(struct address_space *mapping, loff_t from)
24581da177e4SLinus Torvalds {
24591da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
24601da177e4SLinus Torvalds 	unsigned blocksize = 1 << inode->i_blkbits;
24611da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
24621da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
24631da177e4SLinus Torvalds 	unsigned to;
24641da177e4SLinus Torvalds 	struct page *page;
2465f5e54d6eSChristoph Hellwig 	const struct address_space_operations *a_ops = mapping->a_ops;
24661da177e4SLinus Torvalds 	char *kaddr;
24671da177e4SLinus Torvalds 	int ret = 0;
24681da177e4SLinus Torvalds 
24691da177e4SLinus Torvalds 	if ((offset & (blocksize - 1)) == 0)
24701da177e4SLinus Torvalds 		goto out;
24711da177e4SLinus Torvalds 
24721da177e4SLinus Torvalds 	ret = -ENOMEM;
24731da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
24741da177e4SLinus Torvalds 	if (!page)
24751da177e4SLinus Torvalds 		goto out;
24761da177e4SLinus Torvalds 
24771da177e4SLinus Torvalds 	to = (offset + blocksize) & ~(blocksize - 1);
24781da177e4SLinus Torvalds 	ret = a_ops->prepare_write(NULL, page, offset, to);
24791da177e4SLinus Torvalds 	if (ret == 0) {
24801da177e4SLinus Torvalds 		kaddr = kmap_atomic(page, KM_USER0);
24811da177e4SLinus Torvalds 		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
24821da177e4SLinus Torvalds 		flush_dcache_page(page);
24831da177e4SLinus Torvalds 		kunmap_atomic(kaddr, KM_USER0);
24841da177e4SLinus Torvalds 		set_page_dirty(page);
24851da177e4SLinus Torvalds 	}
24861da177e4SLinus Torvalds 	unlock_page(page);
24871da177e4SLinus Torvalds 	page_cache_release(page);
24881da177e4SLinus Torvalds out:
24891da177e4SLinus Torvalds 	return ret;
24901da177e4SLinus Torvalds }
24911da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_truncate_page);
24921da177e4SLinus Torvalds 
24931da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping,
24941da177e4SLinus Torvalds 			loff_t from, get_block_t *get_block)
24951da177e4SLinus Torvalds {
24961da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
24971da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
24981da177e4SLinus Torvalds 	unsigned blocksize;
249954b21a79SAndrew Morton 	sector_t iblock;
25001da177e4SLinus Torvalds 	unsigned length, pos;
25011da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
25021da177e4SLinus Torvalds 	struct page *page;
25031da177e4SLinus Torvalds 	struct buffer_head *bh;
25041da177e4SLinus Torvalds 	void *kaddr;
25051da177e4SLinus Torvalds 	int err;
25061da177e4SLinus Torvalds 
25071da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
25081da177e4SLinus Torvalds 	length = offset & (blocksize - 1);
25091da177e4SLinus Torvalds 
25101da177e4SLinus Torvalds 	/* Block boundary? Nothing to do */
25111da177e4SLinus Torvalds 	if (!length)
25121da177e4SLinus Torvalds 		return 0;
25131da177e4SLinus Torvalds 
25141da177e4SLinus Torvalds 	length = blocksize - length;
251554b21a79SAndrew Morton 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
25161da177e4SLinus Torvalds 
25171da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
25181da177e4SLinus Torvalds 	err = -ENOMEM;
25191da177e4SLinus Torvalds 	if (!page)
25201da177e4SLinus Torvalds 		goto out;
25211da177e4SLinus Torvalds 
25221da177e4SLinus Torvalds 	if (!page_has_buffers(page))
25231da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
25241da177e4SLinus Torvalds 
25251da177e4SLinus Torvalds 	/* Find the buffer that contains "offset" */
25261da177e4SLinus Torvalds 	bh = page_buffers(page);
25271da177e4SLinus Torvalds 	pos = blocksize;
25281da177e4SLinus Torvalds 	while (offset >= pos) {
25291da177e4SLinus Torvalds 		bh = bh->b_this_page;
25301da177e4SLinus Torvalds 		iblock++;
25311da177e4SLinus Torvalds 		pos += blocksize;
25321da177e4SLinus Torvalds 	}
25331da177e4SLinus Torvalds 
25341da177e4SLinus Torvalds 	err = 0;
25351da177e4SLinus Torvalds 	if (!buffer_mapped(bh)) {
2536b0cf2321SBadari Pulavarty 		WARN_ON(bh->b_size != blocksize);
25371da177e4SLinus Torvalds 		err = get_block(inode, iblock, bh, 0);
25381da177e4SLinus Torvalds 		if (err)
25391da177e4SLinus Torvalds 			goto unlock;
25401da177e4SLinus Torvalds 		/* unmapped? It's a hole - nothing to do */
25411da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
25421da177e4SLinus Torvalds 			goto unlock;
25431da177e4SLinus Torvalds 	}
25441da177e4SLinus Torvalds 
25451da177e4SLinus Torvalds 	/* Ok, it's mapped. Make sure it's up-to-date */
25461da177e4SLinus Torvalds 	if (PageUptodate(page))
25471da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
25481da177e4SLinus Torvalds 
254933a266ddSDavid Chinner 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
25501da177e4SLinus Torvalds 		err = -EIO;
25511da177e4SLinus Torvalds 		ll_rw_block(READ, 1, &bh);
25521da177e4SLinus Torvalds 		wait_on_buffer(bh);
25531da177e4SLinus Torvalds 		/* Uhhuh. Read error. Complain and punt. */
25541da177e4SLinus Torvalds 		if (!buffer_uptodate(bh))
25551da177e4SLinus Torvalds 			goto unlock;
25561da177e4SLinus Torvalds 	}
25571da177e4SLinus Torvalds 
25581da177e4SLinus Torvalds 	kaddr = kmap_atomic(page, KM_USER0);
25591da177e4SLinus Torvalds 	memset(kaddr + offset, 0, length);
25601da177e4SLinus Torvalds 	flush_dcache_page(page);
25611da177e4SLinus Torvalds 	kunmap_atomic(kaddr, KM_USER0);
25621da177e4SLinus Torvalds 
25631da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
25641da177e4SLinus Torvalds 	err = 0;
25651da177e4SLinus Torvalds 
25661da177e4SLinus Torvalds unlock:
25671da177e4SLinus Torvalds 	unlock_page(page);
25681da177e4SLinus Torvalds 	page_cache_release(page);
25691da177e4SLinus Torvalds out:
25701da177e4SLinus Torvalds 	return err;
25711da177e4SLinus Torvalds }
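
/*
 * Example (a minimal sketch; the myfs_* names are hypothetical): a
 * truncate path zeroes the tail of the new last block before the
 * on-disk blocks beyond it are freed.
 */
#if 0
static int myfs_zero_last_block(struct inode *inode)
{
	return block_truncate_page(inode->i_mapping, inode->i_size,
				   myfs_get_block);
}
#endif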
25721da177e4SLinus Torvalds 
25731da177e4SLinus Torvalds /*
25741da177e4SLinus Torvalds  * The generic ->writepage function for buffer-backed address_spaces
25751da177e4SLinus Torvalds  */
25761da177e4SLinus Torvalds int block_write_full_page(struct page *page, get_block_t *get_block,
25771da177e4SLinus Torvalds 			struct writeback_control *wbc)
25781da177e4SLinus Torvalds {
25791da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
25801da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
25811da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
25821da177e4SLinus Torvalds 	unsigned offset;
25831da177e4SLinus Torvalds 	void *kaddr;
25841da177e4SLinus Torvalds 
25851da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
25861da177e4SLinus Torvalds 	if (page->index < end_index)
25871da177e4SLinus Torvalds 		return __block_write_full_page(inode, page, get_block, wbc);
25881da177e4SLinus Torvalds 
25891da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
25901da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
25911da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
25921da177e4SLinus Torvalds 		/*
25931da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
25941da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
25951da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
25961da177e4SLinus Torvalds 		 */
2597aaa4059bSJan Kara 		do_invalidatepage(page, 0);
25981da177e4SLinus Torvalds 		unlock_page(page);
25991da177e4SLinus Torvalds 		return 0; /* don't care */
26001da177e4SLinus Torvalds 	}
26011da177e4SLinus Torvalds 
26021da177e4SLinus Torvalds 	/*
26031da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
26041da177e4SLinus Torvalds  * writepage invocation because it may be mmapped.  "A file is mapped
26051da177e4SLinus Torvalds  * in multiples of the page size.  For a file that is not a multiple of
26061da177e4SLinus Torvalds  * the page size, the remaining memory is zeroed when mapped, and
26071da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
26081da177e4SLinus Torvalds 	 */
26091da177e4SLinus Torvalds 	kaddr = kmap_atomic(page, KM_USER0);
26101da177e4SLinus Torvalds 	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
26111da177e4SLinus Torvalds 	flush_dcache_page(page);
26121da177e4SLinus Torvalds 	kunmap_atomic(kaddr, KM_USER0);
26131da177e4SLinus Torvalds 	return __block_write_full_page(inode, page, get_block, wbc);
26141da177e4SLinus Torvalds }
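
/*
 * Example (a minimal sketch; the myfs_* names are hypothetical): most
 * block-backed filesystems implement ->writepage() as a one-line
 * wrapper around block_write_full_page().
 */
#if 0
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}
#endif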
26151da177e4SLinus Torvalds 
26161da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
26171da177e4SLinus Torvalds 			    get_block_t *get_block)
26181da177e4SLinus Torvalds {
26191da177e4SLinus Torvalds 	struct buffer_head tmp;
26201da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
26211da177e4SLinus Torvalds 	tmp.b_state = 0;
26221da177e4SLinus Torvalds 	tmp.b_blocknr = 0;
2623b0cf2321SBadari Pulavarty 	tmp.b_size = 1 << inode->i_blkbits;
26241da177e4SLinus Torvalds 	get_block(inode, block, &tmp, 0);
26251da177e4SLinus Torvalds 	return tmp.b_blocknr;
26261da177e4SLinus Torvalds }
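
/*
 * Example (a minimal sketch; the myfs_* names are hypothetical):
 * generic_block_bmap() typically backs the ->bmap() method consumed by
 * the FIBMAP ioctl and the swap code.
 */
#if 0
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif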
26271da177e4SLinus Torvalds 
26281da177e4SLinus Torvalds static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
26291da177e4SLinus Torvalds {
26301da177e4SLinus Torvalds 	struct buffer_head *bh = bio->bi_private;
26311da177e4SLinus Torvalds 
26321da177e4SLinus Torvalds 	if (bio->bi_size)
26331da177e4SLinus Torvalds 		return 1;
26341da177e4SLinus Torvalds 
26351da177e4SLinus Torvalds 	if (err == -EOPNOTSUPP) {
26361da177e4SLinus Torvalds 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
26371da177e4SLinus Torvalds 		set_bit(BH_Eopnotsupp, &bh->b_state);
26381da177e4SLinus Torvalds 	}
26391da177e4SLinus Torvalds 
26401da177e4SLinus Torvalds 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
26411da177e4SLinus Torvalds 	bio_put(bio);
26421da177e4SLinus Torvalds 	return 0;
26431da177e4SLinus Torvalds }
26441da177e4SLinus Torvalds 
26451da177e4SLinus Torvalds int submit_bh(int rw, struct buffer_head * bh)
26461da177e4SLinus Torvalds {
26471da177e4SLinus Torvalds 	struct bio *bio;
26481da177e4SLinus Torvalds 	int ret = 0;
26491da177e4SLinus Torvalds 
26501da177e4SLinus Torvalds 	BUG_ON(!buffer_locked(bh));
26511da177e4SLinus Torvalds 	BUG_ON(!buffer_mapped(bh));
26521da177e4SLinus Torvalds 	BUG_ON(!bh->b_end_io);
26531da177e4SLinus Torvalds 
26541da177e4SLinus Torvalds 	if (buffer_ordered(bh) && (rw == WRITE))
26551da177e4SLinus Torvalds 		rw = WRITE_BARRIER;
26561da177e4SLinus Torvalds 
26571da177e4SLinus Torvalds 	/*
26581da177e4SLinus Torvalds 	 * Only clear out a write error when rewriting.  Should this
26591da177e4SLinus Torvalds 	 * include WRITE_SYNC as well?
26601da177e4SLinus Torvalds 	 */
26611da177e4SLinus Torvalds 	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
26621da177e4SLinus Torvalds 		clear_buffer_write_io_error(bh);
26631da177e4SLinus Torvalds 
26641da177e4SLinus Torvalds 	/*
26651da177e4SLinus Torvalds 	 * from here on down, it's all bio -- do the initial mapping,
26661da177e4SLinus Torvalds 	 * submit_bio -> generic_make_request may further map this bio around
26671da177e4SLinus Torvalds 	 */
26681da177e4SLinus Torvalds 	bio = bio_alloc(GFP_NOIO, 1);
26691da177e4SLinus Torvalds 
26701da177e4SLinus Torvalds 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
26711da177e4SLinus Torvalds 	bio->bi_bdev = bh->b_bdev;
26721da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_page = bh->b_page;
26731da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_len = bh->b_size;
26741da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
26751da177e4SLinus Torvalds 
26761da177e4SLinus Torvalds 	bio->bi_vcnt = 1;
26771da177e4SLinus Torvalds 	bio->bi_idx = 0;
26781da177e4SLinus Torvalds 	bio->bi_size = bh->b_size;
26791da177e4SLinus Torvalds 
26801da177e4SLinus Torvalds 	bio->bi_end_io = end_bio_bh_io_sync;
26811da177e4SLinus Torvalds 	bio->bi_private = bh;
26821da177e4SLinus Torvalds 
26831da177e4SLinus Torvalds 	bio_get(bio);
26841da177e4SLinus Torvalds 	submit_bio(rw, bio);
26851da177e4SLinus Torvalds 
26861da177e4SLinus Torvalds 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
26871da177e4SLinus Torvalds 		ret = -EOPNOTSUPP;
26881da177e4SLinus Torvalds 
26891da177e4SLinus Torvalds 	bio_put(bio);
26901da177e4SLinus Torvalds 	return ret;
26911da177e4SLinus Torvalds }
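
/*
 * Example (a minimal sketch): a synchronous read of one mapped buffer
 * via submit_bh(), using the end_buffer_read_sync() handler exported
 * at the bottom of this file.
 */
#if 0
static int read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);		/* reference dropped by end_buffer_read_sync */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif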
26921da177e4SLinus Torvalds 
26931da177e4SLinus Torvalds /**
26941da177e4SLinus Torvalds  * ll_rw_block: low-level access to block devices (DEPRECATED)
2695a7662236SJan Kara  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
26961da177e4SLinus Torvalds  * @nr: number of &struct buffer_heads in the array
26971da177e4SLinus Torvalds  * @bhs: array of pointers to &struct buffer_head
26981da177e4SLinus Torvalds  *
2699a7662236SJan Kara  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2700a7662236SJan Kara  * requests an I/O operation on them, either a %READ or a %WRITE.  The
2701a7662236SJan Kara  * third option, %SWRITE, is like %WRITE except that the *current* data in
2702a7662236SJan Kara  * the buffers is sent to disk.  The fourth option, %READA, is described in
2703a7662236SJan Kara  * the documentation for generic_make_request(), which ll_rw_block() calls.
27041da177e4SLinus Torvalds  *
27051da177e4SLinus Torvalds  * This function drops any buffer that it cannot get a lock on (with the
2706a7662236SJan Kara  * BH_Lock state bit) unless %SWRITE is requested, any buffer that appears
2707a7662236SJan Kara  * to be clean when doing a write request, and any buffer that appears to
2708a7662236SJan Kara  * be up-to-date when doing a read request.  Further, it marks as clean the
2709a7662236SJan Kara  * buffers that are submitted for writing (the buffer cache won't assume
2710a7662236SJan Kara  * that they are actually clean until the buffer gets unlocked).
27111da177e4SLinus Torvalds  *
27121da177e4SLinus Torvalds  * ll_rw_block() sets b_end_io to a simple completion handler that marks
27131da177e4SLinus Torvalds  * the buffer up-to-date (if appropriate), unlocks the buffer, and wakes
27141da177e4SLinus Torvalds  * any waiters.
27151da177e4SLinus Torvalds  *
27161da177e4SLinus Torvalds  * All of the buffers must be for the same device, and each buffer's size
27171da177e4SLinus Torvalds  * must be a multiple of the current approved size for the device.
27181da177e4SLinus Torvalds  */
27191da177e4SLinus Torvalds void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
27201da177e4SLinus Torvalds {
27211da177e4SLinus Torvalds 	int i;
27221da177e4SLinus Torvalds 
27231da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
27241da177e4SLinus Torvalds 		struct buffer_head *bh = bhs[i];
27251da177e4SLinus Torvalds 
2726a7662236SJan Kara 		if (rw == SWRITE)
2727a7662236SJan Kara 			lock_buffer(bh);
2728a7662236SJan Kara 		else if (test_set_buffer_locked(bh))
27291da177e4SLinus Torvalds 			continue;
27301da177e4SLinus Torvalds 
2731a7662236SJan Kara 		if (rw == WRITE || rw == SWRITE) {
27321da177e4SLinus Torvalds 			if (test_clear_buffer_dirty(bh)) {
273376c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_write_sync;
2734e60e5c50SOGAWA Hirofumi 				get_bh(bh);
27351da177e4SLinus Torvalds 				submit_bh(WRITE, bh);
27361da177e4SLinus Torvalds 				continue;
27371da177e4SLinus Torvalds 			}
27381da177e4SLinus Torvalds 		} else {
27391da177e4SLinus Torvalds 			if (!buffer_uptodate(bh)) {
274076c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_read_sync;
2741e60e5c50SOGAWA Hirofumi 				get_bh(bh);
27421da177e4SLinus Torvalds 				submit_bh(rw, bh);
27431da177e4SLinus Torvalds 				continue;
27441da177e4SLinus Torvalds 			}
27451da177e4SLinus Torvalds 		}
27461da177e4SLinus Torvalds 		unlock_buffer(bh);
27471da177e4SLinus Torvalds 	}
27481da177e4SLinus Torvalds }
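
/*
 * Example (a minimal sketch): the classic caller pattern - start a read
 * on a buffer that is not yet uptodate, then wait and check the result.
 */
#if 0
	struct buffer_head *bh = sb_getblk(sb, block);

	if (bh && !buffer_uptodate(bh)) {
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			brelse(bh);	/* I/O error */
			bh = NULL;
		}
	}
#endif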
27491da177e4SLinus Torvalds 
27501da177e4SLinus Torvalds /*
27511da177e4SLinus Torvalds  * For a data-integrity writeout, we need to wait upon any in-progress I/O
27521da177e4SLinus Torvalds  * first, then start new I/O and wait upon it.  The caller must have a ref on
27531da177e4SLinus Torvalds  * the buffer_head.
27541da177e4SLinus Torvalds  */
27551da177e4SLinus Torvalds int sync_dirty_buffer(struct buffer_head *bh)
27561da177e4SLinus Torvalds {
27571da177e4SLinus Torvalds 	int ret = 0;
27581da177e4SLinus Torvalds 
27591da177e4SLinus Torvalds 	WARN_ON(atomic_read(&bh->b_count) < 1);
27601da177e4SLinus Torvalds 	lock_buffer(bh);
27611da177e4SLinus Torvalds 	if (test_clear_buffer_dirty(bh)) {
27621da177e4SLinus Torvalds 		get_bh(bh);
27631da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_write_sync;
27641da177e4SLinus Torvalds 		ret = submit_bh(WRITE, bh);
27651da177e4SLinus Torvalds 		wait_on_buffer(bh);
27661da177e4SLinus Torvalds 		if (buffer_eopnotsupp(bh)) {
27671da177e4SLinus Torvalds 			clear_buffer_eopnotsupp(bh);
27681da177e4SLinus Torvalds 			ret = -EOPNOTSUPP;
27691da177e4SLinus Torvalds 		}
27701da177e4SLinus Torvalds 		if (!ret && !buffer_uptodate(bh))
27711da177e4SLinus Torvalds 			ret = -EIO;
27721da177e4SLinus Torvalds 	} else {
27731da177e4SLinus Torvalds 		unlock_buffer(bh);
27741da177e4SLinus Torvalds 	}
27751da177e4SLinus Torvalds 	return ret;
27761da177e4SLinus Torvalds }
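
/*
 * Example (a minimal sketch; the myfs_* name is hypothetical): critical
 * metadata is dirtied and then forced out synchronously, with the
 * return value checked for I/O errors.
 */
#if 0
static int myfs_sync_super(struct buffer_head *sb_bh)
{
	mark_buffer_dirty(sb_bh);
	return sync_dirty_buffer(sb_bh);	/* 0, -EIO or -EOPNOTSUPP */
}
#endif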
27771da177e4SLinus Torvalds 
27781da177e4SLinus Torvalds /*
27791da177e4SLinus Torvalds  * try_to_free_buffers() checks if all the buffers on this particular page
27801da177e4SLinus Torvalds  * are unused, and releases them if so.
27811da177e4SLinus Torvalds  *
27821da177e4SLinus Torvalds  * Exclusion against try_to_free_buffers may be obtained by either
27831da177e4SLinus Torvalds  * locking the page or by holding its mapping's private_lock.
27841da177e4SLinus Torvalds  *
27851da177e4SLinus Torvalds  * If the page is dirty but all the buffers are clean then we need to
27861da177e4SLinus Torvalds  * be sure to mark the page clean as well.  This is because the page
27871da177e4SLinus Torvalds  * may be against a block device, and a later reattachment of buffers
27881da177e4SLinus Torvalds  * to a dirty page will set *all* buffers dirty, which would corrupt
27891da177e4SLinus Torvalds  * filesystem data on the same device.
27901da177e4SLinus Torvalds  *
27911da177e4SLinus Torvalds  * The same applies to regular filesystem pages: if all the buffers are
27921da177e4SLinus Torvalds  * clean then we set the page clean and proceed.  To do that, we require
27931da177e4SLinus Torvalds  * total exclusion from __set_page_dirty_buffers().  That is obtained with
27941da177e4SLinus Torvalds  * private_lock.
27951da177e4SLinus Torvalds  *
27961da177e4SLinus Torvalds  * try_to_free_buffers() is non-blocking.
27971da177e4SLinus Torvalds  */
27981da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh)
27991da177e4SLinus Torvalds {
28001da177e4SLinus Torvalds 	return atomic_read(&bh->b_count) |
28011da177e4SLinus Torvalds 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
28021da177e4SLinus Torvalds }
28031da177e4SLinus Torvalds 
28041da177e4SLinus Torvalds static int
28051da177e4SLinus Torvalds drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
28061da177e4SLinus Torvalds {
28071da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);
28081da177e4SLinus Torvalds 	struct buffer_head *bh;
28091da177e4SLinus Torvalds 
28101da177e4SLinus Torvalds 	bh = head;
28111da177e4SLinus Torvalds 	do {
2812de7d5a3bSakpm@osdl.org 		if (buffer_write_io_error(bh) && page->mapping)
28131da177e4SLinus Torvalds 			set_bit(AS_EIO, &page->mapping->flags);
28141da177e4SLinus Torvalds 		if (buffer_busy(bh))
28151da177e4SLinus Torvalds 			goto failed;
28161da177e4SLinus Torvalds 		bh = bh->b_this_page;
28171da177e4SLinus Torvalds 	} while (bh != head);
28181da177e4SLinus Torvalds 
28191da177e4SLinus Torvalds 	do {
28201da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
28211da177e4SLinus Torvalds 
28221da177e4SLinus Torvalds 		if (!list_empty(&bh->b_assoc_buffers))
28231da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
28241da177e4SLinus Torvalds 		bh = next;
28251da177e4SLinus Torvalds 	} while (bh != head);
28261da177e4SLinus Torvalds 	*buffers_to_free = head;
28271da177e4SLinus Torvalds 	__clear_page_buffers(page);
28281da177e4SLinus Torvalds 	return 1;
28291da177e4SLinus Torvalds failed:
28301da177e4SLinus Torvalds 	return 0;
28311da177e4SLinus Torvalds }
28321da177e4SLinus Torvalds 
28331da177e4SLinus Torvalds int try_to_free_buffers(struct page *page)
28341da177e4SLinus Torvalds {
28351da177e4SLinus Torvalds 	struct address_space * const mapping = page->mapping;
28361da177e4SLinus Torvalds 	struct buffer_head *buffers_to_free = NULL;
28371da177e4SLinus Torvalds 	int ret = 0;
28381da177e4SLinus Torvalds 
28391da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
2840ecdfc978SLinus Torvalds 	if (PageWriteback(page))
28411da177e4SLinus Torvalds 		return 0;
28421da177e4SLinus Torvalds 
28431da177e4SLinus Torvalds 	if (mapping == NULL) {		/* can this still happen? */
28441da177e4SLinus Torvalds 		ret = drop_buffers(page, &buffers_to_free);
28451da177e4SLinus Torvalds 		goto out;
28461da177e4SLinus Torvalds 	}
28471da177e4SLinus Torvalds 
28481da177e4SLinus Torvalds 	spin_lock(&mapping->private_lock);
28491da177e4SLinus Torvalds 	ret = drop_buffers(page, &buffers_to_free);
2850ecdfc978SLinus Torvalds 
2851ecdfc978SLinus Torvalds 	/*
2852ecdfc978SLinus Torvalds 	 * If the filesystem writes its buffers by hand (eg ext3)
2853ecdfc978SLinus Torvalds 	 * then we can have clean buffers against a dirty page.  We
2854ecdfc978SLinus Torvalds 	 * clean the page here; otherwise the VM will never notice
2855ecdfc978SLinus Torvalds 	 * that the filesystem did any IO at all.
2856ecdfc978SLinus Torvalds 	 *
2857ecdfc978SLinus Torvalds 	 * Also, during truncate, discard_buffer will have marked all
2858ecdfc978SLinus Torvalds 	 * the page's buffers clean.  We discover that here and clean
2859ecdfc978SLinus Torvalds 	 * the page also.
286087df7241SNick Piggin 	 *
286187df7241SNick Piggin 	 * private_lock must be held over this entire operation in order
286287df7241SNick Piggin 	 * to synchronise against __set_page_dirty_buffers and prevent the
286387df7241SNick Piggin 	 * dirty bit from being lost.
2864ecdfc978SLinus Torvalds 	 */
2865ecdfc978SLinus Torvalds 	if (ret)
2866ecdfc978SLinus Torvalds 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
286787df7241SNick Piggin 	spin_unlock(&mapping->private_lock);
28681da177e4SLinus Torvalds out:
28691da177e4SLinus Torvalds 	if (buffers_to_free) {
28701da177e4SLinus Torvalds 		struct buffer_head *bh = buffers_to_free;
28711da177e4SLinus Torvalds 
28721da177e4SLinus Torvalds 		do {
28731da177e4SLinus Torvalds 			struct buffer_head *next = bh->b_this_page;
28741da177e4SLinus Torvalds 			free_buffer_head(bh);
28751da177e4SLinus Torvalds 			bh = next;
28761da177e4SLinus Torvalds 		} while (bh != buffers_to_free);
28771da177e4SLinus Torvalds 	}
28781da177e4SLinus Torvalds 	return ret;
28791da177e4SLinus Torvalds }
28801da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers);
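
/*
 * Example (a minimal sketch; the myfs_* name is hypothetical): a
 * filesystem with no private page state can implement ->releasepage()
 * directly on top of try_to_free_buffers(); gfp_mask can be ignored
 * because try_to_free_buffers() never blocks.
 */
#if 0
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	return try_to_free_buffers(page);
}
#endif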
28811da177e4SLinus Torvalds 
28823978d717SNeilBrown void block_sync_page(struct page *page)
28831da177e4SLinus Torvalds {
28841da177e4SLinus Torvalds 	struct address_space *mapping;
28851da177e4SLinus Torvalds 
28861da177e4SLinus Torvalds 	smp_mb();
28871da177e4SLinus Torvalds 	mapping = page_mapping(page);
28881da177e4SLinus Torvalds 	if (mapping)
28891da177e4SLinus Torvalds 		blk_run_backing_dev(mapping->backing_dev_info, page);
28901da177e4SLinus Torvalds }
28911da177e4SLinus Torvalds 
28921da177e4SLinus Torvalds /*
28931da177e4SLinus Torvalds  * There are no bdflush tunables left.  But distributions are
28941da177e4SLinus Torvalds  * still running obsolete flush daemons, so we terminate them here.
28951da177e4SLinus Torvalds  *
28961da177e4SLinus Torvalds  * Use of bdflush() is deprecated and will be removed in a future kernel.
28971da177e4SLinus Torvalds  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
28981da177e4SLinus Torvalds  */
28991da177e4SLinus Torvalds asmlinkage long sys_bdflush(int func, long data)
29001da177e4SLinus Torvalds {
29011da177e4SLinus Torvalds 	static int msg_count;
29021da177e4SLinus Torvalds 
29031da177e4SLinus Torvalds 	if (!capable(CAP_SYS_ADMIN))
29041da177e4SLinus Torvalds 		return -EPERM;
29051da177e4SLinus Torvalds 
29061da177e4SLinus Torvalds 	if (msg_count < 5) {
29071da177e4SLinus Torvalds 		msg_count++;
29081da177e4SLinus Torvalds 		printk(KERN_INFO
29091da177e4SLinus Torvalds 			"warning: process `%s' used the obsolete bdflush"
29101da177e4SLinus Torvalds 			" system call\n", current->comm);
29111da177e4SLinus Torvalds 		printk(KERN_INFO "Fix your initscripts?\n");
29121da177e4SLinus Torvalds 	}
29131da177e4SLinus Torvalds 
29141da177e4SLinus Torvalds 	if (func == 1)
29151da177e4SLinus Torvalds 		do_exit(0);
29161da177e4SLinus Torvalds 	return 0;
29171da177e4SLinus Torvalds }
29181da177e4SLinus Torvalds 
29191da177e4SLinus Torvalds /*
29201da177e4SLinus Torvalds  * Buffer-head allocation
29211da177e4SLinus Torvalds  */
2922e18b890bSChristoph Lameter static struct kmem_cache *bh_cachep;
29231da177e4SLinus Torvalds 
29241da177e4SLinus Torvalds /*
29251da177e4SLinus Torvalds  * Once the number of bh's in the machine exceeds this level, we start
29261da177e4SLinus Torvalds  * stripping them in writeback.
29271da177e4SLinus Torvalds  */
29281da177e4SLinus Torvalds static int max_buffer_heads;
29291da177e4SLinus Torvalds 
29301da177e4SLinus Torvalds int buffer_heads_over_limit;
29311da177e4SLinus Torvalds 
29321da177e4SLinus Torvalds struct bh_accounting {
29331da177e4SLinus Torvalds 	int nr;			/* Number of live bh's */
29341da177e4SLinus Torvalds 	int ratelimit;		/* Limit cacheline bouncing */
29351da177e4SLinus Torvalds };
29361da177e4SLinus Torvalds 
29371da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
29381da177e4SLinus Torvalds 
29391da177e4SLinus Torvalds static void recalc_bh_state(void)
29401da177e4SLinus Torvalds {
29411da177e4SLinus Torvalds 	int i;
29421da177e4SLinus Torvalds 	int tot = 0;
29431da177e4SLinus Torvalds 
29441da177e4SLinus Torvalds 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
29451da177e4SLinus Torvalds 		return;
29461da177e4SLinus Torvalds 	__get_cpu_var(bh_accounting).ratelimit = 0;
29478a143426SEric Dumazet 	for_each_online_cpu(i)
29481da177e4SLinus Torvalds 		tot += per_cpu(bh_accounting, i).nr;
29491da177e4SLinus Torvalds 	buffer_heads_over_limit = (tot > max_buffer_heads);
29501da177e4SLinus Torvalds }
29511da177e4SLinus Torvalds 
2952dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
29531da177e4SLinus Torvalds {
29541da177e4SLinus Torvalds 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
29551da177e4SLinus Torvalds 	if (ret) {
2956736c7b80SCoywolf Qi Hunt 		get_cpu_var(bh_accounting).nr++;
29571da177e4SLinus Torvalds 		recalc_bh_state();
2958736c7b80SCoywolf Qi Hunt 		put_cpu_var(bh_accounting);
29591da177e4SLinus Torvalds 	}
29601da177e4SLinus Torvalds 	return ret;
29611da177e4SLinus Torvalds }
29621da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head);
29631da177e4SLinus Torvalds 
29641da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh)
29651da177e4SLinus Torvalds {
29661da177e4SLinus Torvalds 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
29671da177e4SLinus Torvalds 	kmem_cache_free(bh_cachep, bh);
2968736c7b80SCoywolf Qi Hunt 	get_cpu_var(bh_accounting).nr--;
29691da177e4SLinus Torvalds 	recalc_bh_state();
2970736c7b80SCoywolf Qi Hunt 	put_cpu_var(bh_accounting);
29711da177e4SLinus Torvalds }
29721da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head);
29731da177e4SLinus Torvalds 
29741da177e4SLinus Torvalds static void
2975e18b890bSChristoph Lameter init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
29761da177e4SLinus Torvalds {
29771da177e4SLinus Torvalds 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
29781da177e4SLinus Torvalds 			    SLAB_CTOR_CONSTRUCTOR) {
29791da177e4SLinus Torvalds 		struct buffer_head * bh = (struct buffer_head *)data;
29801da177e4SLinus Torvalds 
29811da177e4SLinus Torvalds 		memset(bh, 0, sizeof(*bh));
29821da177e4SLinus Torvalds 		INIT_LIST_HEAD(&bh->b_assoc_buffers);
29831da177e4SLinus Torvalds 	}
29841da177e4SLinus Torvalds }
29851da177e4SLinus Torvalds 
29861da177e4SLinus Torvalds static void buffer_exit_cpu(int cpu)
29871da177e4SLinus Torvalds {
29881da177e4SLinus Torvalds 	int i;
29891da177e4SLinus Torvalds 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
29901da177e4SLinus Torvalds 
29911da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
29921da177e4SLinus Torvalds 		brelse(b->bhs[i]);
29931da177e4SLinus Torvalds 		b->bhs[i] = NULL;
29941da177e4SLinus Torvalds 	}
29958a143426SEric Dumazet 	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
29968a143426SEric Dumazet 	per_cpu(bh_accounting, cpu).nr = 0;
29978a143426SEric Dumazet 	put_cpu_var(bh_accounting);
29981da177e4SLinus Torvalds }
29991da177e4SLinus Torvalds 
30001da177e4SLinus Torvalds static int buffer_cpu_notify(struct notifier_block *self,
30011da177e4SLinus Torvalds 			      unsigned long action, void *hcpu)
30021da177e4SLinus Torvalds {
30031da177e4SLinus Torvalds 	if (action == CPU_DEAD)
30041da177e4SLinus Torvalds 		buffer_exit_cpu((unsigned long)hcpu);
30051da177e4SLinus Torvalds 	return NOTIFY_OK;
30061da177e4SLinus Torvalds }
30071da177e4SLinus Torvalds 
30081da177e4SLinus Torvalds void __init buffer_init(void)
30091da177e4SLinus Torvalds {
30101da177e4SLinus Torvalds 	int nrpages;
30111da177e4SLinus Torvalds 
30121da177e4SLinus Torvalds 	bh_cachep = kmem_cache_create("buffer_head",
30131da177e4SLinus Torvalds 					sizeof(struct buffer_head), 0,
3014b0196009SPaul Jackson 					(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3015b0196009SPaul Jackson 					SLAB_MEM_SPREAD),
3016b0196009SPaul Jackson 					init_buffer_head,
3017b0196009SPaul Jackson 					NULL);
30181da177e4SLinus Torvalds 
30191da177e4SLinus Torvalds 	/*
30201da177e4SLinus Torvalds 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
30211da177e4SLinus Torvalds 	 */
30221da177e4SLinus Torvalds 	nrpages = (nr_free_buffer_pages() * 10) / 100;
30231da177e4SLinus Torvalds 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
30241da177e4SLinus Torvalds 	hotcpu_notifier(buffer_cpu_notify, 0);
30251da177e4SLinus Torvalds }
30261da177e4SLinus Torvalds 
30271da177e4SLinus Torvalds EXPORT_SYMBOL(__bforget);
30281da177e4SLinus Torvalds EXPORT_SYMBOL(__brelse);
30291da177e4SLinus Torvalds EXPORT_SYMBOL(__wait_on_buffer);
30301da177e4SLinus Torvalds EXPORT_SYMBOL(block_commit_write);
30311da177e4SLinus Torvalds EXPORT_SYMBOL(block_prepare_write);
30321da177e4SLinus Torvalds EXPORT_SYMBOL(block_read_full_page);
30331da177e4SLinus Torvalds EXPORT_SYMBOL(block_sync_page);
30341da177e4SLinus Torvalds EXPORT_SYMBOL(block_truncate_page);
30351da177e4SLinus Torvalds EXPORT_SYMBOL(block_write_full_page);
30361da177e4SLinus Torvalds EXPORT_SYMBOL(cont_prepare_write);
30371da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_read_sync);
30381da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_write_sync);
30391da177e4SLinus Torvalds EXPORT_SYMBOL(file_fsync);
30401da177e4SLinus Torvalds EXPORT_SYMBOL(fsync_bdev);
30411da177e4SLinus Torvalds EXPORT_SYMBOL(generic_block_bmap);
30421da177e4SLinus Torvalds EXPORT_SYMBOL(generic_commit_write);
30431da177e4SLinus Torvalds EXPORT_SYMBOL(generic_cont_expand);
304405eb0b51SOGAWA Hirofumi EXPORT_SYMBOL(generic_cont_expand_simple);
30451da177e4SLinus Torvalds EXPORT_SYMBOL(init_buffer);
30461da177e4SLinus Torvalds EXPORT_SYMBOL(invalidate_bdev);
30471da177e4SLinus Torvalds EXPORT_SYMBOL(ll_rw_block);
30481da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_dirty);
30491da177e4SLinus Torvalds EXPORT_SYMBOL(submit_bh);
30501da177e4SLinus Torvalds EXPORT_SYMBOL(sync_dirty_buffer);
30511da177e4SLinus Torvalds EXPORT_SYMBOL(unlock_buffer);
3052