xref: /linux/fs/buffer.c (revision ea0f04e59543bafb3d2cbe37a0d375acb0bb2c34)
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}
EXPORT_SYMBOL(init_buffer);

static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);

static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}


static int quiet_error(struct buffer_head *bh)
{
	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
		return 0;
	return 1;
}


static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);
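
/*
 * Illustrative sketch only (hypothetical function, not part of the
 * original file): the canonical pattern for reading one buffer
 * synchronously with end_buffer_read_sync() as the completion handler.
 * __bread_slow(), later in this file, does essentially this.
 */
static int __maybe_unused example_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	get_bh(bh);	/* reference is dropped by end_buffer_read_sync() */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);	/* sleeps until unlock_buffer() in the handler */
	return buffer_uptodate(bh) ? 0 : -EIO;
}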

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* We might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between file I/O on
	 * the block device and getblk.  It gets dealt with elsewhere;
	 * don't buffer_error if we had some unmapped buffers.
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media disk even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or due to an error of the user), by not destroying the dirty
   buffers we could generate corruption also on the next media inserted.
   Thus a parameter is necessary to handle this case in the safest way
   possible (trying not to corrupt the newly inserted disk with data
   belonging to the old, now corrupted, disk). Also for the ramdisk the
   natural thing to do in order to release the ramdisk memory is to
   destroy dirty buffers.

   These are two special cases. Normal usage implies that the device
   driver issues a sync on the device (without waiting for I/O completion)
   and then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update'
   case has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	lru_add_drain_all();	/* make sure all lru add caches are flushed */
	invalidate_mapping_pages(mapping, 0, -1);
}
EXPORT_SYMBOL(invalidate_bdev);

/*
 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone *zone;
	int nid;

	wakeup_flusher_threads(1024);
	yield();

	for_each_online_node(nid) {
		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
						gfp_zone(GFP_NOFS), NULL,
						&zone);
		if (zone)
			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
						GFP_NOFS, NULL);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (!quiet_error(bh))
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}
EXPORT_SYMBOL(end_buffer_async_write);

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads on any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);
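
/*
 * Illustrative sketch only (hypothetical function, not part of the
 * original file): how a writepage-style path uses
 * mark_buffer_async_write() before submitting buffers, in the manner of
 * __block_write_full_page().  Error handling and corner cases (e.g. a
 * page whose buffers are all clean, or not yet mapped) are omitted; the
 * page is assumed locked with mapped buffers.
 */
static void __maybe_unused example_write_page_buffers(struct page *page)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;

	/* Pass 1: lock each dirty buffer and flag it for async writeout */
	do {
		lock_buffer(bh);
		if (test_clear_buffer_dirty(bh))
			mark_buffer_async_write(bh);
		else
			unlock_buffer(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	set_page_writeback(page);
	unlock_page(page);

	/*
	 * Pass 2: submit the flagged buffers.  end_buffer_async_write()
	 * calls end_page_writeback() when the last one completes.
	 */
	do {
		struct buffer_head *next = bh->b_this_page;

		if (buffer_async_write(bh))
			submit_bh(WRITE, bh);
		bh = next;
	} while (bh != head);
}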


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), sync_mapping_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_buffers_list to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

static void do_thaw_one(struct super_block *sb, void *unused)
{
	char b[BDEVNAME_SIZE];
	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
		printk(KERN_WARNING "Emergency Thaw on %s\n",
		       bdevname(sb->s_bdev, b));
}

static void do_thaw_all(struct work_struct *work)
{
	iterate_supers(do_thaw_one, NULL);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
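
/*
 * Illustrative sketch only (hypothetical helper, not part of the
 * original file): the typical use of sync_mapping_buffers() from a
 * filesystem's fsync path, in the style of ext2.  It writes out and
 * waits on the metadata buffers that mark_buffer_dirty_inode() queued
 * on the inode's ->private_list; the data pages themselves are synced
 * separately by the VFS.
 */
static int __maybe_unused example_fsync_metadata(struct inode *inode)
{
	return sync_mapping_buffers(inode->i_mapping);
}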

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
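
/*
 * Illustrative sketch only (hypothetical helper, not part of the
 * original file): after modifying a metadata block that belongs to an
 * inode (an ext2-style indirect block, say), dirty it against that
 * inode so that a later sync_mapping_buffers() on the inode's mapping
 * writes it out.  @bh is a blockdev buffer, e.g. obtained via sb_bread().
 */
static void __maybe_unused example_dirty_metadata(struct buffer_head *bh,
						  struct inode *inode)
{
	/* ...modify bh->b_data here under the appropriate locks... */
	mark_buffer_dirty_inode(bh, inode);
}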

/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static void __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));
		account_page_dirtied(page, mapping);
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	spin_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	int newly_dirty;
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__set_page_dirty(page, mapping, 1);
	return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
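
/*
 * Illustrative sketch only (hypothetical ops table, not part of the
 * original file): __set_page_dirty_buffers() is the default
 * ->set_page_dirty() for buffer-backed pages (set_page_dirty() falls
 * back to it when an address_space supplies no method), and a
 * filesystem can also wire it in explicitly; other methods are omitted
 * here.
 */
static const struct address_space_operations example_aops __maybe_unused = {
	.set_page_dirty	= __set_page_dirty_buffers,
};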

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping, *prev_mapping = NULL;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				if (prev_mapping && prev_mapping != mapping)
					blk_run_address_space(prev_mapping);
				prev_mapping = mapping;

				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a page for a data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping),
 * which may not fail, from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_private = NULL;
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);
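
/*
 * Illustrative sketch only (hypothetical helper, not part of the
 * original file): allocating a set of buffer heads for a locked page
 * and attaching them, roughly what create_empty_buffers() does (minus
 * the state initialisation and private_lock handling) and what
 * link_dev_buffers() below does for the blockdev case.  With a nonzero
 * retry argument, alloc_page_buffers() never returns NULL.
 */
static void __maybe_unused example_attach_buffers(struct page *page,
						  unsigned long blocksize)
{
	struct buffer_head *head = alloc_page_buffers(page, blocksize, 1);
	struct buffer_head *bh = head, *tail;

	/* close the singly-linked b_this_page list into a ring */
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}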
9471da177e4SLinus Torvalds 
9481da177e4SLinus Torvalds static inline void
9491da177e4SLinus Torvalds link_dev_buffers(struct page *page, struct buffer_head *head)
9501da177e4SLinus Torvalds {
9511da177e4SLinus Torvalds 	struct buffer_head *bh, *tail;
9521da177e4SLinus Torvalds 
9531da177e4SLinus Torvalds 	bh = head;
9541da177e4SLinus Torvalds 	do {
9551da177e4SLinus Torvalds 		tail = bh;
9561da177e4SLinus Torvalds 		bh = bh->b_this_page;
9571da177e4SLinus Torvalds 	} while (bh);
9581da177e4SLinus Torvalds 	tail->b_this_page = head;
9591da177e4SLinus Torvalds 	attach_page_buffers(page, head);
9601da177e4SLinus Torvalds }
9611da177e4SLinus Torvalds 
9621da177e4SLinus Torvalds /*
9631da177e4SLinus Torvalds  * Initialise the state of a blockdev page's buffers.
9641da177e4SLinus Torvalds  */
9651da177e4SLinus Torvalds static void
9661da177e4SLinus Torvalds init_page_buffers(struct page *page, struct block_device *bdev,
9671da177e4SLinus Torvalds 			sector_t block, int size)
9681da177e4SLinus Torvalds {
9691da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);
9701da177e4SLinus Torvalds 	struct buffer_head *bh = head;
9711da177e4SLinus Torvalds 	int uptodate = PageUptodate(page);
9721da177e4SLinus Torvalds 
9731da177e4SLinus Torvalds 	do {
9741da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
9751da177e4SLinus Torvalds 			init_buffer(bh, NULL, NULL);
9761da177e4SLinus Torvalds 			bh->b_bdev = bdev;
9771da177e4SLinus Torvalds 			bh->b_blocknr = block;
9781da177e4SLinus Torvalds 			if (uptodate)
9791da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
9801da177e4SLinus Torvalds 			set_buffer_mapped(bh);
9811da177e4SLinus Torvalds 		}
9821da177e4SLinus Torvalds 		block++;
9831da177e4SLinus Torvalds 		bh = bh->b_this_page;
9841da177e4SLinus Torvalds 	} while (bh != head);
9851da177e4SLinus Torvalds }
9861da177e4SLinus Torvalds 
9871da177e4SLinus Torvalds /*
9881da177e4SLinus Torvalds  * Create the page-cache page that contains the requested block.
9891da177e4SLinus Torvalds  *
9901da177e4SLinus Torvalds  * This is user purely for blockdev mappings.
9911da177e4SLinus Torvalds  */
9921da177e4SLinus Torvalds static struct page *
9931da177e4SLinus Torvalds grow_dev_page(struct block_device *bdev, sector_t block,
9941da177e4SLinus Torvalds 		pgoff_t index, int size)
9951da177e4SLinus Torvalds {
9961da177e4SLinus Torvalds 	struct inode *inode = bdev->bd_inode;
9971da177e4SLinus Torvalds 	struct page *page;
9981da177e4SLinus Torvalds 	struct buffer_head *bh;
9991da177e4SLinus Torvalds 
1000ea125892SChristoph Lameter 	page = find_or_create_page(inode->i_mapping, index,
1001769848c0SMel Gorman 		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
10021da177e4SLinus Torvalds 	if (!page)
10031da177e4SLinus Torvalds 		return NULL;
10041da177e4SLinus Torvalds 
1005e827f923SEric Sesterhenn 	BUG_ON(!PageLocked(page));
10061da177e4SLinus Torvalds 
10071da177e4SLinus Torvalds 	if (page_has_buffers(page)) {
10081da177e4SLinus Torvalds 		bh = page_buffers(page);
10091da177e4SLinus Torvalds 		if (bh->b_size == size) {
10101da177e4SLinus Torvalds 			init_page_buffers(page, bdev, block, size);
10111da177e4SLinus Torvalds 			return page;
10121da177e4SLinus Torvalds 		}
10131da177e4SLinus Torvalds 		if (!try_to_free_buffers(page))
10141da177e4SLinus Torvalds 			goto failed;
10151da177e4SLinus Torvalds 	}
10161da177e4SLinus Torvalds 
10171da177e4SLinus Torvalds 	/*
10181da177e4SLinus Torvalds 	 * Allocate some buffers for this page
10191da177e4SLinus Torvalds 	 */
10201da177e4SLinus Torvalds 	bh = alloc_page_buffers(page, size, 0);
10211da177e4SLinus Torvalds 	if (!bh)
10221da177e4SLinus Torvalds 		goto failed;
10231da177e4SLinus Torvalds 
10241da177e4SLinus Torvalds 	/*
10251da177e4SLinus Torvalds 	 * Link the page to the buffers and initialise them.  Take the
10261da177e4SLinus Torvalds 	 * lock to be atomic wrt __find_get_block(), which does not
10271da177e4SLinus Torvalds 	 * run under the page lock.
10281da177e4SLinus Torvalds 	 */
10291da177e4SLinus Torvalds 	spin_lock(&inode->i_mapping->private_lock);
10301da177e4SLinus Torvalds 	link_dev_buffers(page, bh);
10311da177e4SLinus Torvalds 	init_page_buffers(page, bdev, block, size);
10321da177e4SLinus Torvalds 	spin_unlock(&inode->i_mapping->private_lock);
10331da177e4SLinus Torvalds 	return page;
10341da177e4SLinus Torvalds 
10351da177e4SLinus Torvalds failed:
10361da177e4SLinus Torvalds 	BUG();
10371da177e4SLinus Torvalds 	unlock_page(page);
10381da177e4SLinus Torvalds 	page_cache_release(page);
10391da177e4SLinus Torvalds 	return NULL;
10401da177e4SLinus Torvalds }
10411da177e4SLinus Torvalds 
10421da177e4SLinus Torvalds /*
10431da177e4SLinus Torvalds  * Create buffers for the page which contains the specified block
10441da177e4SLinus Torvalds  * device block.  If that page was dirty, the buffers are set dirty also.
10451da177e4SLinus Torvalds  */
1046858119e1SArjan van de Ven static int
10471da177e4SLinus Torvalds grow_buffers(struct block_device *bdev, sector_t block, int size)
10481da177e4SLinus Torvalds {
10491da177e4SLinus Torvalds 	struct page *page;
10501da177e4SLinus Torvalds 	pgoff_t index;
10511da177e4SLinus Torvalds 	int sizebits;
10521da177e4SLinus Torvalds 
10531da177e4SLinus Torvalds 	sizebits = -1;
10541da177e4SLinus Torvalds 	do {
10551da177e4SLinus Torvalds 		sizebits++;
10561da177e4SLinus Torvalds 	} while ((size << sizebits) < PAGE_SIZE);
10571da177e4SLinus Torvalds 
10581da177e4SLinus Torvalds 	index = block >> sizebits;
10591da177e4SLinus Torvalds 
1060e5657933SAndrew Morton 	/*
1061e5657933SAndrew Morton 	 * Check for a block which wants to lie outside our maximum possible
1062e5657933SAndrew Morton 	 * pagecache index.  (this comparison is done using sector_t types).
1063e5657933SAndrew Morton 	 */
1064e5657933SAndrew Morton 	if (unlikely(index != block >> sizebits)) {
1065e5657933SAndrew Morton 		char b[BDEVNAME_SIZE];
1066e5657933SAndrew Morton 
1067e5657933SAndrew Morton 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1068e5657933SAndrew Morton 			"device %s\n",
10698e24eea7SHarvey Harrison 			__func__, (unsigned long long)block,
1070e5657933SAndrew Morton 			bdevname(bdev, b));
1071e5657933SAndrew Morton 		return -EIO;
1072e5657933SAndrew Morton 	}
1073e5657933SAndrew Morton 	block = index << sizebits;
10741da177e4SLinus Torvalds 	/* Create a page with the proper size buffers.. */
10751da177e4SLinus Torvalds 	page = grow_dev_page(bdev, block, index, size);
10761da177e4SLinus Torvalds 	if (!page)
10771da177e4SLinus Torvalds 		return 0;
10781da177e4SLinus Torvalds 	unlock_page(page);
10791da177e4SLinus Torvalds 	page_cache_release(page);
10801da177e4SLinus Torvalds 	return 1;
10811da177e4SLinus Torvalds }
10821da177e4SLinus Torvalds 
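/*
 * Worked example of the sizebits/index arithmetic in grow_buffers(),
 * assuming 512-byte blocks on a machine with 4K pages: sizebits becomes
 * 3 (512 << 3 == PAGE_SIZE), so block 1000 lives in pagecache page
 * index 1000 >> 3 == 125, block 1003 maps to that same page, and the
 * first block of the page is 125 << 3 == 1000.
 */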
108375c96f85SAdrian Bunk static struct buffer_head *
10841da177e4SLinus Torvalds __getblk_slow(struct block_device *bdev, sector_t block, int size)
10851da177e4SLinus Torvalds {
10861da177e4SLinus Torvalds 	/* Size must be a multiple of the device's logical block size */
1087e1defc4fSMartin K. Petersen 	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
10881da177e4SLinus Torvalds 			(size < 512 || size > PAGE_SIZE))) {
10891da177e4SLinus Torvalds 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
10901da177e4SLinus Torvalds 					size);
1091e1defc4fSMartin K. Petersen 		printk(KERN_ERR "logical block size: %d\n",
1092e1defc4fSMartin K. Petersen 					bdev_logical_block_size(bdev));
10931da177e4SLinus Torvalds 
10941da177e4SLinus Torvalds 		dump_stack();
10951da177e4SLinus Torvalds 		return NULL;
10961da177e4SLinus Torvalds 	}
10971da177e4SLinus Torvalds 
10981da177e4SLinus Torvalds 	for (;;) {
10991da177e4SLinus Torvalds 		struct buffer_head * bh;
1100e5657933SAndrew Morton 		int ret;
11011da177e4SLinus Torvalds 
11021da177e4SLinus Torvalds 		bh = __find_get_block(bdev, block, size);
11031da177e4SLinus Torvalds 		if (bh)
11041da177e4SLinus Torvalds 			return bh;
11051da177e4SLinus Torvalds 
1106e5657933SAndrew Morton 		ret = grow_buffers(bdev, block, size);
1107e5657933SAndrew Morton 		if (ret < 0)
1108e5657933SAndrew Morton 			return NULL;
1109e5657933SAndrew Morton 		if (ret == 0)
11101da177e4SLinus Torvalds 			free_more_memory();
11111da177e4SLinus Torvalds 	}
11121da177e4SLinus Torvalds }
11131da177e4SLinus Torvalds 
11141da177e4SLinus Torvalds /*
11151da177e4SLinus Torvalds  * The relationship between dirty buffers and dirty pages:
11161da177e4SLinus Torvalds  *
11171da177e4SLinus Torvalds  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
11181da177e4SLinus Torvalds  * the page is tagged dirty in its radix tree.
11191da177e4SLinus Torvalds  *
11201da177e4SLinus Torvalds  * At all times, the dirtiness of the buffers represents the dirtiness of
11211da177e4SLinus Torvalds  * subsections of the page.  If the page has buffers, the page dirty bit is
11221da177e4SLinus Torvalds  * merely a hint about the true dirty state.
11231da177e4SLinus Torvalds  *
11241da177e4SLinus Torvalds  * When a page is set dirty in its entirety, all its buffers are marked dirty
11251da177e4SLinus Torvalds  * (if the page has buffers).
11261da177e4SLinus Torvalds  *
11271da177e4SLinus Torvalds  * When a buffer is marked dirty, its page is dirtied, but the page's other
11281da177e4SLinus Torvalds  * buffers are not.
11291da177e4SLinus Torvalds  *
11301da177e4SLinus Torvalds  * Also.  When blockdev buffers are explicitly read with bread(), they
11311da177e4SLinus Torvalds  * individually become uptodate.  But their backing page remains not
11321da177e4SLinus Torvalds  * uptodate - even if all of its buffers are uptodate.  A subsequent
11331da177e4SLinus Torvalds  * block_read_full_page() against that page will discover all the uptodate
11341da177e4SLinus Torvalds  * buffers, will set the page uptodate and will perform no I/O.
11351da177e4SLinus Torvalds  */
11361da177e4SLinus Torvalds 
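/*
 * Illustrative sketch of the bread()/uptodate interaction described
 * above (the function name and the 512-byte block size are invented for
 * the example): after __bread() succeeds the buffer is uptodate, but its
 * backing page need not be PageUptodate() until a later
 * block_read_full_page() finds every buffer on the page uptodate.
 */
static void demo_bread_uptodate(struct block_device *bdev, sector_t block)
{
	struct buffer_head *bh = __bread(bdev, block, 512);

	if (bh) {
		BUG_ON(!buffer_uptodate(bh));	/* the buffer itself is uptodate */
		/* bh->b_page may still be !PageUptodate() here */
		brelse(bh);
	}
}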
11371da177e4SLinus Torvalds /**
11381da177e4SLinus Torvalds  * mark_buffer_dirty - mark a buffer_head as needing writeout
113967be2dd1SMartin Waitz  * @bh: the buffer_head to mark dirty
11401da177e4SLinus Torvalds  *
11411da177e4SLinus Torvalds  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
11421da177e4SLinus Torvalds  * backing page dirty, then tag the page as dirty in its address_space's radix
11431da177e4SLinus Torvalds  * tree and then attach the address_space's inode to its superblock's dirty
11441da177e4SLinus Torvalds  * inode list.
11451da177e4SLinus Torvalds  *
11461da177e4SLinus Torvalds  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
11471da177e4SLinus Torvalds  * mapping->tree_lock and the global inode_lock.
11481da177e4SLinus Torvalds  */
1149fc9b52cdSHarvey Harrison void mark_buffer_dirty(struct buffer_head *bh)
11501da177e4SLinus Torvalds {
1151787d2214SNick Piggin 	WARN_ON_ONCE(!buffer_uptodate(bh));
11521be62dc1SLinus Torvalds 
11531be62dc1SLinus Torvalds 	/*
11541be62dc1SLinus Torvalds 	 * Very *carefully* optimize the it-is-already-dirty case.
11551be62dc1SLinus Torvalds 	 *
11561be62dc1SLinus Torvalds 	 * Don't let the final "is it dirty" escape to before we
11571be62dc1SLinus Torvalds 	 * perhaps modified the buffer.
11581be62dc1SLinus Torvalds 	 */
11591be62dc1SLinus Torvalds 	if (buffer_dirty(bh)) {
11601be62dc1SLinus Torvalds 		smp_mb();
11611be62dc1SLinus Torvalds 		if (buffer_dirty(bh))
11621be62dc1SLinus Torvalds 			return;
11631be62dc1SLinus Torvalds 	}
11641be62dc1SLinus Torvalds 
1165a8e7d49aSLinus Torvalds 	if (!test_set_buffer_dirty(bh)) {
1166a8e7d49aSLinus Torvalds 		struct page *page = bh->b_page;
11678e9d78edSLinus Torvalds 		if (!TestSetPageDirty(page)) {
11688e9d78edSLinus Torvalds 			struct address_space *mapping = page_mapping(page);
11698e9d78edSLinus Torvalds 			if (mapping)
11708e9d78edSLinus Torvalds 				__set_page_dirty(page, mapping, 0);
11718e9d78edSLinus Torvalds 		}
1172a8e7d49aSLinus Torvalds 	}
11731da177e4SLinus Torvalds }
11741fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(mark_buffer_dirty);
11751da177e4SLinus Torvalds 
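/*
 * Typical mark_buffer_dirty() caller pattern (a sketch; the helper name
 * and the byte being patched are invented): read the block, modify the
 * in-memory copy, mark it dirty and drop the reference.  Writeback then
 * finds the block via the dirtied page and inode.
 */
static void demo_patch_block(struct block_device *bdev, sector_t block)
{
	struct buffer_head *bh = __bread(bdev, block, 512);

	if (bh) {
		bh->b_data[0] = 0;		/* modify the cached data */
		mark_buffer_dirty(bh);		/* buffer, page and inode go dirty */
		brelse(bh);			/* writeout happens later, asynchronously */
	}
}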
11761da177e4SLinus Torvalds /*
11771da177e4SLinus Torvalds  * Decrement a buffer_head's reference count.  If all buffers against a page
11781da177e4SLinus Torvalds  * have zero reference count, are clean and unlocked, and if the page is clean
11791da177e4SLinus Torvalds  * and unlocked then try_to_free_buffers() may strip the buffers from the page
11801da177e4SLinus Torvalds  * in preparation for freeing it (sometimes, rarely, buffers are removed from
11811da177e4SLinus Torvalds  * a page but it ends up not being freed, and buffers may later be reattached).
11821da177e4SLinus Torvalds  */
11831da177e4SLinus Torvalds void __brelse(struct buffer_head * buf)
11841da177e4SLinus Torvalds {
11851da177e4SLinus Torvalds 	if (atomic_read(&buf->b_count)) {
11861da177e4SLinus Torvalds 		put_bh(buf);
11871da177e4SLinus Torvalds 		return;
11881da177e4SLinus Torvalds 	}
11895c752ad9SArjan van de Ven 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
11901da177e4SLinus Torvalds }
11911fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__brelse);
11921da177e4SLinus Torvalds 
11931da177e4SLinus Torvalds /*
11941da177e4SLinus Torvalds  * bforget() is like brelse(), except it discards any
11951da177e4SLinus Torvalds  * potentially dirty data.
11961da177e4SLinus Torvalds  */
11971da177e4SLinus Torvalds void __bforget(struct buffer_head *bh)
11981da177e4SLinus Torvalds {
11991da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
1200535ee2fbSJan Kara 	if (bh->b_assoc_map) {
12011da177e4SLinus Torvalds 		struct address_space *buffer_mapping = bh->b_page->mapping;
12021da177e4SLinus Torvalds 
12031da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
12041da177e4SLinus Torvalds 		list_del_init(&bh->b_assoc_buffers);
120558ff407bSJan Kara 		bh->b_assoc_map = NULL;
12061da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
12071da177e4SLinus Torvalds 	}
12081da177e4SLinus Torvalds 	__brelse(bh);
12091da177e4SLinus Torvalds }
12101fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__bforget);
12111da177e4SLinus Torvalds 
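/*
 * Sketch contrasting brelse() with bforget() (illustrative only): when an
 * in-memory modification is being abandoned - e.g. a failed operation on
 * a scratch block - bforget() discards the dirty data as well as dropping
 * the reference, so the stale modification is never written back.
 */
static void demo_abandon_block(struct buffer_head *bh)
{
	mark_buffer_dirty(bh);	/* pretend we modified the block... */
	bforget(bh);		/* ...then changed our mind: drop data + ref */
}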
12121da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh)
12131da177e4SLinus Torvalds {
12141da177e4SLinus Torvalds 	lock_buffer(bh);
12151da177e4SLinus Torvalds 	if (buffer_uptodate(bh)) {
12161da177e4SLinus Torvalds 		unlock_buffer(bh);
12171da177e4SLinus Torvalds 		return bh;
12181da177e4SLinus Torvalds 	} else {
12191da177e4SLinus Torvalds 		get_bh(bh);
12201da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_read_sync;
12211da177e4SLinus Torvalds 		submit_bh(READ, bh);
12221da177e4SLinus Torvalds 		wait_on_buffer(bh);
12231da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
12241da177e4SLinus Torvalds 			return bh;
12251da177e4SLinus Torvalds 	}
12261da177e4SLinus Torvalds 	brelse(bh);
12271da177e4SLinus Torvalds 	return NULL;
12281da177e4SLinus Torvalds }
12291da177e4SLinus Torvalds 
12301da177e4SLinus Torvalds  * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
12311da177e4SLinus Torvalds  * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
12321da177e4SLinus Torvalds  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
12331da177e4SLinus Torvalds  * refcount elevated by one when they're in an LRU.  A buffer can only appear
12341da177e4SLinus Torvalds  * once in a particular CPU's LRU.  A single buffer can be present in multiple
12351da177e4SLinus Torvalds  * CPU's LRUs at the same time.
12361da177e4SLinus Torvalds  *
12371da177e4SLinus Torvalds  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
12381da177e4SLinus Torvalds  * sb_find_get_block().
12391da177e4SLinus Torvalds  *
12401da177e4SLinus Torvalds  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
12411da177e4SLinus Torvalds  * a local interrupt disable for that.
12421da177e4SLinus Torvalds  */
12431da177e4SLinus Torvalds 
12441da177e4SLinus Torvalds #define BH_LRU_SIZE	8
12451da177e4SLinus Torvalds 
12461da177e4SLinus Torvalds struct bh_lru {
12471da177e4SLinus Torvalds 	struct buffer_head *bhs[BH_LRU_SIZE];
12481da177e4SLinus Torvalds };
12491da177e4SLinus Torvalds 
12501da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
12511da177e4SLinus Torvalds 
12521da177e4SLinus Torvalds #ifdef CONFIG_SMP
12531da177e4SLinus Torvalds #define bh_lru_lock()	local_irq_disable()
12541da177e4SLinus Torvalds #define bh_lru_unlock()	local_irq_enable()
12551da177e4SLinus Torvalds #else
12561da177e4SLinus Torvalds #define bh_lru_lock()	preempt_disable()
12571da177e4SLinus Torvalds #define bh_lru_unlock()	preempt_enable()
12581da177e4SLinus Torvalds #endif
12591da177e4SLinus Torvalds 
12601da177e4SLinus Torvalds static inline void check_irqs_on(void)
12611da177e4SLinus Torvalds {
12621da177e4SLinus Torvalds #ifdef irqs_disabled
12631da177e4SLinus Torvalds 	BUG_ON(irqs_disabled());
12641da177e4SLinus Torvalds #endif
12651da177e4SLinus Torvalds }
12661da177e4SLinus Torvalds 
12671da177e4SLinus Torvalds /*
12681da177e4SLinus Torvalds  * The LRU management algorithm is dopey-but-simple.  Sorry.
12691da177e4SLinus Torvalds  */
12701da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh)
12711da177e4SLinus Torvalds {
12721da177e4SLinus Torvalds 	struct buffer_head *evictee = NULL;
12731da177e4SLinus Torvalds 	struct bh_lru *lru;
12741da177e4SLinus Torvalds 
12751da177e4SLinus Torvalds 	check_irqs_on();
12761da177e4SLinus Torvalds 	bh_lru_lock();
12771da177e4SLinus Torvalds 	lru = &__get_cpu_var(bh_lrus);
12781da177e4SLinus Torvalds 	if (lru->bhs[0] != bh) {
12791da177e4SLinus Torvalds 		struct buffer_head *bhs[BH_LRU_SIZE];
12801da177e4SLinus Torvalds 		int in;
12811da177e4SLinus Torvalds 		int out = 0;
12821da177e4SLinus Torvalds 
12831da177e4SLinus Torvalds 		get_bh(bh);
12841da177e4SLinus Torvalds 		bhs[out++] = bh;
12851da177e4SLinus Torvalds 		for (in = 0; in < BH_LRU_SIZE; in++) {
12861da177e4SLinus Torvalds 			struct buffer_head *bh2 = lru->bhs[in];
12871da177e4SLinus Torvalds 
12881da177e4SLinus Torvalds 			if (bh2 == bh) {
12891da177e4SLinus Torvalds 				__brelse(bh2);
12901da177e4SLinus Torvalds 			} else {
12911da177e4SLinus Torvalds 				if (out >= BH_LRU_SIZE) {
12921da177e4SLinus Torvalds 					BUG_ON(evictee != NULL);
12931da177e4SLinus Torvalds 					evictee = bh2;
12941da177e4SLinus Torvalds 				} else {
12951da177e4SLinus Torvalds 					bhs[out++] = bh2;
12961da177e4SLinus Torvalds 				}
12971da177e4SLinus Torvalds 			}
12981da177e4SLinus Torvalds 		}
12991da177e4SLinus Torvalds 		while (out < BH_LRU_SIZE)
13001da177e4SLinus Torvalds 			bhs[out++] = NULL;
13011da177e4SLinus Torvalds 		memcpy(lru->bhs, bhs, sizeof(bhs));
13021da177e4SLinus Torvalds 	}
13031da177e4SLinus Torvalds 	bh_lru_unlock();
13041da177e4SLinus Torvalds 
13051da177e4SLinus Torvalds 	if (evictee)
13061da177e4SLinus Torvalds 		__brelse(evictee);
13071da177e4SLinus Torvalds }
13081da177e4SLinus Torvalds 
13091da177e4SLinus Torvalds /*
13101da177e4SLinus Torvalds  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
13111da177e4SLinus Torvalds  */
1312858119e1SArjan van de Ven static struct buffer_head *
13133991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
13141da177e4SLinus Torvalds {
13151da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
13161da177e4SLinus Torvalds 	struct bh_lru *lru;
13173991d3bdSTomasz Kvarsin 	unsigned int i;
13181da177e4SLinus Torvalds 
13191da177e4SLinus Torvalds 	check_irqs_on();
13201da177e4SLinus Torvalds 	bh_lru_lock();
13211da177e4SLinus Torvalds 	lru = &__get_cpu_var(bh_lrus);
13221da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
13231da177e4SLinus Torvalds 		struct buffer_head *bh = lru->bhs[i];
13241da177e4SLinus Torvalds 
13251da177e4SLinus Torvalds 		if (bh && bh->b_bdev == bdev &&
13261da177e4SLinus Torvalds 				bh->b_blocknr == block && bh->b_size == size) {
13271da177e4SLinus Torvalds 			if (i) {
13281da177e4SLinus Torvalds 				while (i) {
13291da177e4SLinus Torvalds 					lru->bhs[i] = lru->bhs[i - 1];
13301da177e4SLinus Torvalds 					i--;
13311da177e4SLinus Torvalds 				}
13321da177e4SLinus Torvalds 				lru->bhs[0] = bh;
13331da177e4SLinus Torvalds 			}
13341da177e4SLinus Torvalds 			get_bh(bh);
13351da177e4SLinus Torvalds 			ret = bh;
13361da177e4SLinus Torvalds 			break;
13371da177e4SLinus Torvalds 		}
13381da177e4SLinus Torvalds 	}
13391da177e4SLinus Torvalds 	bh_lru_unlock();
13401da177e4SLinus Torvalds 	return ret;
13411da177e4SLinus Torvalds }
13421da177e4SLinus Torvalds 
13431da177e4SLinus Torvalds /*
13441da177e4SLinus Torvalds  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
13451da177e4SLinus Torvalds  * it in the LRU and mark it as accessed.  If it is not present then return
13461da177e4SLinus Torvalds  * NULL
13471da177e4SLinus Torvalds  */
13481da177e4SLinus Torvalds struct buffer_head *
13493991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
13501da177e4SLinus Torvalds {
13511da177e4SLinus Torvalds 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
13521da177e4SLinus Torvalds 
13531da177e4SLinus Torvalds 	if (bh == NULL) {
1354385fd4c5SCoywolf Qi Hunt 		bh = __find_get_block_slow(bdev, block);
13551da177e4SLinus Torvalds 		if (bh)
13561da177e4SLinus Torvalds 			bh_lru_install(bh);
13571da177e4SLinus Torvalds 	}
13581da177e4SLinus Torvalds 	if (bh)
13591da177e4SLinus Torvalds 		touch_buffer(bh);
13601da177e4SLinus Torvalds 	return bh;
13611da177e4SLinus Torvalds }
13621da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block);
13631da177e4SLinus Torvalds 
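/*
 * Cache-only peek built on __find_get_block() (a sketch; the helper name
 * and the 512-byte block size are invented): no page or buffer is created
 * and no I/O is issued, so NULL simply means "not currently cached".
 */
static int demo_block_is_cached(struct block_device *bdev, sector_t block)
{
	struct buffer_head *bh = __find_get_block(bdev, block, 512);

	if (!bh)
		return 0;
	brelse(bh);	/* drop the reference taken by the lookup */
	return 1;
}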
13641da177e4SLinus Torvalds /*
13651da177e4SLinus Torvalds  * __getblk will locate (and, if necessary, create) the buffer_head
13661da177e4SLinus Torvalds  * which corresponds to the passed block_device, block and size. The
13671da177e4SLinus Torvalds  * returned buffer has its reference count incremented.
13681da177e4SLinus Torvalds  *
13691da177e4SLinus Torvalds  * __getblk() cannot fail - it just keeps trying.  If you pass it an
13701da177e4SLinus Torvalds  * illegal block number, __getblk() will happily return a buffer_head
13711da177e4SLinus Torvalds  * which represents the non-existent block.  Very weird.
13721da177e4SLinus Torvalds  *
13731da177e4SLinus Torvalds  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
13741da177e4SLinus Torvalds  * attempt keeps failing.  FIXME, perhaps?
13751da177e4SLinus Torvalds  */
13761da177e4SLinus Torvalds struct buffer_head *
13773991d3bdSTomasz Kvarsin __getblk(struct block_device *bdev, sector_t block, unsigned size)
13781da177e4SLinus Torvalds {
13791da177e4SLinus Torvalds 	struct buffer_head *bh = __find_get_block(bdev, block, size);
13801da177e4SLinus Torvalds 
13811da177e4SLinus Torvalds 	might_sleep();
13821da177e4SLinus Torvalds 	if (bh == NULL)
13831da177e4SLinus Torvalds 		bh = __getblk_slow(bdev, block, size);
13841da177e4SLinus Torvalds 	return bh;
13851da177e4SLinus Torvalds }
13861da177e4SLinus Torvalds EXPORT_SYMBOL(__getblk);
13871da177e4SLinus Torvalds 
13881da177e4SLinus Torvalds /*
13891da177e4SLinus Torvalds  * Do async read-ahead on a buffer..
13901da177e4SLinus Torvalds  */
13913991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
13921da177e4SLinus Torvalds {
13931da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
1394a3e713b5SAndrew Morton 	if (likely(bh)) {
13951da177e4SLinus Torvalds 		ll_rw_block(READA, 1, &bh);
13961da177e4SLinus Torvalds 		brelse(bh);
13971da177e4SLinus Torvalds 	}
1398a3e713b5SAndrew Morton }
13991da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead);
14001da177e4SLinus Torvalds 
14011da177e4SLinus Torvalds /**
14021da177e4SLinus Torvalds  *  __bread() - reads a specified block and returns the bh
140367be2dd1SMartin Waitz  *  @bdev: the block_device to read from
14041da177e4SLinus Torvalds  *  @block: number of block
14051da177e4SLinus Torvalds  *  @size: size (in bytes) to read
14061da177e4SLinus Torvalds  *
14071da177e4SLinus Torvalds  *  Reads a specified block, and returns buffer head that contains it.
14081da177e4SLinus Torvalds  *  It returns NULL if the block was unreadable.
14091da177e4SLinus Torvalds  */
14101da177e4SLinus Torvalds struct buffer_head *
14113991d3bdSTomasz Kvarsin __bread(struct block_device *bdev, sector_t block, unsigned size)
14121da177e4SLinus Torvalds {
14131da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
14141da177e4SLinus Torvalds 
1415a3e713b5SAndrew Morton 	if (likely(bh) && !buffer_uptodate(bh))
14161da177e4SLinus Torvalds 		bh = __bread_slow(bh);
14171da177e4SLinus Torvalds 	return bh;
14181da177e4SLinus Torvalds }
14191da177e4SLinus Torvalds EXPORT_SYMBOL(__bread);
14201da177e4SLinus Torvalds 
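/*
 * Filesystem-level sketch: most callers reach __bread() through the
 * sb_bread() wrapper, which supplies the superblock's block size.  The
 * "myfs" name and block number 1 are placeholders for illustration.
 */
static int myfs_demo_read_super(struct super_block *sb)
{
	struct buffer_head *bh = sb_bread(sb, 1);

	if (!bh)
		return -EIO;	/* the block was unreadable */
	/* ... parse the on-disk superblock in bh->b_data ... */
	brelse(bh);
	return 0;
}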
14211da177e4SLinus Torvalds /*
14221da177e4SLinus Torvalds  * invalidate_bh_lrus() is called rarely - but not only at unmount.
14231da177e4SLinus Torvalds  * This doesn't race because it runs on each cpu either in irq context
14241da177e4SLinus Torvalds  * or with preemption disabled.
14251da177e4SLinus Torvalds  */
14261da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg)
14271da177e4SLinus Torvalds {
14281da177e4SLinus Torvalds 	struct bh_lru *b = &get_cpu_var(bh_lrus);
14291da177e4SLinus Torvalds 	int i;
14301da177e4SLinus Torvalds 
14311da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
14321da177e4SLinus Torvalds 		brelse(b->bhs[i]);
14331da177e4SLinus Torvalds 		b->bhs[i] = NULL;
14341da177e4SLinus Torvalds 	}
14351da177e4SLinus Torvalds 	put_cpu_var(bh_lrus);
14361da177e4SLinus Torvalds }
14371da177e4SLinus Torvalds 
1438f9a14399SPeter Zijlstra void invalidate_bh_lrus(void)
14391da177e4SLinus Torvalds {
144015c8b6c1SJens Axboe 	on_each_cpu(invalidate_bh_lru, NULL, 1);
14411da177e4SLinus Torvalds }
14429db5579bSNick Piggin EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
14431da177e4SLinus Torvalds 
14441da177e4SLinus Torvalds void set_bh_page(struct buffer_head *bh,
14451da177e4SLinus Torvalds 		struct page *page, unsigned long offset)
14461da177e4SLinus Torvalds {
14471da177e4SLinus Torvalds 	bh->b_page = page;
1448e827f923SEric Sesterhenn 	BUG_ON(offset >= PAGE_SIZE);
14491da177e4SLinus Torvalds 	if (PageHighMem(page))
14501da177e4SLinus Torvalds 		/*
14511da177e4SLinus Torvalds 		 * This catches illegal uses and preserves the offset:
14521da177e4SLinus Torvalds 		 */
14531da177e4SLinus Torvalds 		bh->b_data = (char *)(0 + offset);
14541da177e4SLinus Torvalds 	else
14551da177e4SLinus Torvalds 		bh->b_data = page_address(page) + offset;
14561da177e4SLinus Torvalds }
14571da177e4SLinus Torvalds EXPORT_SYMBOL(set_bh_page);
14581da177e4SLinus Torvalds 
14591da177e4SLinus Torvalds /*
14601da177e4SLinus Torvalds  * Called when truncating a buffer on a page completely.
14611da177e4SLinus Torvalds  */
1462858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh)
14631da177e4SLinus Torvalds {
14641da177e4SLinus Torvalds 	lock_buffer(bh);
14651da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
14661da177e4SLinus Torvalds 	bh->b_bdev = NULL;
14671da177e4SLinus Torvalds 	clear_buffer_mapped(bh);
14681da177e4SLinus Torvalds 	clear_buffer_req(bh);
14691da177e4SLinus Torvalds 	clear_buffer_new(bh);
14701da177e4SLinus Torvalds 	clear_buffer_delay(bh);
147133a266ddSDavid Chinner 	clear_buffer_unwritten(bh);
14721da177e4SLinus Torvalds 	unlock_buffer(bh);
14731da177e4SLinus Torvalds }
14741da177e4SLinus Torvalds 
14751da177e4SLinus Torvalds /**
14761da177e4SLinus Torvalds  * block_invalidatepage - invalidate part or all of a buffer-backed page
14771da177e4SLinus Torvalds  *
14781da177e4SLinus Torvalds  * @page: the page which is affected
14791da177e4SLinus Torvalds  * @offset: the index of the truncation point
14801da177e4SLinus Torvalds  *
14811da177e4SLinus Torvalds  * block_invalidatepage() is called when all or part of the page has become
14821da177e4SLinus Torvalds  * invalidated by a truncate operation.
14831da177e4SLinus Torvalds  *
14841da177e4SLinus Torvalds  * block_invalidatepage() does not have to release all buffers, but it must
14851da177e4SLinus Torvalds  * ensure that no dirty buffer is left outside @offset and that no I/O
14861da177e4SLinus Torvalds  * is underway against any of the blocks which are outside the truncation
14871da177e4SLinus Torvalds  * point, because the caller is about to free (and possibly reuse) those
14881da177e4SLinus Torvalds  * blocks on-disk.
14891da177e4SLinus Torvalds  */
14902ff28e22SNeilBrown void block_invalidatepage(struct page *page, unsigned long offset)
14911da177e4SLinus Torvalds {
14921da177e4SLinus Torvalds 	struct buffer_head *head, *bh, *next;
14931da177e4SLinus Torvalds 	unsigned int curr_off = 0;
14941da177e4SLinus Torvalds 
14951da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
14961da177e4SLinus Torvalds 	if (!page_has_buffers(page))
14971da177e4SLinus Torvalds 		goto out;
14981da177e4SLinus Torvalds 
14991da177e4SLinus Torvalds 	head = page_buffers(page);
15001da177e4SLinus Torvalds 	bh = head;
15011da177e4SLinus Torvalds 	do {
15021da177e4SLinus Torvalds 		unsigned int next_off = curr_off + bh->b_size;
15031da177e4SLinus Torvalds 		next = bh->b_this_page;
15041da177e4SLinus Torvalds 
15051da177e4SLinus Torvalds 		/*
15061da177e4SLinus Torvalds 		 * is this block fully invalidated?
15071da177e4SLinus Torvalds 		 */
15081da177e4SLinus Torvalds 		if (offset <= curr_off)
15091da177e4SLinus Torvalds 			discard_buffer(bh);
15101da177e4SLinus Torvalds 		curr_off = next_off;
15111da177e4SLinus Torvalds 		bh = next;
15121da177e4SLinus Torvalds 	} while (bh != head);
15131da177e4SLinus Torvalds 
15141da177e4SLinus Torvalds 	/*
15151da177e4SLinus Torvalds 	 * We release buffers only if the entire page is being invalidated.
15161da177e4SLinus Torvalds 	 * The get_block cached value has been unconditionally invalidated,
15171da177e4SLinus Torvalds 	 * so real IO is not possible anymore.
15181da177e4SLinus Torvalds 	 */
15191da177e4SLinus Torvalds 	if (offset == 0)
15202ff28e22SNeilBrown 		try_to_release_page(page, 0);
15211da177e4SLinus Torvalds out:
15222ff28e22SNeilBrown 	return;
15231da177e4SLinus Torvalds }
15241da177e4SLinus Torvalds EXPORT_SYMBOL(block_invalidatepage);
15251da177e4SLinus Torvalds 
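/*
 * Wiring sketch for block_invalidatepage() ("myfs" is a placeholder):
 * when a filesystem leaves ->invalidatepage NULL, do_invalidatepage()
 * falls back to block_invalidatepage() on CONFIG_BLOCK kernels, but a
 * buffer-backed filesystem can equally set it explicitly:
 */
static const struct address_space_operations myfs_demo_aops = {
	.invalidatepage	= block_invalidatepage,
	/* .readpage, .writepage, ... omitted for brevity */
};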
15261da177e4SLinus Torvalds /*
15271da177e4SLinus Torvalds  * We attach and possibly dirty the buffers atomically wrt
15281da177e4SLinus Torvalds  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
15291da177e4SLinus Torvalds  * is already excluded via the page lock.
15301da177e4SLinus Torvalds  */
15311da177e4SLinus Torvalds void create_empty_buffers(struct page *page,
15321da177e4SLinus Torvalds 			unsigned long blocksize, unsigned long b_state)
15331da177e4SLinus Torvalds {
15341da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *tail;
15351da177e4SLinus Torvalds 
15361da177e4SLinus Torvalds 	head = alloc_page_buffers(page, blocksize, 1);
15371da177e4SLinus Torvalds 	bh = head;
15381da177e4SLinus Torvalds 	do {
15391da177e4SLinus Torvalds 		bh->b_state |= b_state;
15401da177e4SLinus Torvalds 		tail = bh;
15411da177e4SLinus Torvalds 		bh = bh->b_this_page;
15421da177e4SLinus Torvalds 	} while (bh);
15431da177e4SLinus Torvalds 	tail->b_this_page = head;
15441da177e4SLinus Torvalds 
15451da177e4SLinus Torvalds 	spin_lock(&page->mapping->private_lock);
15461da177e4SLinus Torvalds 	if (PageUptodate(page) || PageDirty(page)) {
15471da177e4SLinus Torvalds 		bh = head;
15481da177e4SLinus Torvalds 		do {
15491da177e4SLinus Torvalds 			if (PageDirty(page))
15501da177e4SLinus Torvalds 				set_buffer_dirty(bh);
15511da177e4SLinus Torvalds 			if (PageUptodate(page))
15521da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
15531da177e4SLinus Torvalds 			bh = bh->b_this_page;
15541da177e4SLinus Torvalds 		} while (bh != head);
15551da177e4SLinus Torvalds 	}
15561da177e4SLinus Torvalds 	attach_page_buffers(page, head);
15571da177e4SLinus Torvalds 	spin_unlock(&page->mapping->private_lock);
15581da177e4SLinus Torvalds }
15591da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers);
15601da177e4SLinus Torvalds 
15611da177e4SLinus Torvalds /*
15621da177e4SLinus Torvalds  * We are taking a block for data and we don't want any output from any
15631da177e4SLinus Torvalds  * buffer-cache aliases starting from the return from this function
15641da177e4SLinus Torvalds  * until the moment when something explicitly marks the buffer
15651da177e4SLinus Torvalds  * dirty (hopefully that will not happen until we free that block ;-)
15661da177e4SLinus Torvalds  * We don't even need to mark it not-uptodate - nobody can expect
15671da177e4SLinus Torvalds  * anything from a newly allocated buffer anyway. We used to use
15681da177e4SLinus Torvalds  * unmap_buffer() for such invalidation, but that was wrong. We definitely
15691da177e4SLinus Torvalds  * don't want to mark the alias unmapped, for example - it would confuse
15701da177e4SLinus Torvalds  * anyone who might pick it with bread() afterwards...
15711da177e4SLinus Torvalds  *
15721da177e4SLinus Torvalds  * Also..  Note that bforget() doesn't lock the buffer.  So there can
15731da177e4SLinus Torvalds  * be writeout I/O going on against recently-freed buffers.  We don't
15741da177e4SLinus Torvalds  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
15751da177e4SLinus Torvalds  * only if we really need to.  That happens here.
15761da177e4SLinus Torvalds  */
15771da177e4SLinus Torvalds void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
15781da177e4SLinus Torvalds {
15791da177e4SLinus Torvalds 	struct buffer_head *old_bh;
15801da177e4SLinus Torvalds 
15811da177e4SLinus Torvalds 	might_sleep();
15821da177e4SLinus Torvalds 
1583385fd4c5SCoywolf Qi Hunt 	old_bh = __find_get_block_slow(bdev, block);
15841da177e4SLinus Torvalds 	if (old_bh) {
15851da177e4SLinus Torvalds 		clear_buffer_dirty(old_bh);
15861da177e4SLinus Torvalds 		wait_on_buffer(old_bh);
15871da177e4SLinus Torvalds 		clear_buffer_req(old_bh);
15881da177e4SLinus Torvalds 		__brelse(old_bh);
15891da177e4SLinus Torvalds 	}
15901da177e4SLinus Torvalds }
15911da177e4SLinus Torvalds EXPORT_SYMBOL(unmap_underlying_metadata);
15921da177e4SLinus Torvalds 
15931da177e4SLinus Torvalds /*
15941da177e4SLinus Torvalds  * NOTE! All mapped/uptodate combinations are valid:
15951da177e4SLinus Torvalds  *
15961da177e4SLinus Torvalds  *	Mapped	Uptodate	Meaning
15971da177e4SLinus Torvalds  *
15981da177e4SLinus Torvalds  *	No	No		"unknown" - must do get_block()
15991da177e4SLinus Torvalds  *	No	Yes		"hole" - zero-filled
16001da177e4SLinus Torvalds  *	Yes	No		"allocated" - allocated on disk, not read in
16011da177e4SLinus Torvalds  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
16021da177e4SLinus Torvalds  *
16031da177e4SLinus Torvalds  * "Dirty" is valid only with the last case (mapped+uptodate).
16041da177e4SLinus Torvalds  */
16051da177e4SLinus Torvalds 
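/*
 * The state table above expressed as code (a sketch; the helper and the
 * string names are not kernel API, they just mirror the table):
 */
static const char *demo_bh_state_name(struct buffer_head *bh)
{
	if (!buffer_mapped(bh))
		return buffer_uptodate(bh) ? "hole" : "unknown";
	return buffer_uptodate(bh) ? "valid" : "allocated";
}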
16061da177e4SLinus Torvalds /*
16071da177e4SLinus Torvalds  * While block_write_full_page is writing back the dirty buffers under
16081da177e4SLinus Torvalds  * the page lock, whoever dirtied the buffers may decide to clean them
16091da177e4SLinus Torvalds  * again at any time.  We handle that by only looking at the buffer
16101da177e4SLinus Torvalds  * state inside lock_buffer().
16111da177e4SLinus Torvalds  *
16121da177e4SLinus Torvalds  * If block_write_full_page() is called for regular writeback
16131da177e4SLinus Torvalds  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
16141da177e4SLinus Torvalds  * locked buffer.   This only can happen if someone has written the buffer
16151da177e4SLinus Torvalds  * directly, with submit_bh().  At the address_space level PageWriteback
16161da177e4SLinus Torvalds  * prevents this contention from occurring.
16176e34eeddSTheodore Ts'o  *
16186e34eeddSTheodore Ts'o  * If block_write_full_page() is called with wbc->sync_mode ==
16196e34eeddSTheodore Ts'o  * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
16206e34eeddSTheodore Ts'o  * causes the writes to be flagged as synchronous writes, but the
16216e34eeddSTheodore Ts'o  * block device queue will NOT be unplugged, since usually many pages
16226e34eeddSTheodore Ts'o  * will be pushed out before the higher-level caller actually
16236e34eeddSTheodore Ts'o  * waits for the writes to be completed.  The various wait functions,
16246e34eeddSTheodore Ts'o  * such as wait_on_writeback_range() will ultimately call sync_page()
16256e34eeddSTheodore Ts'o  * which will ultimately call blk_run_backing_dev(), which will end up
16266e34eeddSTheodore Ts'o  * unplugging the device queue.
16271da177e4SLinus Torvalds  */
16281da177e4SLinus Torvalds static int __block_write_full_page(struct inode *inode, struct page *page,
162935c80d5fSChris Mason 			get_block_t *get_block, struct writeback_control *wbc,
163035c80d5fSChris Mason 			bh_end_io_t *handler)
16311da177e4SLinus Torvalds {
16321da177e4SLinus Torvalds 	int err;
16331da177e4SLinus Torvalds 	sector_t block;
16341da177e4SLinus Torvalds 	sector_t last_block;
1635f0fbd5fcSAndrew Morton 	struct buffer_head *bh, *head;
1636b0cf2321SBadari Pulavarty 	const unsigned blocksize = 1 << inode->i_blkbits;
16371da177e4SLinus Torvalds 	int nr_underway = 0;
16386e34eeddSTheodore Ts'o 	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
16396e34eeddSTheodore Ts'o 			WRITE_SYNC_PLUG : WRITE);
16401da177e4SLinus Torvalds 
16411da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
16421da177e4SLinus Torvalds 
16431da177e4SLinus Torvalds 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
16441da177e4SLinus Torvalds 
16451da177e4SLinus Torvalds 	if (!page_has_buffers(page)) {
1646b0cf2321SBadari Pulavarty 		create_empty_buffers(page, blocksize,
16471da177e4SLinus Torvalds 					(1 << BH_Dirty)|(1 << BH_Uptodate));
16481da177e4SLinus Torvalds 	}
16491da177e4SLinus Torvalds 
16501da177e4SLinus Torvalds 	/*
16511da177e4SLinus Torvalds 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
16521da177e4SLinus Torvalds 	 * here, and the (potentially unmapped) buffers may become dirty at
16531da177e4SLinus Torvalds 	 * any time.  If a buffer becomes dirty here after we've inspected it
16541da177e4SLinus Torvalds 	 * then we just miss that fact, and the page stays dirty.
16551da177e4SLinus Torvalds 	 *
16561da177e4SLinus Torvalds 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
16571da177e4SLinus Torvalds 	 * handle that here by just cleaning them.
16581da177e4SLinus Torvalds 	 */
16591da177e4SLinus Torvalds 
166054b21a79SAndrew Morton 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
16611da177e4SLinus Torvalds 	head = page_buffers(page);
16621da177e4SLinus Torvalds 	bh = head;
16631da177e4SLinus Torvalds 
16641da177e4SLinus Torvalds 	/*
16651da177e4SLinus Torvalds 	 * Get all the dirty buffers mapped to disk addresses and
16661da177e4SLinus Torvalds 	 * handle any aliases from the underlying blockdev's mapping.
16671da177e4SLinus Torvalds 	 */
16681da177e4SLinus Torvalds 	do {
16691da177e4SLinus Torvalds 		if (block > last_block) {
16701da177e4SLinus Torvalds 			/*
16711da177e4SLinus Torvalds 			 * mapped buffers outside i_size will occur, because
16721da177e4SLinus Torvalds 			 * this page can be outside i_size when there is a
16731da177e4SLinus Torvalds 			 * truncate in progress.
16741da177e4SLinus Torvalds 			 */
16751da177e4SLinus Torvalds 			/*
16761da177e4SLinus Torvalds 			 * The buffer was zeroed by block_write_full_page()
16771da177e4SLinus Torvalds 			 */
16781da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
16791da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
168029a814d2SAlex Tomas 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
168129a814d2SAlex Tomas 			   buffer_dirty(bh)) {
1682b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
16831da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
16841da177e4SLinus Torvalds 			if (err)
16851da177e4SLinus Torvalds 				goto recover;
168629a814d2SAlex Tomas 			clear_buffer_delay(bh);
16871da177e4SLinus Torvalds 			if (buffer_new(bh)) {
16881da177e4SLinus Torvalds 				/* blockdev mappings never come here */
16891da177e4SLinus Torvalds 				clear_buffer_new(bh);
16901da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
16911da177e4SLinus Torvalds 							bh->b_blocknr);
16921da177e4SLinus Torvalds 			}
16931da177e4SLinus Torvalds 		}
16941da177e4SLinus Torvalds 		bh = bh->b_this_page;
16951da177e4SLinus Torvalds 		block++;
16961da177e4SLinus Torvalds 	} while (bh != head);
16971da177e4SLinus Torvalds 
16981da177e4SLinus Torvalds 	do {
16991da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
17001da177e4SLinus Torvalds 			continue;
17011da177e4SLinus Torvalds 		/*
17021da177e4SLinus Torvalds 		 * If it's a fully non-blocking write attempt and we cannot
17031da177e4SLinus Torvalds 		 * lock the buffer then redirty the page.  Note that this can
17045b0830cbSJens Axboe 		 * potentially cause a busy-wait loop from writeback threads
17055b0830cbSJens Axboe 		 * and kswapd activity, but those code paths have their own
17065b0830cbSJens Axboe 		 * higher-level throttling.
17071da177e4SLinus Torvalds 		 */
17081da177e4SLinus Torvalds 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
17091da177e4SLinus Torvalds 			lock_buffer(bh);
1710ca5de404SNick Piggin 		} else if (!trylock_buffer(bh)) {
17111da177e4SLinus Torvalds 			redirty_page_for_writepage(wbc, page);
17121da177e4SLinus Torvalds 			continue;
17131da177e4SLinus Torvalds 		}
17141da177e4SLinus Torvalds 		if (test_clear_buffer_dirty(bh)) {
171535c80d5fSChris Mason 			mark_buffer_async_write_endio(bh, handler);
17161da177e4SLinus Torvalds 		} else {
17171da177e4SLinus Torvalds 			unlock_buffer(bh);
17181da177e4SLinus Torvalds 		}
17191da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
17201da177e4SLinus Torvalds 
17211da177e4SLinus Torvalds 	/*
17221da177e4SLinus Torvalds 	 * The page and its buffers are protected by PageWriteback(), so we can
17231da177e4SLinus Torvalds 	 * drop the bh refcounts early.
17241da177e4SLinus Torvalds 	 */
17251da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
17261da177e4SLinus Torvalds 	set_page_writeback(page);
17271da177e4SLinus Torvalds 
17281da177e4SLinus Torvalds 	do {
17291da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
17301da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
1731a64c8610STheodore Ts'o 			submit_bh(write_op, bh);
17321da177e4SLinus Torvalds 			nr_underway++;
1733ad576e63SNick Piggin 		}
17341da177e4SLinus Torvalds 		bh = next;
17351da177e4SLinus Torvalds 	} while (bh != head);
173605937baaSAndrew Morton 	unlock_page(page);
17371da177e4SLinus Torvalds 
17381da177e4SLinus Torvalds 	err = 0;
17391da177e4SLinus Torvalds done:
17401da177e4SLinus Torvalds 	if (nr_underway == 0) {
17411da177e4SLinus Torvalds 		/*
17421da177e4SLinus Torvalds 		 * The page was marked dirty, but the buffers were
17431da177e4SLinus Torvalds 		 * clean.  Someone wrote them back by hand with
17441da177e4SLinus Torvalds 		 * ll_rw_block/submit_bh.  A rare case.
17451da177e4SLinus Torvalds 		 */
17461da177e4SLinus Torvalds 		end_page_writeback(page);
17473d67f2d7SNick Piggin 
17481da177e4SLinus Torvalds 		/*
17491da177e4SLinus Torvalds 		 * The page and buffer_heads can be released at any time from
17501da177e4SLinus Torvalds 		 * here on.
17511da177e4SLinus Torvalds 		 */
17521da177e4SLinus Torvalds 	}
17531da177e4SLinus Torvalds 	return err;
17541da177e4SLinus Torvalds 
17551da177e4SLinus Torvalds recover:
17561da177e4SLinus Torvalds 	/*
17571da177e4SLinus Torvalds 	 * ENOSPC, or some other error.  We may already have added some
17581da177e4SLinus Torvalds 	 * blocks to the file, so we need to write these out to avoid
17591da177e4SLinus Torvalds 	 * exposing stale data.
17601da177e4SLinus Torvalds 	 * The page is currently locked and not marked for writeback
17611da177e4SLinus Torvalds 	 */
17621da177e4SLinus Torvalds 	bh = head;
17631da177e4SLinus Torvalds 	/* Recovery: lock and submit the mapped buffers */
17641da177e4SLinus Torvalds 	do {
176529a814d2SAlex Tomas 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
176629a814d2SAlex Tomas 		    !buffer_delay(bh)) {
17671da177e4SLinus Torvalds 			lock_buffer(bh);
176835c80d5fSChris Mason 			mark_buffer_async_write_endio(bh, handler);
17691da177e4SLinus Torvalds 		} else {
17701da177e4SLinus Torvalds 			/*
17711da177e4SLinus Torvalds 			 * The buffer may have been set dirty during
17721da177e4SLinus Torvalds 			 * attachment to a dirty page.
17731da177e4SLinus Torvalds 			 */
17741da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
17751da177e4SLinus Torvalds 		}
17761da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
17771da177e4SLinus Torvalds 	SetPageError(page);
17781da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
17797e4c3690SAndrew Morton 	mapping_set_error(page->mapping, err);
17801da177e4SLinus Torvalds 	set_page_writeback(page);
17811da177e4SLinus Torvalds 	do {
17821da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
17831da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
17841da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
1785a64c8610STheodore Ts'o 			submit_bh(write_op, bh);
17861da177e4SLinus Torvalds 			nr_underway++;
1787ad576e63SNick Piggin 		}
17881da177e4SLinus Torvalds 		bh = next;
17891da177e4SLinus Torvalds 	} while (bh != head);
1790ffda9d30SNick Piggin 	unlock_page(page);
17911da177e4SLinus Torvalds 	goto done;
17921da177e4SLinus Torvalds }
17931da177e4SLinus Torvalds 
1794afddba49SNick Piggin /*
1795afddba49SNick Piggin  * If a page has any new buffers, zero them out here, and mark them uptodate
1796afddba49SNick Piggin  * and dirty so they'll be written out (in order to prevent uninitialised
1797afddba49SNick Piggin  * block data from leaking). And clear the new bit.
1798afddba49SNick Piggin  */
1799afddba49SNick Piggin void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1800afddba49SNick Piggin {
1801afddba49SNick Piggin 	unsigned int block_start, block_end;
1802afddba49SNick Piggin 	struct buffer_head *head, *bh;
1803afddba49SNick Piggin 
1804afddba49SNick Piggin 	BUG_ON(!PageLocked(page));
1805afddba49SNick Piggin 	if (!page_has_buffers(page))
1806afddba49SNick Piggin 		return;
1807afddba49SNick Piggin 
1808afddba49SNick Piggin 	bh = head = page_buffers(page);
1809afddba49SNick Piggin 	block_start = 0;
1810afddba49SNick Piggin 	do {
1811afddba49SNick Piggin 		block_end = block_start + bh->b_size;
1812afddba49SNick Piggin 
1813afddba49SNick Piggin 		if (buffer_new(bh)) {
1814afddba49SNick Piggin 			if (block_end > from && block_start < to) {
1815afddba49SNick Piggin 				if (!PageUptodate(page)) {
1816afddba49SNick Piggin 					unsigned start, size;
1817afddba49SNick Piggin 
1818afddba49SNick Piggin 					start = max(from, block_start);
1819afddba49SNick Piggin 					size = min(to, block_end) - start;
1820afddba49SNick Piggin 
1821eebd2aa3SChristoph Lameter 					zero_user(page, start, size);
1822afddba49SNick Piggin 					set_buffer_uptodate(bh);
1823afddba49SNick Piggin 				}
1824afddba49SNick Piggin 
1825afddba49SNick Piggin 				clear_buffer_new(bh);
1826afddba49SNick Piggin 				mark_buffer_dirty(bh);
1827afddba49SNick Piggin 			}
1828afddba49SNick Piggin 		}
1829afddba49SNick Piggin 
1830afddba49SNick Piggin 		block_start = block_end;
1831afddba49SNick Piggin 		bh = bh->b_this_page;
1832afddba49SNick Piggin 	} while (bh != head);
1833afddba49SNick Piggin }
1834afddba49SNick Piggin EXPORT_SYMBOL(page_zero_new_buffers);
1835afddba49SNick Piggin 
18361da177e4SLinus Torvalds static int __block_prepare_write(struct inode *inode, struct page *page,
18371da177e4SLinus Torvalds 		unsigned from, unsigned to, get_block_t *get_block)
18381da177e4SLinus Torvalds {
18391da177e4SLinus Torvalds 	unsigned block_start, block_end;
18401da177e4SLinus Torvalds 	sector_t block;
18411da177e4SLinus Torvalds 	int err = 0;
18421da177e4SLinus Torvalds 	unsigned blocksize, bbits;
18431da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
18441da177e4SLinus Torvalds 
18451da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
18461da177e4SLinus Torvalds 	BUG_ON(from > PAGE_CACHE_SIZE);
18471da177e4SLinus Torvalds 	BUG_ON(to > PAGE_CACHE_SIZE);
18481da177e4SLinus Torvalds 	BUG_ON(from > to);
18491da177e4SLinus Torvalds 
18501da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
18511da177e4SLinus Torvalds 	if (!page_has_buffers(page))
18521da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
18531da177e4SLinus Torvalds 	head = page_buffers(page);
18541da177e4SLinus Torvalds 
18551da177e4SLinus Torvalds 	bbits = inode->i_blkbits;
18561da177e4SLinus Torvalds 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
18571da177e4SLinus Torvalds 
18581da177e4SLinus Torvalds 	for(bh = head, block_start = 0; bh != head || !block_start;
18591da177e4SLinus Torvalds 	    block++, block_start=block_end, bh = bh->b_this_page) {
18601da177e4SLinus Torvalds 		block_end = block_start + blocksize;
18611da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
18621da177e4SLinus Torvalds 			if (PageUptodate(page)) {
18631da177e4SLinus Torvalds 				if (!buffer_uptodate(bh))
18641da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
18651da177e4SLinus Torvalds 			}
18661da177e4SLinus Torvalds 			continue;
18671da177e4SLinus Torvalds 		}
18681da177e4SLinus Torvalds 		if (buffer_new(bh))
18691da177e4SLinus Torvalds 			clear_buffer_new(bh);
18701da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
1871b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
18721da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
18731da177e4SLinus Torvalds 			if (err)
1874f3ddbdc6SNick Piggin 				break;
18751da177e4SLinus Torvalds 			if (buffer_new(bh)) {
18761da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
18771da177e4SLinus Torvalds 							bh->b_blocknr);
18781da177e4SLinus Torvalds 				if (PageUptodate(page)) {
1879637aff46SNick Piggin 					clear_buffer_new(bh);
18801da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
1881637aff46SNick Piggin 					mark_buffer_dirty(bh);
18821da177e4SLinus Torvalds 					continue;
18831da177e4SLinus Torvalds 				}
1884eebd2aa3SChristoph Lameter 				if (block_end > to || block_start < from)
1885eebd2aa3SChristoph Lameter 					zero_user_segments(page,
1886eebd2aa3SChristoph Lameter 						to, block_end,
1887eebd2aa3SChristoph Lameter 						block_start, from);
18881da177e4SLinus Torvalds 				continue;
18891da177e4SLinus Torvalds 			}
18901da177e4SLinus Torvalds 		}
18911da177e4SLinus Torvalds 		if (PageUptodate(page)) {
18921da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
18931da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
18941da177e4SLinus Torvalds 			continue;
18951da177e4SLinus Torvalds 		}
18961da177e4SLinus Torvalds 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
189733a266ddSDavid Chinner 		    !buffer_unwritten(bh) &&
18981da177e4SLinus Torvalds 		     (block_start < from || block_end > to)) {
18991da177e4SLinus Torvalds 			ll_rw_block(READ, 1, &bh);
19001da177e4SLinus Torvalds 			*wait_bh++=bh;
19011da177e4SLinus Torvalds 		}
19021da177e4SLinus Torvalds 	}
19031da177e4SLinus Torvalds 	/*
19041da177e4SLinus Torvalds 	 * If we issued read requests - let them complete.
19051da177e4SLinus Torvalds 	 */
19061da177e4SLinus Torvalds 	while(wait_bh > wait) {
19071da177e4SLinus Torvalds 		wait_on_buffer(*--wait_bh);
19081da177e4SLinus Torvalds 		if (!buffer_uptodate(*wait_bh))
1909f3ddbdc6SNick Piggin 			err = -EIO;
19101da177e4SLinus Torvalds 	}
1911afddba49SNick Piggin 	if (unlikely(err))
1912afddba49SNick Piggin 		page_zero_new_buffers(page, from, to);
19131da177e4SLinus Torvalds 	return err;
19141da177e4SLinus Torvalds }
19151da177e4SLinus Torvalds 
19161da177e4SLinus Torvalds static int __block_commit_write(struct inode *inode, struct page *page,
19171da177e4SLinus Torvalds 		unsigned from, unsigned to)
19181da177e4SLinus Torvalds {
19191da177e4SLinus Torvalds 	unsigned block_start, block_end;
19201da177e4SLinus Torvalds 	int partial = 0;
19211da177e4SLinus Torvalds 	unsigned blocksize;
19221da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
19231da177e4SLinus Torvalds 
19241da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
19251da177e4SLinus Torvalds 
19261da177e4SLinus Torvalds 	for(bh = head = page_buffers(page), block_start = 0;
19271da177e4SLinus Torvalds 	    bh != head || !block_start;
19281da177e4SLinus Torvalds 	    block_start=block_end, bh = bh->b_this_page) {
19291da177e4SLinus Torvalds 		block_end = block_start + blocksize;
19301da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
19311da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
19321da177e4SLinus Torvalds 				partial = 1;
19331da177e4SLinus Torvalds 		} else {
19341da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
19351da177e4SLinus Torvalds 			mark_buffer_dirty(bh);
19361da177e4SLinus Torvalds 		}
1937afddba49SNick Piggin 		clear_buffer_new(bh);
19381da177e4SLinus Torvalds 	}
19391da177e4SLinus Torvalds 
19401da177e4SLinus Torvalds 	/*
19411da177e4SLinus Torvalds 	 * If this is a partial write which happened to make all buffers
19421da177e4SLinus Torvalds 	 * uptodate then we can optimize away a bogus readpage() for
19431da177e4SLinus Torvalds 	 * the next read(). Here we 'discover' whether the page went
19441da177e4SLinus Torvalds 	 * uptodate as a result of this (potentially partial) write.
19451da177e4SLinus Torvalds 	 */
19461da177e4SLinus Torvalds 	if (!partial)
19471da177e4SLinus Torvalds 		SetPageUptodate(page);
19481da177e4SLinus Torvalds 	return 0;
19491da177e4SLinus Torvalds }
19501da177e4SLinus Torvalds 
19511da177e4SLinus Torvalds /*
19527bb46a67Snpiggin@suse.de  * Filesystems implementing the new truncate sequence should use the
19537bb46a67Snpiggin@suse.de  * _newtrunc postfix variant which won't incorrectly call vmtruncate.
19547bb46a67Snpiggin@suse.de  * The filesystem needs to handle block truncation upon failure.
1955afddba49SNick Piggin  */
19567bb46a67Snpiggin@suse.de int block_write_begin_newtrunc(struct file *file, struct address_space *mapping,
1957afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
1958afddba49SNick Piggin 			struct page **pagep, void **fsdata,
1959afddba49SNick Piggin 			get_block_t *get_block)
1960afddba49SNick Piggin {
1961afddba49SNick Piggin 	struct inode *inode = mapping->host;
1962afddba49SNick Piggin 	int status = 0;
1963afddba49SNick Piggin 	struct page *page;
1964afddba49SNick Piggin 	pgoff_t index;
1965afddba49SNick Piggin 	unsigned start, end;
1966afddba49SNick Piggin 	int ownpage = 0;
1967afddba49SNick Piggin 
1968afddba49SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
1969afddba49SNick Piggin 	start = pos & (PAGE_CACHE_SIZE - 1);
1970afddba49SNick Piggin 	end = start + len;
1971afddba49SNick Piggin 
1972afddba49SNick Piggin 	page = *pagep;
1973afddba49SNick Piggin 	if (page == NULL) {
1974afddba49SNick Piggin 		ownpage = 1;
197554566b2cSNick Piggin 		page = grab_cache_page_write_begin(mapping, index, flags);
1976afddba49SNick Piggin 		if (!page) {
1977afddba49SNick Piggin 			status = -ENOMEM;
1978afddba49SNick Piggin 			goto out;
1979afddba49SNick Piggin 		}
1980afddba49SNick Piggin 		*pagep = page;
1981afddba49SNick Piggin 	} else
1982afddba49SNick Piggin 		BUG_ON(!PageLocked(page));
1983afddba49SNick Piggin 
1984afddba49SNick Piggin 	status = __block_prepare_write(inode, page, start, end, get_block);
1985afddba49SNick Piggin 	if (unlikely(status)) {
1986afddba49SNick Piggin 		ClearPageUptodate(page);
1987afddba49SNick Piggin 
1988afddba49SNick Piggin 		if (ownpage) {
1989afddba49SNick Piggin 			unlock_page(page);
1990afddba49SNick Piggin 			page_cache_release(page);
1991afddba49SNick Piggin 			*pagep = NULL;
1992afddba49SNick Piggin 		}
1993afddba49SNick Piggin 	}
1994afddba49SNick Piggin 
1995afddba49SNick Piggin out:
1996afddba49SNick Piggin 	return status;
1997afddba49SNick Piggin }
19987bb46a67Snpiggin@suse.de EXPORT_SYMBOL(block_write_begin_newtrunc);
19997bb46a67Snpiggin@suse.de 
20007bb46a67Snpiggin@suse.de /*
20017bb46a67Snpiggin@suse.de  * block_write_begin takes care of the basic task of block allocation and
20027bb46a67Snpiggin@suse.de  * bringing partial write blocks uptodate first.
20037bb46a67Snpiggin@suse.de  *
20047bb46a67Snpiggin@suse.de  * If *pagep is not NULL, then block_write_begin uses the locked page
20057bb46a67Snpiggin@suse.de  * at *pagep rather than allocating its own. In this case, the page will
20067bb46a67Snpiggin@suse.de  * not be unlocked or deallocated on failure.
20077bb46a67Snpiggin@suse.de  */
20087bb46a67Snpiggin@suse.de int block_write_begin(struct file *file, struct address_space *mapping,
20097bb46a67Snpiggin@suse.de 			loff_t pos, unsigned len, unsigned flags,
20107bb46a67Snpiggin@suse.de 			struct page **pagep, void **fsdata,
20117bb46a67Snpiggin@suse.de 			get_block_t *get_block)
20127bb46a67Snpiggin@suse.de {
20137bb46a67Snpiggin@suse.de 	int ret;
20147bb46a67Snpiggin@suse.de 
20157bb46a67Snpiggin@suse.de 	ret = block_write_begin_newtrunc(file, mapping, pos, len, flags,
20167bb46a67Snpiggin@suse.de 					pagep, fsdata, get_block);
20177bb46a67Snpiggin@suse.de 
20187bb46a67Snpiggin@suse.de 	/*
20197bb46a67Snpiggin@suse.de 	 * prepare_write() may have instantiated a few blocks
20207bb46a67Snpiggin@suse.de 	 * outside i_size.  Trim these off again. Don't need
20217bb46a67Snpiggin@suse.de 	 * i_size_read because we hold i_mutex.
20227bb46a67Snpiggin@suse.de 	 *
20237bb46a67Snpiggin@suse.de 	 * Filesystems which pass down their own page also cannot
20247bb46a67Snpiggin@suse.de 	 * call into vmtruncate here because it would lead to lock
20257bb46a67Snpiggin@suse.de 	 * inversion problems (*pagep is locked). This is a further
20267bb46a67Snpiggin@suse.de 	 * example of where the old truncate sequence is inadequate.
20277bb46a67Snpiggin@suse.de 	 */
20287bb46a67Snpiggin@suse.de 	if (unlikely(ret) && *pagep == NULL) {
20297bb46a67Snpiggin@suse.de 		loff_t isize = mapping->host->i_size;
20307bb46a67Snpiggin@suse.de 		if (pos + len > isize)
20317bb46a67Snpiggin@suse.de 			vmtruncate(mapping->host, isize);
20327bb46a67Snpiggin@suse.de 	}
20337bb46a67Snpiggin@suse.de 
20347bb46a67Snpiggin@suse.de 	return ret;
20357bb46a67Snpiggin@suse.de }
2036afddba49SNick Piggin EXPORT_SYMBOL(block_write_begin);
2037afddba49SNick Piggin 
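/*
 * Sketch of a filesystem ->write_begin built on block_write_begin()
 * ("myfs_get_block" stands in for the filesystem's real block-mapping
 * routine).  *pagep starts out NULL, so block_write_begin() grabs and
 * locks its own pagecache page.
 */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create);

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags,
				pagep, fsdata, myfs_get_block);
}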
2038afddba49SNick Piggin int block_write_end(struct file *file, struct address_space *mapping,
2039afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2040afddba49SNick Piggin 			struct page *page, void *fsdata)
2041afddba49SNick Piggin {
2042afddba49SNick Piggin 	struct inode *inode = mapping->host;
2043afddba49SNick Piggin 	unsigned start;
2044afddba49SNick Piggin 
2045afddba49SNick Piggin 	start = pos & (PAGE_CACHE_SIZE - 1);
2046afddba49SNick Piggin 
2047afddba49SNick Piggin 	if (unlikely(copied < len)) {
2048afddba49SNick Piggin 		/*
2049afddba49SNick Piggin 		 * The buffers that were written will now be uptodate, so we
2050afddba49SNick Piggin 		 * don't have to worry about a readpage reading them and
2051afddba49SNick Piggin 		 * overwriting a partial write. However if we have encountered
2052afddba49SNick Piggin 		 * a short write and only partially written into a buffer, it
2053afddba49SNick Piggin 		 * will not be marked uptodate, so a readpage might come in and
2054afddba49SNick Piggin 		 * destroy our partial write.
2055afddba49SNick Piggin 		 *
2056afddba49SNick Piggin 		 * Do the simplest thing, and just treat any short write to a
2057afddba49SNick Piggin 		 * non uptodate page as a zero-length write, and force the
2058afddba49SNick Piggin 		 * caller to redo the whole thing.
2059afddba49SNick Piggin 		 */
2060afddba49SNick Piggin 		if (!PageUptodate(page))
2061afddba49SNick Piggin 			copied = 0;
2062afddba49SNick Piggin 
2063afddba49SNick Piggin 		page_zero_new_buffers(page, start+copied, start+len);
2064afddba49SNick Piggin 	}
2065afddba49SNick Piggin 	flush_dcache_page(page);
2066afddba49SNick Piggin 
2067afddba49SNick Piggin 	/* This could be a short (even 0-length) commit */
2068afddba49SNick Piggin 	__block_commit_write(inode, page, start, start+copied);
2069afddba49SNick Piggin 
2070afddba49SNick Piggin 	return copied;
2071afddba49SNick Piggin }
2072afddba49SNick Piggin EXPORT_SYMBOL(block_write_end);
2073afddba49SNick Piggin 
2074afddba49SNick Piggin int generic_write_end(struct file *file, struct address_space *mapping,
2075afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2076afddba49SNick Piggin 			struct page *page, void *fsdata)
2077afddba49SNick Piggin {
2078afddba49SNick Piggin 	struct inode *inode = mapping->host;
2079c7d206b3SJan Kara 	int i_size_changed = 0;
2080afddba49SNick Piggin 
2081afddba49SNick Piggin 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2082afddba49SNick Piggin 
2083afddba49SNick Piggin 	/*
2084afddba49SNick Piggin 	 * No need to use i_size_read() here, the i_size
2085afddba49SNick Piggin 	 * cannot change under us because we hold i_mutex.
2086afddba49SNick Piggin 	 *
2087afddba49SNick Piggin 	 * But it's important to update i_size while still holding page lock:
2088afddba49SNick Piggin 	 * page writeout could otherwise come in and zero beyond i_size.
2089afddba49SNick Piggin 	 */
2090afddba49SNick Piggin 	if (pos+copied > inode->i_size) {
2091afddba49SNick Piggin 		i_size_write(inode, pos+copied);
2092c7d206b3SJan Kara 		i_size_changed = 1;
2093afddba49SNick Piggin 	}
2094afddba49SNick Piggin 
2095afddba49SNick Piggin 	unlock_page(page);
2096afddba49SNick Piggin 	page_cache_release(page);
2097afddba49SNick Piggin 
2098c7d206b3SJan Kara 	/*
2099c7d206b3SJan Kara 	 * Don't mark the inode dirty under the page lock. First, it unnecessarily
2100c7d206b3SJan Kara 	 * lengthens the time the page lock is held. Second, it forces lock
2101c7d206b3SJan Kara 	 * ordering of page lock and transaction start for journaling
2102c7d206b3SJan Kara 	 * filesystems.
2103c7d206b3SJan Kara 	 */
2104c7d206b3SJan Kara 	if (i_size_changed)
2105c7d206b3SJan Kara 		mark_inode_dirty(inode);
2106c7d206b3SJan Kara 
2107afddba49SNick Piggin 	return copied;
2108afddba49SNick Piggin }
2109afddba49SNick Piggin EXPORT_SYMBOL(generic_write_end);
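/*
 * A minimal sketch of how a simple filesystem typically wires the helpers
 * above into its address_space_operations.  "myfs" and myfs_get_block()
 * are hypothetical stand-ins for the filesystem's own names; the calling
 * pattern itself is the stock one (compare ext2).
 */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags,
				pagep, fsdata, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.write_begin	= myfs_write_begin,
	.write_end	= generic_write_end,
};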
2110afddba49SNick Piggin 
2111afddba49SNick Piggin /*
21128ab22b9aSHisashi Hifumi  * block_is_partially_uptodate checks whether buffers within a page are
21138ab22b9aSHisashi Hifumi  * uptodate or not.
21148ab22b9aSHisashi Hifumi  *
21158ab22b9aSHisashi Hifumi  * Returns true if all buffers covering the portion of the file
21168ab22b9aSHisashi Hifumi  * we want to read are uptodate.
21178ab22b9aSHisashi Hifumi  */
21188ab22b9aSHisashi Hifumi int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
21198ab22b9aSHisashi Hifumi 					unsigned long from)
21208ab22b9aSHisashi Hifumi {
21218ab22b9aSHisashi Hifumi 	struct inode *inode = page->mapping->host;
21228ab22b9aSHisashi Hifumi 	unsigned block_start, block_end, blocksize;
21238ab22b9aSHisashi Hifumi 	unsigned to;
21248ab22b9aSHisashi Hifumi 	struct buffer_head *bh, *head;
21258ab22b9aSHisashi Hifumi 	int ret = 1;
21268ab22b9aSHisashi Hifumi 
21278ab22b9aSHisashi Hifumi 	if (!page_has_buffers(page))
21288ab22b9aSHisashi Hifumi 		return 0;
21298ab22b9aSHisashi Hifumi 
21308ab22b9aSHisashi Hifumi 	blocksize = 1 << inode->i_blkbits;
21318ab22b9aSHisashi Hifumi 	to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
21328ab22b9aSHisashi Hifumi 	to = from + to;
21338ab22b9aSHisashi Hifumi 	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
21348ab22b9aSHisashi Hifumi 		return 0;
21358ab22b9aSHisashi Hifumi 
21368ab22b9aSHisashi Hifumi 	head = page_buffers(page);
21378ab22b9aSHisashi Hifumi 	bh = head;
21388ab22b9aSHisashi Hifumi 	block_start = 0;
21398ab22b9aSHisashi Hifumi 	do {
21408ab22b9aSHisashi Hifumi 		block_end = block_start + blocksize;
21418ab22b9aSHisashi Hifumi 		if (block_end > from && block_start < to) {
21428ab22b9aSHisashi Hifumi 			if (!buffer_uptodate(bh)) {
21438ab22b9aSHisashi Hifumi 				ret = 0;
21448ab22b9aSHisashi Hifumi 				break;
21458ab22b9aSHisashi Hifumi 			}
21468ab22b9aSHisashi Hifumi 			if (block_end >= to)
21478ab22b9aSHisashi Hifumi 				break;
21488ab22b9aSHisashi Hifumi 		}
21498ab22b9aSHisashi Hifumi 		block_start = block_end;
21508ab22b9aSHisashi Hifumi 		bh = bh->b_this_page;
21518ab22b9aSHisashi Hifumi 	} while (bh != head);
21528ab22b9aSHisashi Hifumi 
21538ab22b9aSHisashi Hifumi 	return ret;
21548ab22b9aSHisashi Hifumi }
21558ab22b9aSHisashi Hifumi EXPORT_SYMBOL(block_is_partially_uptodate);
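/*
 * A filesystem opts in through the a_op; e.g. the hypothetical myfs_aops
 * sketched above would grow:
 *
 *	.is_partially_uptodate	= block_is_partially_uptodate,
 */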
21568ab22b9aSHisashi Hifumi 
21578ab22b9aSHisashi Hifumi /*
21581da177e4SLinus Torvalds  * Generic "read page" function for block devices that have the normal
21591da177e4SLinus Torvalds  * get_block functionality. This covers most block device filesystems.
21601da177e4SLinus Torvalds  * Reads the page asynchronously --- the unlock_buffer() and
21611da177e4SLinus Torvalds  * set/clear_buffer_uptodate() functions propagate buffer state into the
21621da177e4SLinus Torvalds  * page struct once IO has completed.
21631da177e4SLinus Torvalds  */
21641da177e4SLinus Torvalds int block_read_full_page(struct page *page, get_block_t *get_block)
21651da177e4SLinus Torvalds {
21661da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
21671da177e4SLinus Torvalds 	sector_t iblock, lblock;
21681da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
21691da177e4SLinus Torvalds 	unsigned int blocksize;
21701da177e4SLinus Torvalds 	int nr, i;
21711da177e4SLinus Torvalds 	int fully_mapped = 1;
21721da177e4SLinus Torvalds 
2173cd7619d6SMatt Mackall 	BUG_ON(!PageLocked(page));
21741da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
21751da177e4SLinus Torvalds 	if (!page_has_buffers(page))
21761da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
21771da177e4SLinus Torvalds 	head = page_buffers(page);
21781da177e4SLinus Torvalds 
21791da177e4SLinus Torvalds 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
21801da177e4SLinus Torvalds 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
21811da177e4SLinus Torvalds 	bh = head;
21821da177e4SLinus Torvalds 	nr = 0;
21831da177e4SLinus Torvalds 	i = 0;
21841da177e4SLinus Torvalds 
21851da177e4SLinus Torvalds 	do {
21861da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
21871da177e4SLinus Torvalds 			continue;
21881da177e4SLinus Torvalds 
21891da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
2190c64610baSAndrew Morton 			int err = 0;
2191c64610baSAndrew Morton 
21921da177e4SLinus Torvalds 			fully_mapped = 0;
21931da177e4SLinus Torvalds 			if (iblock < lblock) {
2194b0cf2321SBadari Pulavarty 				WARN_ON(bh->b_size != blocksize);
2195c64610baSAndrew Morton 				err = get_block(inode, iblock, bh, 0);
2196c64610baSAndrew Morton 				if (err)
21971da177e4SLinus Torvalds 					SetPageError(page);
21981da177e4SLinus Torvalds 			}
21991da177e4SLinus Torvalds 			if (!buffer_mapped(bh)) {
2200eebd2aa3SChristoph Lameter 				zero_user(page, i * blocksize, blocksize);
2201c64610baSAndrew Morton 				if (!err)
22021da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
22031da177e4SLinus Torvalds 				continue;
22041da177e4SLinus Torvalds 			}
22051da177e4SLinus Torvalds 			/*
22061da177e4SLinus Torvalds 			 * get_block() might have updated the buffer
22071da177e4SLinus Torvalds 			 * synchronously
22081da177e4SLinus Torvalds 			 */
22091da177e4SLinus Torvalds 			if (buffer_uptodate(bh))
22101da177e4SLinus Torvalds 				continue;
22111da177e4SLinus Torvalds 		}
22121da177e4SLinus Torvalds 		arr[nr++] = bh;
22131da177e4SLinus Torvalds 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
22141da177e4SLinus Torvalds 
22151da177e4SLinus Torvalds 	if (fully_mapped)
22161da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
22171da177e4SLinus Torvalds 
22181da177e4SLinus Torvalds 	if (!nr) {
22191da177e4SLinus Torvalds 		/*
22201da177e4SLinus Torvalds 		 * All buffers are uptodate - we can set the page uptodate
22211da177e4SLinus Torvalds 		 * as well. But not if get_block() returned an error.
22221da177e4SLinus Torvalds 		 */
22231da177e4SLinus Torvalds 		if (!PageError(page))
22241da177e4SLinus Torvalds 			SetPageUptodate(page);
22251da177e4SLinus Torvalds 		unlock_page(page);
22261da177e4SLinus Torvalds 		return 0;
22271da177e4SLinus Torvalds 	}
22281da177e4SLinus Torvalds 
22291da177e4SLinus Torvalds 	/* Stage two: lock the buffers */
22301da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
22311da177e4SLinus Torvalds 		bh = arr[i];
22321da177e4SLinus Torvalds 		lock_buffer(bh);
22331da177e4SLinus Torvalds 		mark_buffer_async_read(bh);
22341da177e4SLinus Torvalds 	}
22351da177e4SLinus Torvalds 
22361da177e4SLinus Torvalds 	/*
22371da177e4SLinus Torvalds 	 * Stage 3: start the IO.  Check for uptodateness
22381da177e4SLinus Torvalds 	 * inside the buffer lock in case another process reading
22391da177e4SLinus Torvalds 	 * the underlying blockdev brought it uptodate (the sct fix).
22401da177e4SLinus Torvalds 	 */
22411da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
22421da177e4SLinus Torvalds 		bh = arr[i];
22431da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
22441da177e4SLinus Torvalds 			end_buffer_async_read(bh, 1);
22451da177e4SLinus Torvalds 		else
22461da177e4SLinus Torvalds 			submit_bh(READ, bh);
22471da177e4SLinus Torvalds 	}
22481da177e4SLinus Torvalds 	return 0;
22491da177e4SLinus Torvalds }
22501fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_read_full_page);
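/*
 * A minimal sketch of the usual ->readpage wrapper; myfs_get_block() is
 * the same hypothetical block mapper as in the write_begin sketch above.
 */
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}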
22511da177e4SLinus Torvalds 
22521da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding
225389e10787SNick Piggin  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
22541da177e4SLinus Torvalds  * deal with the hole.
22551da177e4SLinus Torvalds  */
225689e10787SNick Piggin int generic_cont_expand_simple(struct inode *inode, loff_t size)
22571da177e4SLinus Torvalds {
22581da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
22591da177e4SLinus Torvalds 	struct page *page;
226089e10787SNick Piggin 	void *fsdata;
22611da177e4SLinus Torvalds 	int err;
22621da177e4SLinus Torvalds 
2263c08d3b0eSnpiggin@suse.de 	err = inode_newsize_ok(inode, size);
2264c08d3b0eSnpiggin@suse.de 	if (err)
22651da177e4SLinus Torvalds 		goto out;
22661da177e4SLinus Torvalds 
226789e10787SNick Piggin 	err = pagecache_write_begin(NULL, mapping, size, 0,
226889e10787SNick Piggin 				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
226989e10787SNick Piggin 				&page, &fsdata);
227089e10787SNick Piggin 	if (err)
227105eb0b51SOGAWA Hirofumi 		goto out;
227205eb0b51SOGAWA Hirofumi 
227389e10787SNick Piggin 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
227489e10787SNick Piggin 	BUG_ON(err > 0);
227505eb0b51SOGAWA Hirofumi 
227605eb0b51SOGAWA Hirofumi out:
227705eb0b51SOGAWA Hirofumi 	return err;
227805eb0b51SOGAWA Hirofumi }
22791fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_cont_expand_simple);
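/*
 * A sketch of the typical caller: a hole-less filesystem expanding a file
 * from its truncate/setattr path.  myfs_expand() is hypothetical;
 * fat_cont_expand() does essentially this.
 */
static int myfs_expand(struct inode *inode, loff_t new_size)
{
	int err = generic_cont_expand_simple(inode, new_size);

	if (!err) {
		inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
		mark_inode_dirty(inode);
	}
	return err;
}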
228005eb0b51SOGAWA Hirofumi 
2281f1e3af72SAdrian Bunk static int cont_expand_zero(struct file *file, struct address_space *mapping,
228289e10787SNick Piggin 			    loff_t pos, loff_t *bytes)
228305eb0b51SOGAWA Hirofumi {
228489e10787SNick Piggin 	struct inode *inode = mapping->host;
228589e10787SNick Piggin 	unsigned blocksize = 1 << inode->i_blkbits;
228689e10787SNick Piggin 	struct page *page;
228789e10787SNick Piggin 	void *fsdata;
228889e10787SNick Piggin 	pgoff_t index, curidx;
228989e10787SNick Piggin 	loff_t curpos;
229089e10787SNick Piggin 	unsigned zerofrom, offset, len;
229189e10787SNick Piggin 	int err = 0;
229205eb0b51SOGAWA Hirofumi 
229389e10787SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
229489e10787SNick Piggin 	offset = pos & ~PAGE_CACHE_MASK;
229589e10787SNick Piggin 
229689e10787SNick Piggin 	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
229789e10787SNick Piggin 		zerofrom = curpos & ~PAGE_CACHE_MASK;
229889e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
229989e10787SNick Piggin 			*bytes |= (blocksize-1);
230089e10787SNick Piggin 			(*bytes)++;
230189e10787SNick Piggin 		}
230289e10787SNick Piggin 		len = PAGE_CACHE_SIZE - zerofrom;
230389e10787SNick Piggin 
230489e10787SNick Piggin 		err = pagecache_write_begin(file, mapping, curpos, len,
230589e10787SNick Piggin 						AOP_FLAG_UNINTERRUPTIBLE,
230689e10787SNick Piggin 						&page, &fsdata);
230789e10787SNick Piggin 		if (err)
230889e10787SNick Piggin 			goto out;
2309eebd2aa3SChristoph Lameter 		zero_user(page, zerofrom, len);
231089e10787SNick Piggin 		err = pagecache_write_end(file, mapping, curpos, len, len,
231189e10787SNick Piggin 						page, fsdata);
231289e10787SNick Piggin 		if (err < 0)
231389e10787SNick Piggin 			goto out;
231489e10787SNick Piggin 		BUG_ON(err != len);
231589e10787SNick Piggin 		err = 0;
2316061e9746SOGAWA Hirofumi 
2317061e9746SOGAWA Hirofumi 		balance_dirty_pages_ratelimited(mapping);
231889e10787SNick Piggin 	}
231989e10787SNick Piggin 
232089e10787SNick Piggin 	/* page covers the boundary, find the boundary offset */
232189e10787SNick Piggin 	if (index == curidx) {
232289e10787SNick Piggin 		zerofrom = curpos & ~PAGE_CACHE_MASK;
232389e10787SNick Piggin 		/* if we are expanding the file, the last block will be filled */
232489e10787SNick Piggin 		if (offset <= zerofrom) {
232589e10787SNick Piggin 			goto out;
232689e10787SNick Piggin 		}
232789e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
232889e10787SNick Piggin 			*bytes |= (blocksize-1);
232989e10787SNick Piggin 			(*bytes)++;
233089e10787SNick Piggin 		}
233189e10787SNick Piggin 		len = offset - zerofrom;
233289e10787SNick Piggin 
233389e10787SNick Piggin 		err = pagecache_write_begin(file, mapping, curpos, len,
233489e10787SNick Piggin 						AOP_FLAG_UNINTERRUPTIBLE,
233589e10787SNick Piggin 						&page, &fsdata);
233689e10787SNick Piggin 		if (err)
233789e10787SNick Piggin 			goto out;
2338eebd2aa3SChristoph Lameter 		zero_user(page, zerofrom, len);
233989e10787SNick Piggin 		err = pagecache_write_end(file, mapping, curpos, len, len,
234089e10787SNick Piggin 						page, fsdata);
234189e10787SNick Piggin 		if (err < 0)
234289e10787SNick Piggin 			goto out;
234389e10787SNick Piggin 		BUG_ON(err != len);
234489e10787SNick Piggin 		err = 0;
234589e10787SNick Piggin 	}
234689e10787SNick Piggin out:
234789e10787SNick Piggin 	return err;
23481da177e4SLinus Torvalds }
23491da177e4SLinus Torvalds 
23501da177e4SLinus Torvalds /*
23511da177e4SLinus Torvalds  * For moronic filesystems that do not allow holes in files.
23521da177e4SLinus Torvalds  * We may have to extend the file.
23531da177e4SLinus Torvalds  */
23547bb46a67Snpiggin@suse.de int cont_write_begin_newtrunc(struct file *file, struct address_space *mapping,
235589e10787SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
235689e10787SNick Piggin 			struct page **pagep, void **fsdata,
235789e10787SNick Piggin 			get_block_t *get_block, loff_t *bytes)
23581da177e4SLinus Torvalds {
23591da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
23601da177e4SLinus Torvalds 	unsigned blocksize = 1 << inode->i_blkbits;
236189e10787SNick Piggin 	unsigned zerofrom;
236289e10787SNick Piggin 	int err;
23631da177e4SLinus Torvalds 
236489e10787SNick Piggin 	err = cont_expand_zero(file, mapping, pos, bytes);
236589e10787SNick Piggin 	if (err)
23661da177e4SLinus Torvalds 		goto out;
23671da177e4SLinus Torvalds 
23681da177e4SLinus Torvalds 	zerofrom = *bytes & ~PAGE_CACHE_MASK;
236989e10787SNick Piggin 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
23701da177e4SLinus Torvalds 		*bytes |= (blocksize-1);
23711da177e4SLinus Torvalds 		(*bytes)++;
23721da177e4SLinus Torvalds 	}
23731da177e4SLinus Torvalds 
237489e10787SNick Piggin 	*pagep = NULL;
23757bb46a67Snpiggin@suse.de 	err = block_write_begin_newtrunc(file, mapping, pos, len,
237689e10787SNick Piggin 				flags, pagep, fsdata, get_block);
23771da177e4SLinus Torvalds out:
237889e10787SNick Piggin 	return err;
23791da177e4SLinus Torvalds }
23807bb46a67Snpiggin@suse.de EXPORT_SYMBOL(cont_write_begin_newtrunc);
23817bb46a67Snpiggin@suse.de 
23827bb46a67Snpiggin@suse.de int cont_write_begin(struct file *file, struct address_space *mapping,
23837bb46a67Snpiggin@suse.de 			loff_t pos, unsigned len, unsigned flags,
23847bb46a67Snpiggin@suse.de 			struct page **pagep, void **fsdata,
23857bb46a67Snpiggin@suse.de 			get_block_t *get_block, loff_t *bytes)
23867bb46a67Snpiggin@suse.de {
23877bb46a67Snpiggin@suse.de 	int ret;
23887bb46a67Snpiggin@suse.de 
23897bb46a67Snpiggin@suse.de 	ret = cont_write_begin_newtrunc(file, mapping, pos, len, flags,
23907bb46a67Snpiggin@suse.de 					pagep, fsdata, get_block, bytes);
23917bb46a67Snpiggin@suse.de 	if (unlikely(ret)) {
23927bb46a67Snpiggin@suse.de 		loff_t isize = mapping->host->i_size;
23937bb46a67Snpiggin@suse.de 		if (pos + len > isize)
23947bb46a67Snpiggin@suse.de 			vmtruncate(mapping->host, isize);
23957bb46a67Snpiggin@suse.de 	}
23967bb46a67Snpiggin@suse.de 
23977bb46a67Snpiggin@suse.de 	return ret;
23987bb46a67Snpiggin@suse.de }
23991fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(cont_write_begin);
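/*
 * A sketch of a ->write_begin built on cont_write_begin().  The loff_t
 * high-water mark normally lives in the filesystem's per-inode info (FAT
 * uses MSDOS_I(inode)->mmu_private); MYFS_I() and its zeroed_size field
 * are hypothetical here.
 */
static int myfs_cont_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, flags,
				pagep, fsdata, myfs_get_block,
				&MYFS_I(mapping->host)->zeroed_size);
}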
24001da177e4SLinus Torvalds 
24011da177e4SLinus Torvalds int block_prepare_write(struct page *page, unsigned from, unsigned to,
24021da177e4SLinus Torvalds 			get_block_t *get_block)
24031da177e4SLinus Torvalds {
24041da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
24051da177e4SLinus Torvalds 	int err = __block_prepare_write(inode, page, from, to, get_block);
24061da177e4SLinus Torvalds 	if (err)
24071da177e4SLinus Torvalds 		ClearPageUptodate(page);
24081da177e4SLinus Torvalds 	return err;
24091da177e4SLinus Torvalds }
24101fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_prepare_write);
24111da177e4SLinus Torvalds 
24121da177e4SLinus Torvalds int block_commit_write(struct page *page, unsigned from, unsigned to)
24131da177e4SLinus Torvalds {
24141da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
24151da177e4SLinus Torvalds 	__block_commit_write(inode,page,from,to);
24161da177e4SLinus Torvalds 	return 0;
24171da177e4SLinus Torvalds }
24181fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_commit_write);
24191da177e4SLinus Torvalds 
242054171690SDavid Chinner /*
242154171690SDavid Chinner  * block_page_mkwrite() is not allowed to change the file size as it gets
242254171690SDavid Chinner  * called from a page fault handler when a page is first dirtied. Hence we must
242354171690SDavid Chinner  * be careful to check for EOF conditions here. We set the page up correctly
242454171690SDavid Chinner  * for a written page which means we get ENOSPC checking when writing into
242554171690SDavid Chinner  * holes and correct delalloc and unwritten extent mapping on filesystems that
242654171690SDavid Chinner  * support these features.
242754171690SDavid Chinner  *
242854171690SDavid Chinner  * We are not allowed to take the i_mutex here so we have to play games to
242954171690SDavid Chinner  * protect against truncate races as the page could now be beyond EOF.  Because
24307bb46a67Snpiggin@suse.de  * truncate writes the inode size before removing pages, once we have the
243154171690SDavid Chinner  * page lock we can determine safely if the page is beyond EOF. If it is not
243254171690SDavid Chinner  * beyond EOF, then the page is guaranteed safe against truncation until we
243354171690SDavid Chinner  * unlock the page.
243454171690SDavid Chinner  */
243554171690SDavid Chinner int
2436c2ec175cSNick Piggin block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
243754171690SDavid Chinner 		   get_block_t get_block)
243854171690SDavid Chinner {
2439c2ec175cSNick Piggin 	struct page *page = vmf->page;
244054171690SDavid Chinner 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
244154171690SDavid Chinner 	unsigned long end;
244254171690SDavid Chinner 	loff_t size;
244356a76f82SNick Piggin 	int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
244454171690SDavid Chinner 
244554171690SDavid Chinner 	lock_page(page);
244654171690SDavid Chinner 	size = i_size_read(inode);
244754171690SDavid Chinner 	if ((page->mapping != inode->i_mapping) ||
244818336338SNick Piggin 	    (page_offset(page) > size)) {
244954171690SDavid Chinner 		/* page got truncated out from underneath us */
2450b827e496SNick Piggin 		unlock_page(page);
2451b827e496SNick Piggin 		goto out;
245254171690SDavid Chinner 	}
245354171690SDavid Chinner 
245454171690SDavid Chinner 	/* page is wholly or partially inside EOF */
245554171690SDavid Chinner 	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
245654171690SDavid Chinner 		end = size & ~PAGE_CACHE_MASK;
245754171690SDavid Chinner 	else
245854171690SDavid Chinner 		end = PAGE_CACHE_SIZE;
245954171690SDavid Chinner 
246054171690SDavid Chinner 	ret = block_prepare_write(page, 0, end, get_block);
246154171690SDavid Chinner 	if (!ret)
246254171690SDavid Chinner 		ret = block_commit_write(page, 0, end);
246354171690SDavid Chinner 
246456a76f82SNick Piggin 	if (unlikely(ret)) {
2465b827e496SNick Piggin 		unlock_page(page);
246656a76f82SNick Piggin 		if (ret == -ENOMEM)
246756a76f82SNick Piggin 			ret = VM_FAULT_OOM;
246856a76f82SNick Piggin 		else /* -ENOSPC, -EIO, etc */
2469c2ec175cSNick Piggin 			ret = VM_FAULT_SIGBUS;
2470b827e496SNick Piggin 	} else
2471b827e496SNick Piggin 		ret = VM_FAULT_LOCKED;
2472c2ec175cSNick Piggin 
2473b827e496SNick Piggin out:
247454171690SDavid Chinner 	return ret;
247554171690SDavid Chinner }
24761fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_page_mkwrite);
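/*
 * A sketch of the usual consumer: ->page_mkwrite in the file's vm_ops,
 * with the read side left to filemap_fault().  All myfs_* names are
 * hypothetical.
 */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, myfs_get_block);
}

static const struct vm_operations_struct myfs_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};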
24771da177e4SLinus Torvalds 
24781da177e4SLinus Torvalds /*
247903158cd7SNick Piggin  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
24801da177e4SLinus Torvalds  * immediately, while under the page lock.  So it needs a special end_io
24811da177e4SLinus Torvalds  * handler which does not touch the bh after unlocking it.
24821da177e4SLinus Torvalds  */
24831da177e4SLinus Torvalds static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
24841da177e4SLinus Torvalds {
248568671f35SDmitry Monakhov 	__end_buffer_read_notouch(bh, uptodate);
24861da177e4SLinus Torvalds }
24871da177e4SLinus Torvalds 
24881da177e4SLinus Torvalds /*
248903158cd7SNick Piggin  * Attach the singly-linked list of buffers created by nobh_write_begin, to
249003158cd7SNick Piggin  * the page (converting it to circular linked list and taking care of page
249103158cd7SNick Piggin  * dirty races).
249203158cd7SNick Piggin  */
249303158cd7SNick Piggin static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
249403158cd7SNick Piggin {
249503158cd7SNick Piggin 	struct buffer_head *bh;
249603158cd7SNick Piggin 
249703158cd7SNick Piggin 	BUG_ON(!PageLocked(page));
249803158cd7SNick Piggin 
249903158cd7SNick Piggin 	spin_lock(&page->mapping->private_lock);
250003158cd7SNick Piggin 	bh = head;
250103158cd7SNick Piggin 	do {
250203158cd7SNick Piggin 		if (PageDirty(page))
250303158cd7SNick Piggin 			set_buffer_dirty(bh);
250403158cd7SNick Piggin 		if (!bh->b_this_page)
250503158cd7SNick Piggin 			bh->b_this_page = head;
250603158cd7SNick Piggin 		bh = bh->b_this_page;
250703158cd7SNick Piggin 	} while (bh != head);
250803158cd7SNick Piggin 	attach_page_buffers(page, head);
250903158cd7SNick Piggin 	spin_unlock(&page->mapping->private_lock);
251003158cd7SNick Piggin }
251103158cd7SNick Piggin 
251203158cd7SNick Piggin /*
2513*ea0f04e5SChristoph Hellwig  * On entry, no part of the page is uptodate.
2514*ea0f04e5SChristoph Hellwig  * On exit, the page is fully uptodate in the areas outside (from,to).
25157bb46a67Snpiggin@suse.de  * The filesystem needs to handle block truncation upon failure.
25161da177e4SLinus Torvalds  */
2517*ea0f04e5SChristoph Hellwig int nobh_write_begin(struct address_space *mapping,
251803158cd7SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
251903158cd7SNick Piggin 			struct page **pagep, void **fsdata,
25201da177e4SLinus Torvalds 			get_block_t *get_block)
25211da177e4SLinus Torvalds {
252203158cd7SNick Piggin 	struct inode *inode = mapping->host;
25231da177e4SLinus Torvalds 	const unsigned blkbits = inode->i_blkbits;
25241da177e4SLinus Torvalds 	const unsigned blocksize = 1 << blkbits;
2525a4b0672dSNick Piggin 	struct buffer_head *head, *bh;
252603158cd7SNick Piggin 	struct page *page;
252703158cd7SNick Piggin 	pgoff_t index;
252803158cd7SNick Piggin 	unsigned from, to;
25291da177e4SLinus Torvalds 	unsigned block_in_page;
2530a4b0672dSNick Piggin 	unsigned block_start, block_end;
25311da177e4SLinus Torvalds 	sector_t block_in_file;
25321da177e4SLinus Torvalds 	int nr_reads = 0;
25331da177e4SLinus Torvalds 	int ret = 0;
25341da177e4SLinus Torvalds 	int is_mapped_to_disk = 1;
25351da177e4SLinus Torvalds 
253603158cd7SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
253703158cd7SNick Piggin 	from = pos & (PAGE_CACHE_SIZE - 1);
253803158cd7SNick Piggin 	to = from + len;
253903158cd7SNick Piggin 
254054566b2cSNick Piggin 	page = grab_cache_page_write_begin(mapping, index, flags);
254103158cd7SNick Piggin 	if (!page)
254203158cd7SNick Piggin 		return -ENOMEM;
254303158cd7SNick Piggin 	*pagep = page;
254403158cd7SNick Piggin 	*fsdata = NULL;
254503158cd7SNick Piggin 
254603158cd7SNick Piggin 	if (page_has_buffers(page)) {
254703158cd7SNick Piggin 		unlock_page(page);
254803158cd7SNick Piggin 		page_cache_release(page);
254903158cd7SNick Piggin 		*pagep = NULL;
2550*ea0f04e5SChristoph Hellwig 		return block_write_begin_newtrunc(NULL, mapping, pos, len,
25517bb46a67Snpiggin@suse.de 					flags, pagep, fsdata, get_block);
255203158cd7SNick Piggin 	}
2553a4b0672dSNick Piggin 
25541da177e4SLinus Torvalds 	if (PageMappedToDisk(page))
25551da177e4SLinus Torvalds 		return 0;
25561da177e4SLinus Torvalds 
2557a4b0672dSNick Piggin 	/*
2558a4b0672dSNick Piggin 	 * Allocate buffers so that we can keep track of state, and potentially
2559a4b0672dSNick Piggin 	 * attach them to the page if an error occurs. In the common case of
2560a4b0672dSNick Piggin 	 * no error, they will just be freed again without ever being attached
2561a4b0672dSNick Piggin 	 * to the page (which is all OK, because we're under the page lock).
2562a4b0672dSNick Piggin 	 *
2563a4b0672dSNick Piggin 	 * Be careful: the buffer linked list is a NULL terminated one, rather
2564a4b0672dSNick Piggin 	 * than the circular one we're used to.
2565a4b0672dSNick Piggin 	 */
2566a4b0672dSNick Piggin 	head = alloc_page_buffers(page, blocksize, 0);
256703158cd7SNick Piggin 	if (!head) {
256803158cd7SNick Piggin 		ret = -ENOMEM;
256903158cd7SNick Piggin 		goto out_release;
257003158cd7SNick Piggin 	}
2571a4b0672dSNick Piggin 
25721da177e4SLinus Torvalds 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
25731da177e4SLinus Torvalds 
25741da177e4SLinus Torvalds 	/*
25751da177e4SLinus Torvalds 	 * We loop across all blocks in the page, whether or not they are
25761da177e4SLinus Torvalds 	 * part of the affected region.  This is so we can discover if the
25771da177e4SLinus Torvalds 	 * page is fully mapped-to-disk.
25781da177e4SLinus Torvalds 	 */
2579a4b0672dSNick Piggin 	for (block_start = 0, block_in_page = 0, bh = head;
25801da177e4SLinus Torvalds 		  block_start < PAGE_CACHE_SIZE;
2581a4b0672dSNick Piggin 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
25821da177e4SLinus Torvalds 		int create;
25831da177e4SLinus Torvalds 
2584a4b0672dSNick Piggin 		block_end = block_start + blocksize;
2585a4b0672dSNick Piggin 		bh->b_state = 0;
25861da177e4SLinus Torvalds 		create = 1;
25871da177e4SLinus Torvalds 		if (block_start >= to)
25881da177e4SLinus Torvalds 			create = 0;
25891da177e4SLinus Torvalds 		ret = get_block(inode, block_in_file + block_in_page,
2590a4b0672dSNick Piggin 					bh, create);
25911da177e4SLinus Torvalds 		if (ret)
25921da177e4SLinus Torvalds 			goto failed;
2593a4b0672dSNick Piggin 		if (!buffer_mapped(bh))
25941da177e4SLinus Torvalds 			is_mapped_to_disk = 0;
2595a4b0672dSNick Piggin 		if (buffer_new(bh))
2596a4b0672dSNick Piggin 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2597a4b0672dSNick Piggin 		if (PageUptodate(page)) {
2598a4b0672dSNick Piggin 			set_buffer_uptodate(bh);
25991da177e4SLinus Torvalds 			continue;
2600a4b0672dSNick Piggin 		}
2601a4b0672dSNick Piggin 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2602eebd2aa3SChristoph Lameter 			zero_user_segments(page, block_start, from,
2603eebd2aa3SChristoph Lameter 							to, block_end);
26041da177e4SLinus Torvalds 			continue;
26051da177e4SLinus Torvalds 		}
2606a4b0672dSNick Piggin 		if (buffer_uptodate(bh))
26071da177e4SLinus Torvalds 			continue;	/* reiserfs does this */
26081da177e4SLinus Torvalds 		if (block_start < from || block_end > to) {
2609a4b0672dSNick Piggin 			lock_buffer(bh);
2610a4b0672dSNick Piggin 			bh->b_end_io = end_buffer_read_nobh;
2611a4b0672dSNick Piggin 			submit_bh(READ, bh);
2612a4b0672dSNick Piggin 			nr_reads++;
26131da177e4SLinus Torvalds 		}
26141da177e4SLinus Torvalds 	}
26151da177e4SLinus Torvalds 
26161da177e4SLinus Torvalds 	if (nr_reads) {
26171da177e4SLinus Torvalds 		/*
26181da177e4SLinus Torvalds 		 * The page is locked, so these buffers are protected from
26191da177e4SLinus Torvalds 		 * any VM or truncate activity.  Hence we don't need to care
26201da177e4SLinus Torvalds 		 * for the buffer_head refcounts.
26211da177e4SLinus Torvalds 		 */
2622a4b0672dSNick Piggin 		for (bh = head; bh; bh = bh->b_this_page) {
26231da177e4SLinus Torvalds 			wait_on_buffer(bh);
26241da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
26251da177e4SLinus Torvalds 				ret = -EIO;
26261da177e4SLinus Torvalds 		}
26271da177e4SLinus Torvalds 		if (ret)
26281da177e4SLinus Torvalds 			goto failed;
26291da177e4SLinus Torvalds 	}
26301da177e4SLinus Torvalds 
26311da177e4SLinus Torvalds 	if (is_mapped_to_disk)
26321da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
26331da177e4SLinus Torvalds 
263403158cd7SNick Piggin 	*fsdata = head; /* to be released by nobh_write_end */
2635a4b0672dSNick Piggin 
26361da177e4SLinus Torvalds 	return 0;
26371da177e4SLinus Torvalds 
26381da177e4SLinus Torvalds failed:
263903158cd7SNick Piggin 	BUG_ON(!ret);
26401da177e4SLinus Torvalds 	/*
2641a4b0672dSNick Piggin 	 * Error recovery is a bit difficult. We need to zero out blocks that
2642a4b0672dSNick Piggin 	 * were newly allocated, and dirty them to ensure they get written out.
2643a4b0672dSNick Piggin 	 * Buffers need to be attached to the page at this point, otherwise
2644a4b0672dSNick Piggin 	 * the handling of potential IO errors during writeout would be hard
2645a4b0672dSNick Piggin 	 * (could try doing synchronous writeout, but what if that fails too?)
26461da177e4SLinus Torvalds 	 */
264703158cd7SNick Piggin 	attach_nobh_buffers(page, head);
264803158cd7SNick Piggin 	page_zero_new_buffers(page, from, to);
2649a4b0672dSNick Piggin 
265003158cd7SNick Piggin out_release:
265103158cd7SNick Piggin 	unlock_page(page);
265203158cd7SNick Piggin 	page_cache_release(page);
265303158cd7SNick Piggin 	*pagep = NULL;
2654a4b0672dSNick Piggin 
26557bb46a67Snpiggin@suse.de 	return ret;
26567bb46a67Snpiggin@suse.de }
265703158cd7SNick Piggin EXPORT_SYMBOL(nobh_write_begin);
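/*
 * A sketch of the wrapper a "nobh"-mode filesystem would provide; note
 * that nobh_write_begin() no longer takes the file argument.
 * myfs_get_block() is hypothetical.
 */
static int myfs_nobh_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block);
}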
26581da177e4SLinus Torvalds 
265903158cd7SNick Piggin int nobh_write_end(struct file *file, struct address_space *mapping,
266003158cd7SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
266103158cd7SNick Piggin 			struct page *page, void *fsdata)
26621da177e4SLinus Torvalds {
26631da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
2664efdc3131SNick Piggin 	struct buffer_head *head = fsdata;
266503158cd7SNick Piggin 	struct buffer_head *bh;
26665b41e74aSDmitri Monakhov 	BUG_ON(fsdata != NULL && page_has_buffers(page));
26671da177e4SLinus Torvalds 
2668d4cf109fSDave Kleikamp 	if (unlikely(copied < len) && head)
266903158cd7SNick Piggin 		attach_nobh_buffers(page, head);
2670a4b0672dSNick Piggin 	if (page_has_buffers(page))
267103158cd7SNick Piggin 		return generic_write_end(file, mapping, pos, len,
267203158cd7SNick Piggin 					copied, page, fsdata);
2673a4b0672dSNick Piggin 
267422c8ca78SNick Piggin 	SetPageUptodate(page);
26751da177e4SLinus Torvalds 	set_page_dirty(page);
267603158cd7SNick Piggin 	if (pos+copied > inode->i_size) {
267703158cd7SNick Piggin 		i_size_write(inode, pos+copied);
26781da177e4SLinus Torvalds 		mark_inode_dirty(inode);
26791da177e4SLinus Torvalds 	}
268003158cd7SNick Piggin 
268103158cd7SNick Piggin 	unlock_page(page);
268203158cd7SNick Piggin 	page_cache_release(page);
268303158cd7SNick Piggin 
268403158cd7SNick Piggin 	while (head) {
268503158cd7SNick Piggin 		bh = head;
268603158cd7SNick Piggin 		head = head->b_this_page;
268703158cd7SNick Piggin 		free_buffer_head(bh);
26881da177e4SLinus Torvalds 	}
268903158cd7SNick Piggin 
269003158cd7SNick Piggin 	return copied;
269103158cd7SNick Piggin }
269203158cd7SNick Piggin EXPORT_SYMBOL(nobh_write_end);
26931da177e4SLinus Torvalds 
26941da177e4SLinus Torvalds /*
26951da177e4SLinus Torvalds  * nobh_writepage() - based on block_write_full_page() except
26961da177e4SLinus Torvalds  * that it tries to operate without attaching bufferheads to
26971da177e4SLinus Torvalds  * the page.
26981da177e4SLinus Torvalds  */
26991da177e4SLinus Torvalds int nobh_writepage(struct page *page, get_block_t *get_block,
27001da177e4SLinus Torvalds 			struct writeback_control *wbc)
27011da177e4SLinus Torvalds {
27021da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
27031da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
27041da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
27051da177e4SLinus Torvalds 	unsigned offset;
27061da177e4SLinus Torvalds 	int ret;
27071da177e4SLinus Torvalds 
27081da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
27091da177e4SLinus Torvalds 	if (page->index < end_index)
27101da177e4SLinus Torvalds 		goto out;
27111da177e4SLinus Torvalds 
27121da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
27131da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
27141da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
27151da177e4SLinus Torvalds 		/*
27161da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
27171da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
27181da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
27191da177e4SLinus Torvalds 		 */
27201da177e4SLinus Torvalds #if 0
27211da177e4SLinus Torvalds 		/* Not really sure about this - do we need this? */
27221da177e4SLinus Torvalds 		if (page->mapping->a_ops->invalidatepage)
27231da177e4SLinus Torvalds 			page->mapping->a_ops->invalidatepage(page, offset);
27241da177e4SLinus Torvalds #endif
27251da177e4SLinus Torvalds 		unlock_page(page);
27261da177e4SLinus Torvalds 		return 0; /* don't care */
27271da177e4SLinus Torvalds 	}
27281da177e4SLinus Torvalds 
27291da177e4SLinus Torvalds 	/*
27301da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
27311da177e4SLinus Torvalds 	 * writepage invocation because it may be mmapped.  "A file is mapped
27321da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
27331da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
27341da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
27351da177e4SLinus Torvalds 	 */
2736eebd2aa3SChristoph Lameter 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
27371da177e4SLinus Torvalds out:
27381da177e4SLinus Torvalds 	ret = mpage_writepage(page, get_block, wbc);
27391da177e4SLinus Torvalds 	if (ret == -EAGAIN)
274035c80d5fSChris Mason 		ret = __block_write_full_page(inode, page, get_block, wbc,
274135c80d5fSChris Mason 					      end_buffer_async_write);
27421da177e4SLinus Torvalds 	return ret;
27431da177e4SLinus Torvalds }
27441da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_writepage);
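/*
 * Putting the nobh pieces together: a sketch of the aops a filesystem
 * mounted in nobh mode (as ext2 supports) might install, reusing the
 * hypothetical myfs_* helpers sketched above.
 */
static int myfs_nobh_writepage(struct page *page,
			struct writeback_control *wbc)
{
	return nobh_writepage(page, myfs_get_block, wbc);
}

static const struct address_space_operations myfs_nobh_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_nobh_writepage,
	.write_begin	= myfs_nobh_write_begin,
	.write_end	= nobh_write_end,
};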
27451da177e4SLinus Torvalds 
274603158cd7SNick Piggin int nobh_truncate_page(struct address_space *mapping,
274703158cd7SNick Piggin 			loff_t from, get_block_t *get_block)
27481da177e4SLinus Torvalds {
27491da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
27501da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
275103158cd7SNick Piggin 	unsigned blocksize;
275203158cd7SNick Piggin 	sector_t iblock;
275303158cd7SNick Piggin 	unsigned length, pos;
275403158cd7SNick Piggin 	struct inode *inode = mapping->host;
27551da177e4SLinus Torvalds 	struct page *page;
275603158cd7SNick Piggin 	struct buffer_head map_bh;
275703158cd7SNick Piggin 	int err;
27581da177e4SLinus Torvalds 
275903158cd7SNick Piggin 	blocksize = 1 << inode->i_blkbits;
276003158cd7SNick Piggin 	length = offset & (blocksize - 1);
27611da177e4SLinus Torvalds 
276203158cd7SNick Piggin 	/* Block boundary? Nothing to do */
276303158cd7SNick Piggin 	if (!length)
276403158cd7SNick Piggin 		return 0;
276503158cd7SNick Piggin 
276603158cd7SNick Piggin 	length = blocksize - length;
276703158cd7SNick Piggin 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
276803158cd7SNick Piggin 
27691da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
277003158cd7SNick Piggin 	err = -ENOMEM;
27711da177e4SLinus Torvalds 	if (!page)
27721da177e4SLinus Torvalds 		goto out;
27731da177e4SLinus Torvalds 
277403158cd7SNick Piggin 	if (page_has_buffers(page)) {
277503158cd7SNick Piggin has_buffers:
277603158cd7SNick Piggin 		unlock_page(page);
277703158cd7SNick Piggin 		page_cache_release(page);
277803158cd7SNick Piggin 		return block_truncate_page(mapping, from, get_block);
27791da177e4SLinus Torvalds 	}
278003158cd7SNick Piggin 
278103158cd7SNick Piggin 	/* Find the buffer that contains "offset" */
278203158cd7SNick Piggin 	pos = blocksize;
278303158cd7SNick Piggin 	while (offset >= pos) {
278403158cd7SNick Piggin 		iblock++;
278503158cd7SNick Piggin 		pos += blocksize;
278603158cd7SNick Piggin 	}
278703158cd7SNick Piggin 
2788460bcf57STheodore Ts'o 	map_bh.b_size = blocksize;
2789460bcf57STheodore Ts'o 	map_bh.b_state = 0;
279003158cd7SNick Piggin 	err = get_block(inode, iblock, &map_bh, 0);
279103158cd7SNick Piggin 	if (err)
279203158cd7SNick Piggin 		goto unlock;
279303158cd7SNick Piggin 	/* unmapped? It's a hole - nothing to do */
279403158cd7SNick Piggin 	if (!buffer_mapped(&map_bh))
279503158cd7SNick Piggin 		goto unlock;
279603158cd7SNick Piggin 
279703158cd7SNick Piggin 	/* Ok, it's mapped. Make sure it's up-to-date */
279803158cd7SNick Piggin 	if (!PageUptodate(page)) {
279903158cd7SNick Piggin 		err = mapping->a_ops->readpage(NULL, page);
280003158cd7SNick Piggin 		if (err) {
280103158cd7SNick Piggin 			page_cache_release(page);
280203158cd7SNick Piggin 			goto out;
280303158cd7SNick Piggin 		}
280403158cd7SNick Piggin 		lock_page(page);
280503158cd7SNick Piggin 		if (!PageUptodate(page)) {
280603158cd7SNick Piggin 			err = -EIO;
280703158cd7SNick Piggin 			goto unlock;
280803158cd7SNick Piggin 		}
280903158cd7SNick Piggin 		if (page_has_buffers(page))
281003158cd7SNick Piggin 			goto has_buffers;
281103158cd7SNick Piggin 	}
2812eebd2aa3SChristoph Lameter 	zero_user(page, offset, length);
281303158cd7SNick Piggin 	set_page_dirty(page);
281403158cd7SNick Piggin 	err = 0;
281503158cd7SNick Piggin 
281603158cd7SNick Piggin unlock:
28171da177e4SLinus Torvalds 	unlock_page(page);
28181da177e4SLinus Torvalds 	page_cache_release(page);
28191da177e4SLinus Torvalds out:
282003158cd7SNick Piggin 	return err;
28211da177e4SLinus Torvalds }
28221da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_truncate_page);
28231da177e4SLinus Torvalds 
28241da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping,
28251da177e4SLinus Torvalds 			loff_t from, get_block_t *get_block)
28261da177e4SLinus Torvalds {
28271da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
28281da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
28291da177e4SLinus Torvalds 	unsigned blocksize;
283054b21a79SAndrew Morton 	sector_t iblock;
28311da177e4SLinus Torvalds 	unsigned length, pos;
28321da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
28331da177e4SLinus Torvalds 	struct page *page;
28341da177e4SLinus Torvalds 	struct buffer_head *bh;
28351da177e4SLinus Torvalds 	int err;
28361da177e4SLinus Torvalds 
28371da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
28381da177e4SLinus Torvalds 	length = offset & (blocksize - 1);
28391da177e4SLinus Torvalds 
28401da177e4SLinus Torvalds 	/* Block boundary? Nothing to do */
28411da177e4SLinus Torvalds 	if (!length)
28421da177e4SLinus Torvalds 		return 0;
28431da177e4SLinus Torvalds 
28441da177e4SLinus Torvalds 	length = blocksize - length;
284554b21a79SAndrew Morton 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
28461da177e4SLinus Torvalds 
28471da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
28481da177e4SLinus Torvalds 	err = -ENOMEM;
28491da177e4SLinus Torvalds 	if (!page)
28501da177e4SLinus Torvalds 		goto out;
28511da177e4SLinus Torvalds 
28521da177e4SLinus Torvalds 	if (!page_has_buffers(page))
28531da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
28541da177e4SLinus Torvalds 
28551da177e4SLinus Torvalds 	/* Find the buffer that contains "offset" */
28561da177e4SLinus Torvalds 	bh = page_buffers(page);
28571da177e4SLinus Torvalds 	pos = blocksize;
28581da177e4SLinus Torvalds 	while (offset >= pos) {
28591da177e4SLinus Torvalds 		bh = bh->b_this_page;
28601da177e4SLinus Torvalds 		iblock++;
28611da177e4SLinus Torvalds 		pos += blocksize;
28621da177e4SLinus Torvalds 	}
28631da177e4SLinus Torvalds 
28641da177e4SLinus Torvalds 	err = 0;
28651da177e4SLinus Torvalds 	if (!buffer_mapped(bh)) {
2866b0cf2321SBadari Pulavarty 		WARN_ON(bh->b_size != blocksize);
28671da177e4SLinus Torvalds 		err = get_block(inode, iblock, bh, 0);
28681da177e4SLinus Torvalds 		if (err)
28691da177e4SLinus Torvalds 			goto unlock;
28701da177e4SLinus Torvalds 		/* unmapped? It's a hole - nothing to do */
28711da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
28721da177e4SLinus Torvalds 			goto unlock;
28731da177e4SLinus Torvalds 	}
28741da177e4SLinus Torvalds 
28751da177e4SLinus Torvalds 	/* Ok, it's mapped. Make sure it's up-to-date */
28761da177e4SLinus Torvalds 	if (PageUptodate(page))
28771da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
28781da177e4SLinus Torvalds 
287933a266ddSDavid Chinner 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
28801da177e4SLinus Torvalds 		err = -EIO;
28811da177e4SLinus Torvalds 		ll_rw_block(READ, 1, &bh);
28821da177e4SLinus Torvalds 		wait_on_buffer(bh);
28831da177e4SLinus Torvalds 		/* Uhhuh. Read error. Complain and punt. */
28841da177e4SLinus Torvalds 		if (!buffer_uptodate(bh))
28851da177e4SLinus Torvalds 			goto unlock;
28861da177e4SLinus Torvalds 	}
28871da177e4SLinus Torvalds 
2888eebd2aa3SChristoph Lameter 	zero_user(page, offset, length);
28891da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
28901da177e4SLinus Torvalds 	err = 0;
28911da177e4SLinus Torvalds 
28921da177e4SLinus Torvalds unlock:
28931da177e4SLinus Torvalds 	unlock_page(page);
28941da177e4SLinus Torvalds 	page_cache_release(page);
28951da177e4SLinus Torvalds out:
28961da177e4SLinus Torvalds 	return err;
28971da177e4SLinus Torvalds }
28981fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_truncate_page);
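/*
 * A sketch of the typical truncate-path caller: zero the tail of the new
 * last block before freeing everything beyond it.  myfs_get_block() and
 * myfs_free_blocks_after() are hypothetical.
 */
static int myfs_truncate_blocks(struct inode *inode, loff_t new_size)
{
	int err = block_truncate_page(inode->i_mapping, new_size,
				      myfs_get_block);
	if (err)
		return err;
	myfs_free_blocks_after(inode, new_size);
	return 0;
}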
28991da177e4SLinus Torvalds 
29001da177e4SLinus Torvalds /*
29011da177e4SLinus Torvalds  * The generic ->writepage function for buffer-backed address_spaces;
290235c80d5fSChris Mason  * this form passes in the end_io handler used to finish the IO.
29031da177e4SLinus Torvalds  */
290435c80d5fSChris Mason int block_write_full_page_endio(struct page *page, get_block_t *get_block,
290535c80d5fSChris Mason 			struct writeback_control *wbc, bh_end_io_t *handler)
29061da177e4SLinus Torvalds {
29071da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
29081da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
29091da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
29101da177e4SLinus Torvalds 	unsigned offset;
29111da177e4SLinus Torvalds 
29121da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
29131da177e4SLinus Torvalds 	if (page->index < end_index)
291435c80d5fSChris Mason 		return __block_write_full_page(inode, page, get_block, wbc,
291535c80d5fSChris Mason 					       handler);
29161da177e4SLinus Torvalds 
29171da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
29181da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
29191da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
29201da177e4SLinus Torvalds 		/*
29211da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
29221da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
29231da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
29241da177e4SLinus Torvalds 		 */
2925aaa4059bSJan Kara 		do_invalidatepage(page, 0);
29261da177e4SLinus Torvalds 		unlock_page(page);
29271da177e4SLinus Torvalds 		return 0; /* don't care */
29281da177e4SLinus Torvalds 	}
29291da177e4SLinus Torvalds 
29301da177e4SLinus Torvalds 	/*
29311da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
29322a61aa40SAdam Buchbinder 	 * writepage invocation because it may be mmapped.  "A file is mapped
29331da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
29341da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
29351da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
29361da177e4SLinus Torvalds 	 */
2937eebd2aa3SChristoph Lameter 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
293835c80d5fSChris Mason 	return __block_write_full_page(inode, page, get_block, wbc, handler);
29391da177e4SLinus Torvalds }
29401fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_write_full_page_endio);
29411da177e4SLinus Torvalds 
294235c80d5fSChris Mason /*
294335c80d5fSChris Mason  * The generic ->writepage function for buffer-backed address_spaces
294435c80d5fSChris Mason  */
294535c80d5fSChris Mason int block_write_full_page(struct page *page, get_block_t *get_block,
294635c80d5fSChris Mason 			struct writeback_control *wbc)
294735c80d5fSChris Mason {
294835c80d5fSChris Mason 	return block_write_full_page_endio(page, get_block, wbc,
294935c80d5fSChris Mason 					   end_buffer_async_write);
295035c80d5fSChris Mason }
29511fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_write_full_page);
295235c80d5fSChris Mason 
29531da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
29541da177e4SLinus Torvalds 			    get_block_t *get_block)
29551da177e4SLinus Torvalds {
29561da177e4SLinus Torvalds 	struct buffer_head tmp;
29571da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
29581da177e4SLinus Torvalds 	tmp.b_state = 0;
29591da177e4SLinus Torvalds 	tmp.b_blocknr = 0;
2960b0cf2321SBadari Pulavarty 	tmp.b_size = 1 << inode->i_blkbits;
29611da177e4SLinus Torvalds 	get_block(inode, block, &tmp, 0);
29621da177e4SLinus Torvalds 	return tmp.b_blocknr;
29631da177e4SLinus Torvalds }
29641fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_block_bmap);
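/*
 * ->bmap (used by the FIBMAP ioctl and for swap files) is normally just
 * this one-liner; myfs_get_block() is hypothetical.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}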
29651da177e4SLinus Torvalds 
29666712ecf8SNeilBrown static void end_bio_bh_io_sync(struct bio *bio, int err)
29671da177e4SLinus Torvalds {
29681da177e4SLinus Torvalds 	struct buffer_head *bh = bio->bi_private;
29691da177e4SLinus Torvalds 
29701da177e4SLinus Torvalds 	if (err == -EOPNOTSUPP) {
29711da177e4SLinus Torvalds 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
29721da177e4SLinus Torvalds 		set_bit(BH_Eopnotsupp, &bh->b_state);
29731da177e4SLinus Torvalds 	}
29741da177e4SLinus Torvalds 
297508bafc03SKeith Mannthey 	if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
297608bafc03SKeith Mannthey 		set_bit(BH_Quiet, &bh->b_state);
297708bafc03SKeith Mannthey 
29781da177e4SLinus Torvalds 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
29791da177e4SLinus Torvalds 	bio_put(bio);
29801da177e4SLinus Torvalds }
29811da177e4SLinus Torvalds 
29821da177e4SLinus Torvalds int submit_bh(int rw, struct buffer_head * bh)
29831da177e4SLinus Torvalds {
29841da177e4SLinus Torvalds 	struct bio *bio;
29851da177e4SLinus Torvalds 	int ret = 0;
29861da177e4SLinus Torvalds 
29871da177e4SLinus Torvalds 	BUG_ON(!buffer_locked(bh));
29881da177e4SLinus Torvalds 	BUG_ON(!buffer_mapped(bh));
29891da177e4SLinus Torvalds 	BUG_ON(!bh->b_end_io);
29908fb0e342SAneesh Kumar K.V 	BUG_ON(buffer_delay(bh));
29918fb0e342SAneesh Kumar K.V 	BUG_ON(buffer_unwritten(bh));
29921da177e4SLinus Torvalds 
299348fd4f93SJens Axboe 	/*
299448fd4f93SJens Axboe 	 * Mask in barrier bit for a write (could be either a WRITE or a
299548fd4f93SJens Axboe 	 * WRITE_SYNC).
299648fd4f93SJens Axboe 	 */
299748fd4f93SJens Axboe 	if (buffer_ordered(bh) && (rw & WRITE))
299848fd4f93SJens Axboe 		rw |= WRITE_BARRIER;
29991da177e4SLinus Torvalds 
30001da177e4SLinus Torvalds 	/*
300148fd4f93SJens Axboe 	 * Only clear out a write error when rewriting
30021da177e4SLinus Torvalds 	 */
300348fd4f93SJens Axboe 	if (test_set_buffer_req(bh) && (rw & WRITE))
30041da177e4SLinus Torvalds 		clear_buffer_write_io_error(bh);
30051da177e4SLinus Torvalds 
30061da177e4SLinus Torvalds 	/*
30071da177e4SLinus Torvalds 	 * from here on down, it's all bio -- do the initial mapping,
30081da177e4SLinus Torvalds 	 * submit_bio -> generic_make_request may further map this bio around
30091da177e4SLinus Torvalds 	 */
30101da177e4SLinus Torvalds 	bio = bio_alloc(GFP_NOIO, 1);
30111da177e4SLinus Torvalds 
30121da177e4SLinus Torvalds 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
30131da177e4SLinus Torvalds 	bio->bi_bdev = bh->b_bdev;
30141da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_page = bh->b_page;
30151da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_len = bh->b_size;
30161da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
30171da177e4SLinus Torvalds 
30181da177e4SLinus Torvalds 	bio->bi_vcnt = 1;
30191da177e4SLinus Torvalds 	bio->bi_idx = 0;
30201da177e4SLinus Torvalds 	bio->bi_size = bh->b_size;
30211da177e4SLinus Torvalds 
30221da177e4SLinus Torvalds 	bio->bi_end_io = end_bio_bh_io_sync;
30231da177e4SLinus Torvalds 	bio->bi_private = bh;
30241da177e4SLinus Torvalds 
30251da177e4SLinus Torvalds 	bio_get(bio);
30261da177e4SLinus Torvalds 	submit_bio(rw, bio);
30271da177e4SLinus Torvalds 
30281da177e4SLinus Torvalds 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
30291da177e4SLinus Torvalds 		ret = -EOPNOTSUPP;
30301da177e4SLinus Torvalds 
30311da177e4SLinus Torvalds 	bio_put(bio);
30321da177e4SLinus Torvalds 	return ret;
30331da177e4SLinus Torvalds }
30341fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(submit_bh);
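/*
 * A sketch of the canonical synchronous use of submit_bh(): read one
 * mapped buffer and wait for it, much as __bread() ends up doing on a
 * cache miss.
 */
static int read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}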
30351da177e4SLinus Torvalds 
30361da177e4SLinus Torvalds /**
30371da177e4SLinus Torvalds  * ll_rw_block: low-level access to block devices (DEPRECATED)
3038a7662236SJan Kara  * @rw: whether to %READ, %WRITE, %SWRITE, or %READA (readahead)
30391da177e4SLinus Torvalds  * @nr: number of &struct buffer_heads in the array
30401da177e4SLinus Torvalds  * @bhs: array of pointers to &struct buffer_head
30411da177e4SLinus Torvalds  *
3042a7662236SJan Kara  * ll_rw_block() takes an array of pointers to &struct buffer_head, and
3043a7662236SJan Kara  * requests an I/O operation on them, either a %READ or a %WRITE.  %SWRITE
3044a7662236SJan Kara  * is like %WRITE, except that it ensures that the *current* data in the
3045a7662236SJan Kara  * buffers is sent to disk. The %READA option is described in the
3046a7662236SJan Kara  * documentation for generic_make_request(), which ll_rw_block() calls.
30471da177e4SLinus Torvalds  *
30481da177e4SLinus Torvalds  * This function drops any buffer that it cannot get a lock on (with the
3049a7662236SJan Kara  * BH_Lock state bit) unless %SWRITE is required, any buffer that appears to
3050a7662236SJan Kara  * be clean when doing a write request, and any buffer that appears to be
3051a7662236SJan Kara  * up-to-date when doing a read request.  Further, it marks as clean any
3052a7662236SJan Kara  * buffers that it submits for writing (the buffer cache won't assume that
3053a7662236SJan Kara  * they are actually clean until the buffer gets unlocked).
30541da177e4SLinus Torvalds  *
30551da177e4SLinus Torvalds  * ll_rw_block() sets b_end_io to a simple completion handler that marks
30561da177e4SLinus Torvalds  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
30571da177e4SLinus Torvalds  * any waiters.
30581da177e4SLinus Torvalds  *
30591da177e4SLinus Torvalds  * All of the buffers must be for the same device, and must also be a
30601da177e4SLinus Torvalds  * multiple of the current approved size for the device.
30611da177e4SLinus Torvalds  */
30621da177e4SLinus Torvalds void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
30631da177e4SLinus Torvalds {
30641da177e4SLinus Torvalds 	int i;
30651da177e4SLinus Torvalds 
30661da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
30671da177e4SLinus Torvalds 		struct buffer_head *bh = bhs[i];
30681da177e4SLinus Torvalds 
30699cf6b720SJens Axboe 		if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
3070a7662236SJan Kara 			lock_buffer(bh);
3071ca5de404SNick Piggin 		else if (!trylock_buffer(bh))
30721da177e4SLinus Torvalds 			continue;
30731da177e4SLinus Torvalds 
30749cf6b720SJens Axboe 		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
30759cf6b720SJens Axboe 		    rw == SWRITE_SYNC_PLUG) {
30761da177e4SLinus Torvalds 			if (test_clear_buffer_dirty(bh)) {
307776c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_write_sync;
3078e60e5c50SOGAWA Hirofumi 				get_bh(bh);
307918ce3751SJens Axboe 				if (rw == SWRITE_SYNC)
308018ce3751SJens Axboe 					submit_bh(WRITE_SYNC, bh);
308118ce3751SJens Axboe 				else
30821da177e4SLinus Torvalds 					submit_bh(WRITE, bh);
30831da177e4SLinus Torvalds 				continue;
30841da177e4SLinus Torvalds 			}
30851da177e4SLinus Torvalds 		} else {
30861da177e4SLinus Torvalds 			if (!buffer_uptodate(bh)) {
308776c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_read_sync;
3088e60e5c50SOGAWA Hirofumi 				get_bh(bh);
30891da177e4SLinus Torvalds 				submit_bh(rw, bh);
30901da177e4SLinus Torvalds 				continue;
30911da177e4SLinus Torvalds 			}
30921da177e4SLinus Torvalds 		}
30931da177e4SLinus Torvalds 		unlock_buffer(bh);
30941da177e4SLinus Torvalds 	}
30951da177e4SLinus Torvalds }
30961fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(ll_rw_block);
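/*
 * A sketch of a typical batched use: kick off both reads at once, then
 * wait on and check each buffer.  The helper name is hypothetical.
 */
static int read_two_blocks(struct buffer_head *bhs[2])
{
	int i, err = 0;

	ll_rw_block(READ, 2, bhs);
	for (i = 0; i < 2; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}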
30971da177e4SLinus Torvalds 
30981da177e4SLinus Torvalds /*
30991da177e4SLinus Torvalds  * For a data-integrity writeout, we need to wait upon any in-progress I/O
31001da177e4SLinus Torvalds  * and then start new I/O and then wait upon it.  The caller must have a ref on
31011da177e4SLinus Torvalds  * the buffer_head.
31021da177e4SLinus Torvalds  */
31031da177e4SLinus Torvalds int sync_dirty_buffer(struct buffer_head *bh)
31041da177e4SLinus Torvalds {
31051da177e4SLinus Torvalds 	int ret = 0;
31061da177e4SLinus Torvalds 
31071da177e4SLinus Torvalds 	WARN_ON(atomic_read(&bh->b_count) < 1);
31081da177e4SLinus Torvalds 	lock_buffer(bh);
31091da177e4SLinus Torvalds 	if (test_clear_buffer_dirty(bh)) {
31101da177e4SLinus Torvalds 		get_bh(bh);
31111da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_write_sync;
31121aa2a7ccSJens Axboe 		ret = submit_bh(WRITE_SYNC, bh);
31131da177e4SLinus Torvalds 		wait_on_buffer(bh);
31141da177e4SLinus Torvalds 		if (buffer_eopnotsupp(bh)) {
31151da177e4SLinus Torvalds 			clear_buffer_eopnotsupp(bh);
31161da177e4SLinus Torvalds 			ret = -EOPNOTSUPP;
31171da177e4SLinus Torvalds 		}
31181da177e4SLinus Torvalds 		if (!ret && !buffer_uptodate(bh))
31191da177e4SLinus Torvalds 			ret = -EIO;
31201da177e4SLinus Torvalds 	} else {
31211da177e4SLinus Torvalds 		unlock_buffer(bh);
31221da177e4SLinus Torvalds 	}
31231da177e4SLinus Torvalds 	return ret;
31241da177e4SLinus Torvalds }
31251fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(sync_dirty_buffer);
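/*
 * A sketch of the common metadata-commit pattern built on
 * sync_dirty_buffer(); compare ext2's superblock writeout.
 */
static int commit_metadata_block(struct buffer_head *bh)
{
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);
}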
31261da177e4SLinus Torvalds 
31271da177e4SLinus Torvalds /*
31281da177e4SLinus Torvalds  * try_to_free_buffers() checks if all the buffers on this particular page
31291da177e4SLinus Torvalds  * are unused, and releases them if so.
31301da177e4SLinus Torvalds  *
31311da177e4SLinus Torvalds  * Exclusion against try_to_free_buffers may be obtained by either
31321da177e4SLinus Torvalds  * locking the page or by holding its mapping's private_lock.
31331da177e4SLinus Torvalds  *
31341da177e4SLinus Torvalds  * If the page is dirty but all the buffers are clean then we need to
31351da177e4SLinus Torvalds  * be sure to mark the page clean as well.  This is because the page
31361da177e4SLinus Torvalds  * may be against a block device, and a later reattachment of buffers
31371da177e4SLinus Torvalds  * to a dirty page will set *all* buffers dirty.  Which would corrupt
31381da177e4SLinus Torvalds  * filesystem data on the same device.
31391da177e4SLinus Torvalds  *
31401da177e4SLinus Torvalds  * The same applies to regular filesystem pages: if all the buffers are
31411da177e4SLinus Torvalds  * clean then we set the page clean and proceed.  To do that, we require
31421da177e4SLinus Torvalds  * total exclusion from __set_page_dirty_buffers().  That is obtained with
31431da177e4SLinus Torvalds  * private_lock.
31441da177e4SLinus Torvalds  *
31451da177e4SLinus Torvalds  * try_to_free_buffers() is non-blocking.
31461da177e4SLinus Torvalds  */
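/* A buffer is busy if it is still referenced, dirty, or locked. */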
31471da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh)
31481da177e4SLinus Torvalds {
31491da177e4SLinus Torvalds 	return atomic_read(&bh->b_count) |
31501da177e4SLinus Torvalds 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
31511da177e4SLinus Torvalds }
31521da177e4SLinus Torvalds 
31531da177e4SLinus Torvalds static int
31541da177e4SLinus Torvalds drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
31551da177e4SLinus Torvalds {
31561da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);
31571da177e4SLinus Torvalds 	struct buffer_head *bh;
31581da177e4SLinus Torvalds 
31591da177e4SLinus Torvalds 	bh = head;
31601da177e4SLinus Torvalds 	do {
3161de7d5a3bSakpm@osdl.org 		if (buffer_write_io_error(bh) && page->mapping)
31621da177e4SLinus Torvalds 			set_bit(AS_EIO, &page->mapping->flags);
31631da177e4SLinus Torvalds 		if (buffer_busy(bh))
31641da177e4SLinus Torvalds 			goto failed;
31651da177e4SLinus Torvalds 		bh = bh->b_this_page;
31661da177e4SLinus Torvalds 	} while (bh != head);
31671da177e4SLinus Torvalds 
31681da177e4SLinus Torvalds 	do {
31691da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
31701da177e4SLinus Torvalds 
3171535ee2fbSJan Kara 		if (bh->b_assoc_map)
31721da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
31731da177e4SLinus Torvalds 		bh = next;
31741da177e4SLinus Torvalds 	} while (bh != head);
31751da177e4SLinus Torvalds 	*buffers_to_free = head;
31761da177e4SLinus Torvalds 	__clear_page_buffers(page);
31771da177e4SLinus Torvalds 	return 1;
31781da177e4SLinus Torvalds failed:
31791da177e4SLinus Torvalds 	return 0;
31801da177e4SLinus Torvalds }
31811da177e4SLinus Torvalds 
31821da177e4SLinus Torvalds int try_to_free_buffers(struct page *page)
31831da177e4SLinus Torvalds {
31841da177e4SLinus Torvalds 	struct address_space * const mapping = page->mapping;
31851da177e4SLinus Torvalds 	struct buffer_head *buffers_to_free = NULL;
31861da177e4SLinus Torvalds 	int ret = 0;
31871da177e4SLinus Torvalds 
31881da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
3189ecdfc978SLinus Torvalds 	if (PageWriteback(page))
31901da177e4SLinus Torvalds 		return 0;
31911da177e4SLinus Torvalds 
31921da177e4SLinus Torvalds 	if (mapping == NULL) {		/* can this still happen? */
31931da177e4SLinus Torvalds 		ret = drop_buffers(page, &buffers_to_free);
31941da177e4SLinus Torvalds 		goto out;
31951da177e4SLinus Torvalds 	}
31961da177e4SLinus Torvalds 
31971da177e4SLinus Torvalds 	spin_lock(&mapping->private_lock);
31981da177e4SLinus Torvalds 	ret = drop_buffers(page, &buffers_to_free);
3199ecdfc978SLinus Torvalds 
3200ecdfc978SLinus Torvalds 	/*
3201ecdfc978SLinus Torvalds 	 * If the filesystem writes its buffers by hand (e.g. ext3)
3202ecdfc978SLinus Torvalds 	 * then we can have clean buffers against a dirty page.  We
3203ecdfc978SLinus Torvalds 	 * clean the page here; otherwise the VM will never notice
3204ecdfc978SLinus Torvalds 	 * that the filesystem did any IO at all.
3205ecdfc978SLinus Torvalds 	 *
3206ecdfc978SLinus Torvalds 	 * Also, during truncate, discard_buffer will have marked all
3207ecdfc978SLinus Torvalds 	 * the page's buffers clean.  We discover that here and clean
3208ecdfc978SLinus Torvalds 	 * the page also.
320987df7241SNick Piggin 	 *
321087df7241SNick Piggin 	 * private_lock must be held over this entire operation in order
321187df7241SNick Piggin 	 * to synchronise against __set_page_dirty_buffers and prevent the
321287df7241SNick Piggin 	 * dirty bit from being lost.
3213ecdfc978SLinus Torvalds 	 */
3214ecdfc978SLinus Torvalds 	if (ret)
3215ecdfc978SLinus Torvalds 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
321687df7241SNick Piggin 	spin_unlock(&mapping->private_lock);
32171da177e4SLinus Torvalds out:
32181da177e4SLinus Torvalds 	if (buffers_to_free) {
32191da177e4SLinus Torvalds 		struct buffer_head *bh = buffers_to_free;
32201da177e4SLinus Torvalds 
32211da177e4SLinus Torvalds 		do {
32221da177e4SLinus Torvalds 			struct buffer_head *next = bh->b_this_page;
32231da177e4SLinus Torvalds 			free_buffer_head(bh);
32241da177e4SLinus Torvalds 			bh = next;
32251da177e4SLinus Torvalds 		} while (bh != buffers_to_free);
32261da177e4SLinus Torvalds 	}
32271da177e4SLinus Torvalds 	return ret;
32281da177e4SLinus Torvalds }
32291da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers);
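
/*
 * Illustrative sketch (not part of this file): roughly what the generic
 * fallback does for an address_space without its own ->releasepage.
 * The caller must already hold the page lock; refuse while writeback is
 * in flight, then try to drop the buffers.
 */
static int example_releasepage(struct page *page, gfp_t gfp_mask)
{
	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;	/* I/O still in flight: cannot free yet */
	return try_to_free_buffers(page);	/* 1 if freed, 0 if busy */
}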
32301da177e4SLinus Torvalds 
32313978d717SNeilBrown void block_sync_page(struct page *page)
32321da177e4SLinus Torvalds {
32331da177e4SLinus Torvalds 	struct address_space *mapping;
32341da177e4SLinus Torvalds 
32351da177e4SLinus Torvalds 	smp_mb();
32361da177e4SLinus Torvalds 	mapping = page_mapping(page);
32371da177e4SLinus Torvalds 	if (mapping)
32381da177e4SLinus Torvalds 		blk_run_backing_dev(mapping->backing_dev_info, page);
32391da177e4SLinus Torvalds }
32401fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_sync_page);
32411da177e4SLinus Torvalds 
32421da177e4SLinus Torvalds /*
32431da177e4SLinus Torvalds  * There are no bdflush tunables left.  But distributions are
32441da177e4SLinus Torvalds  * still running obsolete flush daemons, so we terminate them here.
32451da177e4SLinus Torvalds  *
32461da177e4SLinus Torvalds  * Use of bdflush() is deprecated and will be removed in a future kernel.
32475b0830cbSJens Axboe  * The `flush-X' kernel threads fully replace bdflush daemons and this call.
32481da177e4SLinus Torvalds  */
3249bdc480e3SHeiko Carstens SYSCALL_DEFINE2(bdflush, int, func, long, data)
32501da177e4SLinus Torvalds {
32511da177e4SLinus Torvalds 	static int msg_count;
32521da177e4SLinus Torvalds 
32531da177e4SLinus Torvalds 	if (!capable(CAP_SYS_ADMIN))
32541da177e4SLinus Torvalds 		return -EPERM;
32551da177e4SLinus Torvalds 
32561da177e4SLinus Torvalds 	if (msg_count < 5) {
32571da177e4SLinus Torvalds 		msg_count++;
32581da177e4SLinus Torvalds 		printk(KERN_INFO
32591da177e4SLinus Torvalds 			"warning: process `%s' used the obsolete bdflush"
32601da177e4SLinus Torvalds 			" system call\n", current->comm);
32611da177e4SLinus Torvalds 		printk(KERN_INFO "Fix your initscripts?\n");
32621da177e4SLinus Torvalds 	}
32631da177e4SLinus Torvalds 
32641da177e4SLinus Torvalds 	if (func == 1)
32651da177e4SLinus Torvalds 		do_exit(0);
32661da177e4SLinus Torvalds 	return 0;
32671da177e4SLinus Torvalds }
32681da177e4SLinus Torvalds 
32691da177e4SLinus Torvalds /*
32701da177e4SLinus Torvalds  * Buffer-head allocation
32711da177e4SLinus Torvalds  */
3272e18b890bSChristoph Lameter static struct kmem_cache *bh_cachep;
32731da177e4SLinus Torvalds 
32741da177e4SLinus Torvalds /*
32751da177e4SLinus Torvalds  * Once the number of bh's in the machine exceeds this level, we start
32761da177e4SLinus Torvalds  * stripping them in writeback.
32771da177e4SLinus Torvalds  */
32781da177e4SLinus Torvalds static int max_buffer_heads;
32791da177e4SLinus Torvalds 
32801da177e4SLinus Torvalds int buffer_heads_over_limit;
32811da177e4SLinus Torvalds 
32821da177e4SLinus Torvalds struct bh_accounting {
32831da177e4SLinus Torvalds 	int nr;			/* Number of live bh's */
32841da177e4SLinus Torvalds 	int ratelimit;		/* Limit cacheline bouncing */
32851da177e4SLinus Torvalds };
32861da177e4SLinus Torvalds 
32871da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
32881da177e4SLinus Torvalds 
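/*
 * Fold the per-CPU counts into a fresh total, but only once every 4096
 * updates on the calling CPU; buffer_heads_over_limit is therefore a
 * deliberately approximate, slightly stale value.
 */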
32891da177e4SLinus Torvalds static void recalc_bh_state(void)
32901da177e4SLinus Torvalds {
32911da177e4SLinus Torvalds 	int i;
32921da177e4SLinus Torvalds 	int tot = 0;
32931da177e4SLinus Torvalds 
32941da177e4SLinus Torvalds 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
32951da177e4SLinus Torvalds 		return;
32961da177e4SLinus Torvalds 	__get_cpu_var(bh_accounting).ratelimit = 0;
32978a143426SEric Dumazet 	for_each_online_cpu(i)
32981da177e4SLinus Torvalds 		tot += per_cpu(bh_accounting, i).nr;
32991da177e4SLinus Torvalds 	buffer_heads_over_limit = (tot > max_buffer_heads);
33001da177e4SLinus Torvalds }
33011da177e4SLinus Torvalds 
3302dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
33031da177e4SLinus Torvalds {
3304019b4d12SRichard Kennedy 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
33051da177e4SLinus Torvalds 	if (ret) {
3306a35afb83SChristoph Lameter 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3307736c7b80SCoywolf Qi Hunt 		get_cpu_var(bh_accounting).nr++;
33081da177e4SLinus Torvalds 		recalc_bh_state();
3309736c7b80SCoywolf Qi Hunt 		put_cpu_var(bh_accounting);
33101da177e4SLinus Torvalds 	}
33111da177e4SLinus Torvalds 	return ret;
33121da177e4SLinus Torvalds }
33131da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head);
33141da177e4SLinus Torvalds 
33151da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh)
33161da177e4SLinus Torvalds {
33171da177e4SLinus Torvalds 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
33181da177e4SLinus Torvalds 	kmem_cache_free(bh_cachep, bh);
3319736c7b80SCoywolf Qi Hunt 	get_cpu_var(bh_accounting).nr--;
33201da177e4SLinus Torvalds 	recalc_bh_state();
3321736c7b80SCoywolf Qi Hunt 	put_cpu_var(bh_accounting);
33221da177e4SLinus Torvalds }
33231da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head);
33241da177e4SLinus Torvalds 
33251da177e4SLinus Torvalds static void buffer_exit_cpu(int cpu)
33261da177e4SLinus Torvalds {
33271da177e4SLinus Torvalds 	int i;
33281da177e4SLinus Torvalds 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
33291da177e4SLinus Torvalds 
33301da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
33311da177e4SLinus Torvalds 		brelse(b->bhs[i]);
33321da177e4SLinus Torvalds 		b->bhs[i] = NULL;
33331da177e4SLinus Torvalds 	}
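	/* fold the dead CPU's buffer_head count into this CPU's counter */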
33348a143426SEric Dumazet 	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
33358a143426SEric Dumazet 	per_cpu(bh_accounting, cpu).nr = 0;
33368a143426SEric Dumazet 	put_cpu_var(bh_accounting);
33371da177e4SLinus Torvalds }
33381da177e4SLinus Torvalds 
33391da177e4SLinus Torvalds static int buffer_cpu_notify(struct notifier_block *self,
33401da177e4SLinus Torvalds 			      unsigned long action, void *hcpu)
33411da177e4SLinus Torvalds {
33428bb78442SRafael J. Wysocki 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
33431da177e4SLinus Torvalds 		buffer_exit_cpu((unsigned long)hcpu);
33441da177e4SLinus Torvalds 	return NOTIFY_OK;
33451da177e4SLinus Torvalds }
33461da177e4SLinus Torvalds 
3347389d1b08SAneesh Kumar K.V /**
3348a6b91919SRandy Dunlap  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3349389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head
3350389d1b08SAneesh Kumar K.V  *
3351389d1b08SAneesh Kumar K.V  * Returns true if the buffer is up-to-date; otherwise returns false
3352389d1b08SAneesh Kumar K.V  * with the buffer locked.
3353389d1b08SAneesh Kumar K.V  */
3354389d1b08SAneesh Kumar K.V int bh_uptodate_or_lock(struct buffer_head *bh)
3355389d1b08SAneesh Kumar K.V {
3356389d1b08SAneesh Kumar K.V 	if (!buffer_uptodate(bh)) {
3357389d1b08SAneesh Kumar K.V 		lock_buffer(bh);
3358389d1b08SAneesh Kumar K.V 		if (!buffer_uptodate(bh))
3359389d1b08SAneesh Kumar K.V 			return 0;
3360389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3361389d1b08SAneesh Kumar K.V 	}
3362389d1b08SAneesh Kumar K.V 	return 1;
3363389d1b08SAneesh Kumar K.V }
3364389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_uptodate_or_lock);
3365389d1b08SAneesh Kumar K.V 
3366389d1b08SAneesh Kumar K.V /**
3367a6b91919SRandy Dunlap  * bh_submit_read - Submit a locked buffer for reading
3368389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head
3369389d1b08SAneesh Kumar K.V  *
3370389d1b08SAneesh Kumar K.V  * Returns zero on success and -EIO on error.
3371389d1b08SAneesh Kumar K.V  */
3372389d1b08SAneesh Kumar K.V int bh_submit_read(struct buffer_head *bh)
3373389d1b08SAneesh Kumar K.V {
3374389d1b08SAneesh Kumar K.V 	BUG_ON(!buffer_locked(bh));
3375389d1b08SAneesh Kumar K.V 
3376389d1b08SAneesh Kumar K.V 	if (buffer_uptodate(bh)) {
3377389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3378389d1b08SAneesh Kumar K.V 		return 0;
3379389d1b08SAneesh Kumar K.V 	}
3380389d1b08SAneesh Kumar K.V 
3381389d1b08SAneesh Kumar K.V 	get_bh(bh);
3382389d1b08SAneesh Kumar K.V 	bh->b_end_io = end_buffer_read_sync;
3383389d1b08SAneesh Kumar K.V 	submit_bh(READ, bh);
3384389d1b08SAneesh Kumar K.V 	wait_on_buffer(bh);
3385389d1b08SAneesh Kumar K.V 	if (buffer_uptodate(bh))
3386389d1b08SAneesh Kumar K.V 		return 0;
3387389d1b08SAneesh Kumar K.V 	return -EIO;
3388389d1b08SAneesh Kumar K.V }
3389389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_submit_read);
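
/*
 * Illustrative sketch (not part of this file): the intended pairing of
 * the two helpers above.  bh_uptodate_or_lock() returns 0 with the
 * buffer locked only when a read is really needed, which is exactly
 * the state bh_submit_read() expects.  The helper name is hypothetical.
 */
static int example_read_bh(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already up-to-date, unlocked */
	return bh_submit_read(bh);	/* submits READ, waits, unlocks */
}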
3390389d1b08SAneesh Kumar K.V 
33911da177e4SLinus Torvalds void __init buffer_init(void)
33921da177e4SLinus Torvalds {
33931da177e4SLinus Torvalds 	int nrpages;
33941da177e4SLinus Torvalds 
3395b98938c3SChristoph Lameter 	bh_cachep = kmem_cache_create("buffer_head",
3396b98938c3SChristoph Lameter 			sizeof(struct buffer_head), 0,
3397b98938c3SChristoph Lameter 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3398b98938c3SChristoph Lameter 				SLAB_MEM_SPREAD),
3399019b4d12SRichard Kennedy 				NULL);
34001da177e4SLinus Torvalds 
34011da177e4SLinus Torvalds 	/*
34021da177e4SLinus Torvalds 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
34031da177e4SLinus Torvalds 	 */
34041da177e4SLinus Torvalds 	nrpages = (nr_free_buffer_pages() * 10) / 100;
34051da177e4SLinus Torvalds 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
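	/*
	 * Worked example (illustrative: sizeof(struct buffer_head) varies
	 * by architecture and config): with 4KB pages, ~100-byte buffer
	 * heads and 100,000 free buffer pages, nrpages is 10,000 and
	 * max_buffer_heads is roughly 10,000 * 40 = 400,000.
	 */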
34061da177e4SLinus Torvalds 	hotcpu_notifier(buffer_cpu_notify, 0);
34071da177e4SLinus Torvalds }
3408