/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}

static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}

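/*
 * Illustrative sketch (not part of this file): the common caller-side
 * pattern built on __wait_on_buffer().  wait_on_buffer() sleeps until
 * BH_Lock clears, after which BH_Uptodate says whether the I/O
 * succeeded.  The helper name is hypothetical:
 *
 *	static int example_read_wait(struct buffer_head *bh)
 *	{
 *		wait_on_buffer(bh);
 *		return buffer_uptodate(bh) ? 0 : -EIO;
 *	}
 */
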
static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}


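/*
 * Returns 1 if an I/O error report should be suppressed - either the
 * buffer is flagged BH_Quiet or we are over the printk ratelimit -
 * and 0 if the caller should go ahead and log it.
 */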
static int quiet_error(struct buffer_head *bh)
{
	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
		return 0;
	return 1;
}


static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

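/*
 * Illustrative sketch (not part of this file): submitting one buffer for
 * synchronous writeout with end_buffer_write_sync() as the completion
 * handler - essentially the shape of sync_dirty_buffer():
 *
 *	lock_buffer(bh);
 *	if (test_clear_buffer_dirty(bh)) {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_write_sync;
 *		submit_bh(WRITE, bh);
 *		wait_on_buffer(bh);
 *	} else {
 *		unlock_buffer(bh);
 *	}
 */
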
/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}

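/*
 * Worked example for the index computation above (assuming 4K pages):
 * with PAGE_CACHE_SHIFT = 12 and 1K blocks (i_blkbits = 10), four blocks
 * fit in each page, so block 37 lives in page-cache page
 * 37 >> (12 - 10) = 9, alongside blocks 36, 38 and 39.
 */
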
/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media disk even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or due to an error of the user), by not destroying the dirty
   buffers we could also generate corruption on the next media inserted;
   thus a parameter is necessary to handle this case in the safest way
   possible (trying not to corrupt the new disk inserted with the data
   belonging to the old, now corrupted, disk). Also for the ramdisk the
   natural thing to do in order to release the ramdisk memory is to
   destroy dirty buffers.

   These are two special cases. Normal usage implies that the device
   driver issues a sync on the device (without waiting for I/O completion)
   and then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	invalidate_mapping_pages(mapping, 0, -1);
}

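/*
 * Illustrative sketch (not part of this file): the usual pattern in a
 * removable-media driver's media-change path per the comment above -
 * flush what we can, then drop the now-stale cache:
 *
 *	static void example_media_changed(struct block_device *bdev)
 *	{
 *		sync_blockdev(bdev);
 *		invalidate_bdev(bdev);
 *	}
 */
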
/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone *zone;
	int nid;

	wakeup_pdflush(1024);
	yield();

	for_each_online_node(nid) {
		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
						gfp_zone(GFP_NOFS), NULL,
						&zone);
		if (zone)
			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
						GFP_NOFS, NULL);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (!quiet_error(bh))
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
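	/*
	 * The BH_Uptodate_Lock bit in the first buffer head's b_state acts
	 * as a page-wide bit spinlock: taking it on "first" serializes all
	 * I/O completions for this page's buffers without needing a lock
	 * field in struct page, at the cost of disabling local interrupts
	 * around the critical section.
	 */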
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async read I/O against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

void mark_buffer_async_write_endio(struct buffer_head *bh,
				   bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);

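/*
 * Illustrative sketch (not part of this file): the writepage-style
 * pattern these helpers support.  Every dirty buffer is marked first,
 * then submitted, so end_buffer_async_write() never sees a
 * partially-marked page:
 *
 *	do {
 *		lock_buffer(bh);
 *		if (test_clear_buffer_dirty(bh))
 *			mark_buffer_async_write(bh);
 *		else
 *			unlock_buffer(bh);
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 *
 *	do {
 *		struct buffer_head *next = bh->b_this_page;
 *		if (buffer_async_write(bh))
 *			submit_bh(WRITE, bh);
 *		bh = next;
 *	} while (bh != head);
 */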

/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}
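
/*
 * Illustrative sketch (not part of this file): the O_SYNC write pattern
 * described above - queue each write as the buffer is dirtied, then use
 * the osync pass to wait for all of them:
 *
 *	mark_buffer_dirty(bh);
 *	ll_rw_block(WRITE, 1, &bh);
 *	...
 *	err = osync_buffers_list(&buffer_mapping->private_lock,
 *				 &mapping->private_list);
 */
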
void do_thaw_all(struct work_struct *work)
{
	struct super_block *sb;
	char b[BDEVNAME_SIZE];

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
			printk(KERN_WARNING "Emergency Thaw on %s\n",
			       bdevname(sb->s_bdev, b));
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

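/*
 * Illustrative sketch (not part of this file): a minimal ->fsync()
 * built on sync_mapping_buffers(), roughly the shape ext2 uses to get
 * its indirect blocks to disk.  The function name is hypothetical:
 *
 *	static int example_fsync(struct file *file, struct dentry *dentry,
 *				 int datasync)
 *	{
 *		return sync_mapping_buffers(dentry->d_inode->i_mapping);
 *	}
 */
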
/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

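/*
 * Illustrative sketch (not part of this file): dirtying a freshly
 * allocated metadata block against its owning inode.  This is what
 * makes the buffer show up on ->private_list for the fsync sketch
 * above:
 *
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	mark_buffer_dirty_inode(bh, inode);
 */
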
/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static void __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));
		account_page_dirtied(page, mapping);
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	spin_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	int newly_dirty;
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__set_page_dirty(page, mapping, 1);
	return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping, *prev_mapping = NULL;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				if (prev_mapping && prev_mapping != mapping)
					blk_run_address_space(prev_mapping);
				prev_mapping = mapping;

				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping),
 * which may not fail, from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_private = NULL;
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

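/*
 * Illustrative sketch (not part of this file): allocating a buffer
 * chain for a page and tearing it down again before it is attached.
 * With retry == 0 the allocation can fail, so the NULL case must be
 * handled:
 *
 *	struct buffer_head *head = alloc_page_buffers(page, blocksize, 0);
 *	struct buffer_head *bh;
 *
 *	if (!head)
 *		return -ENOMEM;
 *	...
 *	while (head) {
 *		bh = head;
 *		head = head->b_this_page;
 *		free_buffer_head(bh);
 *	}
 */
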
static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);

	do {
		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;

	page = find_or_create_page(inode->i_mapping, index,
		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
	if (!page)
		return NULL;

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			init_page_buffers(page, bdev, block, size);
			return page;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, 0);
	if (!bh)
		goto failed;

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	init_page_buffers(page, bdev, block, size);
	spin_unlock(&inode->i_mapping->private_lock);
	return page;

failed:
	BUG();
	unlock_page(page);
	page_cache_release(page);
	return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
	struct page *page;
	pgoff_t index;
	int sizebits;

	sizebits = -1;
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);
10591da177e4SLinus Torvalds 	index = block >> sizebits;
10601da177e4SLinus Torvalds 
1061e5657933SAndrew Morton 	/*
1062e5657933SAndrew Morton 	 * Check for a block which wants to lie outside our maximum possible
1063e5657933SAndrew Morton 	 * pagecache index.  (this comparison is done using sector_t types).
1064e5657933SAndrew Morton 	 */
1065e5657933SAndrew Morton 	if (unlikely(index != block >> sizebits)) {
1066e5657933SAndrew Morton 		char b[BDEVNAME_SIZE];
1067e5657933SAndrew Morton 
1068e5657933SAndrew Morton 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1069e5657933SAndrew Morton 			"device %s\n",
10708e24eea7SHarvey Harrison 			__func__, (unsigned long long)block,
1071e5657933SAndrew Morton 			bdevname(bdev, b));
1072e5657933SAndrew Morton 		return -EIO;
1073e5657933SAndrew Morton 	}
1074e5657933SAndrew Morton 	block = index << sizebits;
10751da177e4SLinus Torvalds 	/* Create a page with the proper size buffers.. */
10761da177e4SLinus Torvalds 	page = grow_dev_page(bdev, block, index, size);
10771da177e4SLinus Torvalds 	if (!page)
10781da177e4SLinus Torvalds 		return 0;
10791da177e4SLinus Torvalds 	unlock_page(page);
10801da177e4SLinus Torvalds 	page_cache_release(page);
10811da177e4SLinus Torvalds 	return 1;
10821da177e4SLinus Torvalds }
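
/*
 * Worked example (illustrative, not part of the original file): with a
 * 4096-byte PAGE_SIZE and size == 1024, the loop in grow_buffers()
 * yields sizebits == 2 (1024 << 2 == 4096), so block 103 lands in page
 * index 103 >> 2 == 25, and that page's first block is 25 << 2 == 100.
 */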
10831da177e4SLinus Torvalds 
108475c96f85SAdrian Bunk static struct buffer_head *
10851da177e4SLinus Torvalds __getblk_slow(struct block_device *bdev, sector_t block, int size)
10861da177e4SLinus Torvalds {
10871da177e4SLinus Torvalds 	/* Size must be a multiple of the logical block size */
1088*e1defc4fSMartin K. Petersen 	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
10891da177e4SLinus Torvalds 			(size < 512 || size > PAGE_SIZE))) {
10901da177e4SLinus Torvalds 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
10911da177e4SLinus Torvalds 					size);
1092*e1defc4fSMartin K. Petersen 		printk(KERN_ERR "logical block size: %d\n",
1093*e1defc4fSMartin K. Petersen 					bdev_logical_block_size(bdev));
10941da177e4SLinus Torvalds 
10951da177e4SLinus Torvalds 		dump_stack();
10961da177e4SLinus Torvalds 		return NULL;
10971da177e4SLinus Torvalds 	}
10981da177e4SLinus Torvalds 
10991da177e4SLinus Torvalds 	for (;;) {
11001da177e4SLinus Torvalds 		struct buffer_head * bh;
1101e5657933SAndrew Morton 		int ret;
11021da177e4SLinus Torvalds 
11031da177e4SLinus Torvalds 		bh = __find_get_block(bdev, block, size);
11041da177e4SLinus Torvalds 		if (bh)
11051da177e4SLinus Torvalds 			return bh;
11061da177e4SLinus Torvalds 
1107e5657933SAndrew Morton 		ret = grow_buffers(bdev, block, size);
1108e5657933SAndrew Morton 		if (ret < 0)
1109e5657933SAndrew Morton 			return NULL;
1110e5657933SAndrew Morton 		if (ret == 0)
11111da177e4SLinus Torvalds 			free_more_memory();
11121da177e4SLinus Torvalds 	}
11131da177e4SLinus Torvalds }
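
/*
 * Worked example (illustrative): for a device with 512-byte logical
 * blocks and a 4096-byte PAGE_SIZE, size 1024 passes the checks in
 * __getblk_slow() (1024 & 511 == 0 and 512 <= 1024 <= PAGE_SIZE),
 * while size 768 is rejected because 768 & 511 == 256.
 */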
11141da177e4SLinus Torvalds 
11151da177e4SLinus Torvalds /*
11161da177e4SLinus Torvalds  * The relationship between dirty buffers and dirty pages:
11171da177e4SLinus Torvalds  *
11181da177e4SLinus Torvalds  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
11191da177e4SLinus Torvalds  * the page is tagged dirty in its radix tree.
11201da177e4SLinus Torvalds  *
11211da177e4SLinus Torvalds  * At all times, the dirtiness of the buffers represents the dirtiness of
11221da177e4SLinus Torvalds  * subsections of the page.  If the page has buffers, the page dirty bit is
11231da177e4SLinus Torvalds  * merely a hint about the true dirty state.
11241da177e4SLinus Torvalds  *
11251da177e4SLinus Torvalds  * When a page is set dirty in its entirety, all its buffers are marked dirty
11261da177e4SLinus Torvalds  * (if the page has buffers).
11271da177e4SLinus Torvalds  *
11281da177e4SLinus Torvalds  * When a buffer is marked dirty, its page is dirtied, but the page's other
11291da177e4SLinus Torvalds  * buffers are not.
11301da177e4SLinus Torvalds  *
11311da177e4SLinus Torvalds  * Also.  When blockdev buffers are explicitly read with bread(), they
11321da177e4SLinus Torvalds  * individually become uptodate.  But their backing page remains not
11331da177e4SLinus Torvalds  * uptodate - even if all of its buffers are uptodate.  A subsequent
11341da177e4SLinus Torvalds  * block_read_full_page() against that page will discover all the uptodate
11351da177e4SLinus Torvalds  * buffers, will set the page uptodate and will perform no I/O.
11361da177e4SLinus Torvalds  */
11371da177e4SLinus Torvalds 
11381da177e4SLinus Torvalds /**
11391da177e4SLinus Torvalds  * mark_buffer_dirty - mark a buffer_head as needing writeout
114067be2dd1SMartin Waitz  * @bh: the buffer_head to mark dirty
11411da177e4SLinus Torvalds  *
11421da177e4SLinus Torvalds  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
11431da177e4SLinus Torvalds  * backing page dirty, then tag the page as dirty in its address_space's radix
11441da177e4SLinus Torvalds  * tree and then attach the address_space's inode to its superblock's dirty
11451da177e4SLinus Torvalds  * inode list.
11461da177e4SLinus Torvalds  *
11471da177e4SLinus Torvalds  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
11481da177e4SLinus Torvalds  * mapping->tree_lock and the global inode_lock.
11491da177e4SLinus Torvalds  */
1150fc9b52cdSHarvey Harrison void mark_buffer_dirty(struct buffer_head *bh)
11511da177e4SLinus Torvalds {
1152787d2214SNick Piggin 	WARN_ON_ONCE(!buffer_uptodate(bh));
11531be62dc1SLinus Torvalds 
11541be62dc1SLinus Torvalds 	/*
11551be62dc1SLinus Torvalds 	 * Very *carefully* optimize the it-is-already-dirty case.
11561be62dc1SLinus Torvalds 	 *
11571be62dc1SLinus Torvalds 	 * Don't let the final "is it dirty" check escape to before we
11581be62dc1SLinus Torvalds 	 * have perhaps modified the buffer.
11591be62dc1SLinus Torvalds 	 */
11601be62dc1SLinus Torvalds 	if (buffer_dirty(bh)) {
11611be62dc1SLinus Torvalds 		smp_mb();
11621be62dc1SLinus Torvalds 		if (buffer_dirty(bh))
11631be62dc1SLinus Torvalds 			return;
11641be62dc1SLinus Torvalds 	}
11651be62dc1SLinus Torvalds 
1166a8e7d49aSLinus Torvalds 	if (!test_set_buffer_dirty(bh)) {
1167a8e7d49aSLinus Torvalds 		struct page *page = bh->b_page;
1168a8e7d49aSLinus Torvalds 		if (!TestSetPageDirty(page))
1169a8e7d49aSLinus Torvalds 			__set_page_dirty(page, page_mapping(page), 0);
1170a8e7d49aSLinus Torvalds 	}
11711da177e4SLinus Torvalds }
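
/*
 * Illustrative sketch (not part of the original file): the common
 * read-modify-write pattern around mark_buffer_dirty().  The helper
 * name and arguments are hypothetical; __bread(), mark_buffer_dirty()
 * and brelse() are the real APIs from this file.
 */
#if 0	/* example only */
static int example_patch_block(struct block_device *bdev, sector_t block,
			       unsigned size, unsigned offset, u8 byte)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (!bh)
		return -EIO;
	((u8 *)bh->b_data)[offset] = byte;
	mark_buffer_dirty(bh);	/* queues the buffer for writeout, no wait */
	brelse(bh);
	return 0;
}
#endif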
11721da177e4SLinus Torvalds 
11731da177e4SLinus Torvalds /*
11741da177e4SLinus Torvalds  * Decrement a buffer_head's reference count.  If all buffers against a page
11751da177e4SLinus Torvalds  * have zero reference count, are clean and unlocked, and if the page is clean
11761da177e4SLinus Torvalds  * and unlocked then try_to_free_buffers() may strip the buffers from the page
11771da177e4SLinus Torvalds  * in preparation for freeing it (sometimes, rarely, buffers are removed from
11781da177e4SLinus Torvalds  * a page but it ends up not being freed, and buffers may later be reattached).
11791da177e4SLinus Torvalds  */
11801da177e4SLinus Torvalds void __brelse(struct buffer_head * buf)
11811da177e4SLinus Torvalds {
11821da177e4SLinus Torvalds 	if (atomic_read(&buf->b_count)) {
11831da177e4SLinus Torvalds 		put_bh(buf);
11841da177e4SLinus Torvalds 		return;
11851da177e4SLinus Torvalds 	}
11865c752ad9SArjan van de Ven 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
11871da177e4SLinus Torvalds }
11881da177e4SLinus Torvalds 
11891da177e4SLinus Torvalds /*
11901da177e4SLinus Torvalds  * bforget() is like brelse(), except it discards any
11911da177e4SLinus Torvalds  * potentially dirty data.
11921da177e4SLinus Torvalds  */
11931da177e4SLinus Torvalds void __bforget(struct buffer_head *bh)
11941da177e4SLinus Torvalds {
11951da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
1196535ee2fbSJan Kara 	if (bh->b_assoc_map) {
11971da177e4SLinus Torvalds 		struct address_space *buffer_mapping = bh->b_page->mapping;
11981da177e4SLinus Torvalds 
11991da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
12001da177e4SLinus Torvalds 		list_del_init(&bh->b_assoc_buffers);
120158ff407bSJan Kara 		bh->b_assoc_map = NULL;
12021da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
12031da177e4SLinus Torvalds 	}
12041da177e4SLinus Torvalds 	__brelse(bh);
12051da177e4SLinus Torvalds }
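
/*
 * Illustrative sketch (not part of the original file): when the block a
 * dirty bh describes is being deallocated, bforget() discards the dirty
 * data instead of letting brelse() leave it queued for writeout.  The
 * wrapper name is hypothetical.
 */
#if 0	/* example only */
static void example_drop_freed_block(struct buffer_head *bh)
{
	bforget(bh);	/* like brelse(), but cancels the pending dirty data */
}
#endif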
12061da177e4SLinus Torvalds 
12071da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh)
12081da177e4SLinus Torvalds {
12091da177e4SLinus Torvalds 	lock_buffer(bh);
12101da177e4SLinus Torvalds 	if (buffer_uptodate(bh)) {
12111da177e4SLinus Torvalds 		unlock_buffer(bh);
12121da177e4SLinus Torvalds 		return bh;
12131da177e4SLinus Torvalds 	} else {
12141da177e4SLinus Torvalds 		get_bh(bh);
12151da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_read_sync;
12161da177e4SLinus Torvalds 		submit_bh(READ, bh);
12171da177e4SLinus Torvalds 		wait_on_buffer(bh);
12181da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
12191da177e4SLinus Torvalds 			return bh;
12201da177e4SLinus Torvalds 	}
12211da177e4SLinus Torvalds 	brelse(bh);
12221da177e4SLinus Torvalds 	return NULL;
12231da177e4SLinus Torvalds }
12241da177e4SLinus Torvalds 
12251da177e4SLinus Torvalds /*
12261da177e4SLinus Torvalds  * Per-cpu buffer LRU implementation.  This reduces the cost of __find_get_block().
12271da177e4SLinus Torvalds  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
12281da177e4SLinus Torvalds  * refcount elevated by one when they're in an LRU.  A buffer can only appear
12291da177e4SLinus Torvalds  * once in a particular CPU's LRU.  A single buffer can be present in multiple
12301da177e4SLinus Torvalds  * CPU's LRUs at the same time.
12311da177e4SLinus Torvalds  * CPUs' LRUs at the same time.
12321da177e4SLinus Torvalds  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
12331da177e4SLinus Torvalds  * sb_find_get_block().
12341da177e4SLinus Torvalds  *
12351da177e4SLinus Torvalds  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
12361da177e4SLinus Torvalds  * a local interrupt disable for that.
12371da177e4SLinus Torvalds  */
12381da177e4SLinus Torvalds 
12391da177e4SLinus Torvalds #define BH_LRU_SIZE	8
12401da177e4SLinus Torvalds 
12411da177e4SLinus Torvalds struct bh_lru {
12421da177e4SLinus Torvalds 	struct buffer_head *bhs[BH_LRU_SIZE];
12431da177e4SLinus Torvalds };
12441da177e4SLinus Torvalds 
12451da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
12461da177e4SLinus Torvalds 
12471da177e4SLinus Torvalds #ifdef CONFIG_SMP
12481da177e4SLinus Torvalds #define bh_lru_lock()	local_irq_disable()
12491da177e4SLinus Torvalds #define bh_lru_unlock()	local_irq_enable()
12501da177e4SLinus Torvalds #else
12511da177e4SLinus Torvalds #define bh_lru_lock()	preempt_disable()
12521da177e4SLinus Torvalds #define bh_lru_unlock()	preempt_enable()
12531da177e4SLinus Torvalds #endif
12541da177e4SLinus Torvalds 
12551da177e4SLinus Torvalds static inline void check_irqs_on(void)
12561da177e4SLinus Torvalds {
12571da177e4SLinus Torvalds #ifdef irqs_disabled
12581da177e4SLinus Torvalds 	BUG_ON(irqs_disabled());
12591da177e4SLinus Torvalds #endif
12601da177e4SLinus Torvalds }
12611da177e4SLinus Torvalds 
12621da177e4SLinus Torvalds /*
12631da177e4SLinus Torvalds  * The LRU management algorithm is dopey-but-simple.  Sorry.
12641da177e4SLinus Torvalds  */
12651da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh)
12661da177e4SLinus Torvalds {
12671da177e4SLinus Torvalds 	struct buffer_head *evictee = NULL;
12681da177e4SLinus Torvalds 	struct bh_lru *lru;
12691da177e4SLinus Torvalds 
12701da177e4SLinus Torvalds 	check_irqs_on();
12711da177e4SLinus Torvalds 	bh_lru_lock();
12721da177e4SLinus Torvalds 	lru = &__get_cpu_var(bh_lrus);
12731da177e4SLinus Torvalds 	if (lru->bhs[0] != bh) {
12741da177e4SLinus Torvalds 		struct buffer_head *bhs[BH_LRU_SIZE];
12751da177e4SLinus Torvalds 		int in;
12761da177e4SLinus Torvalds 		int out = 0;
12771da177e4SLinus Torvalds 
12781da177e4SLinus Torvalds 		get_bh(bh);
12791da177e4SLinus Torvalds 		bhs[out++] = bh;
12801da177e4SLinus Torvalds 		for (in = 0; in < BH_LRU_SIZE; in++) {
12811da177e4SLinus Torvalds 			struct buffer_head *bh2 = lru->bhs[in];
12821da177e4SLinus Torvalds 
12831da177e4SLinus Torvalds 			if (bh2 == bh) {
12841da177e4SLinus Torvalds 				__brelse(bh2);
12851da177e4SLinus Torvalds 			} else {
12861da177e4SLinus Torvalds 				if (out >= BH_LRU_SIZE) {
12871da177e4SLinus Torvalds 					BUG_ON(evictee != NULL);
12881da177e4SLinus Torvalds 					evictee = bh2;
12891da177e4SLinus Torvalds 				} else {
12901da177e4SLinus Torvalds 					bhs[out++] = bh2;
12911da177e4SLinus Torvalds 				}
12921da177e4SLinus Torvalds 			}
12931da177e4SLinus Torvalds 		}
12941da177e4SLinus Torvalds 		while (out < BH_LRU_SIZE)
12951da177e4SLinus Torvalds 			bhs[out++] = NULL;
12961da177e4SLinus Torvalds 		memcpy(lru->bhs, bhs, sizeof(bhs));
12971da177e4SLinus Torvalds 	}
12981da177e4SLinus Torvalds 	bh_lru_unlock();
12991da177e4SLinus Torvalds 
13001da177e4SLinus Torvalds 	if (evictee)
13011da177e4SLinus Torvalds 		__brelse(evictee);
13021da177e4SLinus Torvalds }
13031da177e4SLinus Torvalds 
13041da177e4SLinus Torvalds /*
13051da177e4SLinus Torvalds  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
13061da177e4SLinus Torvalds  */
1307858119e1SArjan van de Ven static struct buffer_head *
13083991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
13091da177e4SLinus Torvalds {
13101da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
13111da177e4SLinus Torvalds 	struct bh_lru *lru;
13123991d3bdSTomasz Kvarsin 	unsigned int i;
13131da177e4SLinus Torvalds 
13141da177e4SLinus Torvalds 	check_irqs_on();
13151da177e4SLinus Torvalds 	bh_lru_lock();
13161da177e4SLinus Torvalds 	lru = &__get_cpu_var(bh_lrus);
13171da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
13181da177e4SLinus Torvalds 		struct buffer_head *bh = lru->bhs[i];
13191da177e4SLinus Torvalds 
13201da177e4SLinus Torvalds 		if (bh && bh->b_bdev == bdev &&
13211da177e4SLinus Torvalds 				bh->b_blocknr == block && bh->b_size == size) {
13221da177e4SLinus Torvalds 			if (i) {
13231da177e4SLinus Torvalds 				while (i) {
13241da177e4SLinus Torvalds 					lru->bhs[i] = lru->bhs[i - 1];
13251da177e4SLinus Torvalds 					i--;
13261da177e4SLinus Torvalds 				}
13271da177e4SLinus Torvalds 				lru->bhs[0] = bh;
13281da177e4SLinus Torvalds 			}
13291da177e4SLinus Torvalds 			get_bh(bh);
13301da177e4SLinus Torvalds 			ret = bh;
13311da177e4SLinus Torvalds 			break;
13321da177e4SLinus Torvalds 		}
13331da177e4SLinus Torvalds 	}
13341da177e4SLinus Torvalds 	bh_lru_unlock();
13351da177e4SLinus Torvalds 	return ret;
13361da177e4SLinus Torvalds }
13371da177e4SLinus Torvalds 
13381da177e4SLinus Torvalds /*
13391da177e4SLinus Torvalds  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
13401da177e4SLinus Torvalds  * it in the LRU and mark it as accessed.  If it is not present then return
13411da177e4SLinus Torvalds  * NULL.
13421da177e4SLinus Torvalds  */
13431da177e4SLinus Torvalds struct buffer_head *
13443991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
13451da177e4SLinus Torvalds {
13461da177e4SLinus Torvalds 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
13471da177e4SLinus Torvalds 
13481da177e4SLinus Torvalds 	if (bh == NULL) {
1349385fd4c5SCoywolf Qi Hunt 		bh = __find_get_block_slow(bdev, block);
13501da177e4SLinus Torvalds 		if (bh)
13511da177e4SLinus Torvalds 			bh_lru_install(bh);
13521da177e4SLinus Torvalds 	}
13531da177e4SLinus Torvalds 	if (bh)
13541da177e4SLinus Torvalds 		touch_buffer(bh);
13551da177e4SLinus Torvalds 	return bh;
13561da177e4SLinus Torvalds }
13571da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block);
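
/*
 * Illustrative sketch (not part of the original file): probing the cache
 * without doing any allocation or I/O.  Unlike __getblk(), a miss simply
 * returns NULL.  The helper name is hypothetical.
 */
#if 0	/* example only */
static int example_block_is_cached(struct block_device *bdev, sector_t block,
				   unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	if (!bh)
		return 0;
	brelse(bh);	/* drop the reference the lookup took */
	return 1;
}
#endif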
13581da177e4SLinus Torvalds 
13591da177e4SLinus Torvalds /*
13601da177e4SLinus Torvalds  * __getblk will locate (and, if necessary, create) the buffer_head
13611da177e4SLinus Torvalds  * which corresponds to the passed block_device, block and size. The
13621da177e4SLinus Torvalds  * returned buffer has its reference count incremented.
13631da177e4SLinus Torvalds  *
13641da177e4SLinus Torvalds  * __getblk() cannot fail - it just keeps trying.  If you pass it an
13651da177e4SLinus Torvalds  * illegal block number, __getblk() will happily return a buffer_head
13661da177e4SLinus Torvalds  * which represents the non-existent block.  Very weird.
13671da177e4SLinus Torvalds  *
13681da177e4SLinus Torvalds  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
13691da177e4SLinus Torvalds  * attempt is failing.  FIXME, perhaps?
13701da177e4SLinus Torvalds  */
13711da177e4SLinus Torvalds struct buffer_head *
13723991d3bdSTomasz Kvarsin __getblk(struct block_device *bdev, sector_t block, unsigned size)
13731da177e4SLinus Torvalds {
13741da177e4SLinus Torvalds 	struct buffer_head *bh = __find_get_block(bdev, block, size);
13751da177e4SLinus Torvalds 
13761da177e4SLinus Torvalds 	might_sleep();
13771da177e4SLinus Torvalds 	if (bh == NULL)
13781da177e4SLinus Torvalds 		bh = __getblk_slow(bdev, block, size);
13791da177e4SLinus Torvalds 	return bh;
13801da177e4SLinus Torvalds }
13811da177e4SLinus Torvalds EXPORT_SYMBOL(__getblk);
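
/*
 * Illustrative sketch (not part of the original file): because __getblk()
 * performs no I/O, a caller overwriting a whole block can skip the read
 * entirely.  The helper name and arguments are hypothetical.
 */
#if 0	/* example only */
static void example_overwrite_block(struct block_device *bdev, sector_t block,
				    const void *data, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	lock_buffer(bh);
	memcpy(bh->b_data, data, size);
	set_buffer_uptodate(bh);	/* contents are now fully valid */
	unlock_buffer(bh);
	mark_buffer_dirty(bh);
	brelse(bh);
}
#endif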
13821da177e4SLinus Torvalds 
13831da177e4SLinus Torvalds /*
13841da177e4SLinus Torvalds  * Do async read-ahead on a buffer..
13851da177e4SLinus Torvalds  */
13863991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
13871da177e4SLinus Torvalds {
13881da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
1389a3e713b5SAndrew Morton 	if (likely(bh)) {
13901da177e4SLinus Torvalds 		ll_rw_block(READA, 1, &bh);
13911da177e4SLinus Torvalds 		brelse(bh);
13921da177e4SLinus Torvalds 	}
1393a3e713b5SAndrew Morton }
13941da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead);
13951da177e4SLinus Torvalds 
13961da177e4SLinus Torvalds /**
13971da177e4SLinus Torvalds  *  __bread() - reads a specified block and returns the bh
139867be2dd1SMartin Waitz  *  @bdev: the block_device to read from
13991da177e4SLinus Torvalds  *  @block: number of block
14001da177e4SLinus Torvalds  *  @size: size (in bytes) to read
14011da177e4SLinus Torvalds  *
14021da177e4SLinus Torvalds  *  Reads a specified block, and returns buffer head that contains it.
14031da177e4SLinus Torvalds  *  It returns NULL if the block was unreadable.
14041da177e4SLinus Torvalds  */
14051da177e4SLinus Torvalds struct buffer_head *
14063991d3bdSTomasz Kvarsin __bread(struct block_device *bdev, sector_t block, unsigned size)
14071da177e4SLinus Torvalds {
14081da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
14091da177e4SLinus Torvalds 
1410a3e713b5SAndrew Morton 	if (likely(bh) && !buffer_uptodate(bh))
14111da177e4SLinus Torvalds 		bh = __bread_slow(bh);
14121da177e4SLinus Torvalds 	return bh;
14131da177e4SLinus Torvalds }
14141da177e4SLinus Torvalds EXPORT_SYMBOL(__bread);
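
/*
 * Illustrative sketch (not part of the original file): reading one block
 * and coping with failure.  The block number, size and magic value are
 * hypothetical, not any real on-disk format.
 */
#if 0	/* example only */
static int example_check_magic(struct block_device *bdev)
{
	struct buffer_head *bh = __bread(bdev, 1, 1024);
	u32 magic;

	if (!bh)
		return -EIO;		/* the block was unreadable */
	magic = le32_to_cpu(*(__le32 *)bh->b_data);
	brelse(bh);
	return magic == 0x12345678 ? 0 : -EINVAL;
}
#endif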
14151da177e4SLinus Torvalds 
14161da177e4SLinus Torvalds /*
14171da177e4SLinus Torvalds  * invalidate_bh_lrus() is called rarely - but not only at unmount.
14181da177e4SLinus Torvalds  * This doesn't race because it runs in each cpu either in irq
14191da177e4SLinus Torvalds  * or with preempt disabled.
14201da177e4SLinus Torvalds  */
14211da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg)
14221da177e4SLinus Torvalds {
14231da177e4SLinus Torvalds 	struct bh_lru *b = &get_cpu_var(bh_lrus);
14241da177e4SLinus Torvalds 	int i;
14251da177e4SLinus Torvalds 
14261da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
14271da177e4SLinus Torvalds 		brelse(b->bhs[i]);
14281da177e4SLinus Torvalds 		b->bhs[i] = NULL;
14291da177e4SLinus Torvalds 	}
14301da177e4SLinus Torvalds 	put_cpu_var(bh_lrus);
14311da177e4SLinus Torvalds }
14321da177e4SLinus Torvalds 
1433f9a14399SPeter Zijlstra void invalidate_bh_lrus(void)
14341da177e4SLinus Torvalds {
143515c8b6c1SJens Axboe 	on_each_cpu(invalidate_bh_lru, NULL, 1);
14361da177e4SLinus Torvalds }
14379db5579bSNick Piggin EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
14381da177e4SLinus Torvalds 
14391da177e4SLinus Torvalds void set_bh_page(struct buffer_head *bh,
14401da177e4SLinus Torvalds 		struct page *page, unsigned long offset)
14411da177e4SLinus Torvalds {
14421da177e4SLinus Torvalds 	bh->b_page = page;
1443e827f923SEric Sesterhenn 	BUG_ON(offset >= PAGE_SIZE);
14441da177e4SLinus Torvalds 	if (PageHighMem(page))
14451da177e4SLinus Torvalds 		/*
14461da177e4SLinus Torvalds 		 * This catches illegal uses and preserves the offset:
14471da177e4SLinus Torvalds 		 */
14481da177e4SLinus Torvalds 		bh->b_data = (char *)(0 + offset);
14491da177e4SLinus Torvalds 	else
14501da177e4SLinus Torvalds 		bh->b_data = page_address(page) + offset;
14511da177e4SLinus Torvalds }
14521da177e4SLinus Torvalds EXPORT_SYMBOL(set_bh_page);
14531da177e4SLinus Torvalds 
14541da177e4SLinus Torvalds /*
14551da177e4SLinus Torvalds  * Called when truncating a buffer on a page completely.
14561da177e4SLinus Torvalds  */
1457858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh)
14581da177e4SLinus Torvalds {
14591da177e4SLinus Torvalds 	lock_buffer(bh);
14601da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
14611da177e4SLinus Torvalds 	bh->b_bdev = NULL;
14621da177e4SLinus Torvalds 	clear_buffer_mapped(bh);
14631da177e4SLinus Torvalds 	clear_buffer_req(bh);
14641da177e4SLinus Torvalds 	clear_buffer_new(bh);
14651da177e4SLinus Torvalds 	clear_buffer_delay(bh);
146633a266ddSDavid Chinner 	clear_buffer_unwritten(bh);
14671da177e4SLinus Torvalds 	unlock_buffer(bh);
14681da177e4SLinus Torvalds }
14691da177e4SLinus Torvalds 
14701da177e4SLinus Torvalds /**
14711da177e4SLinus Torvalds  * block_invalidatepage - invalidate part or all of a buffer-backed page
14721da177e4SLinus Torvalds  *
14731da177e4SLinus Torvalds  * @page: the page which is affected
14741da177e4SLinus Torvalds  * @offset: the index of the truncation point
14751da177e4SLinus Torvalds  *
14761da177e4SLinus Torvalds  * block_invalidatepage() is called when all or part of the page has become
14771da177e4SLinus Torvalds  * invalidated by a truncate operation.
14781da177e4SLinus Torvalds  *
14791da177e4SLinus Torvalds  * block_invalidatepage() does not have to release all buffers, but it must
14801da177e4SLinus Torvalds  * ensure that no dirty buffer is left outside @offset and that no I/O
14811da177e4SLinus Torvalds  * is underway against any of the blocks which are outside the truncation
14821da177e4SLinus Torvalds  * point, because the caller is about to free (and possibly reuse) those
14831da177e4SLinus Torvalds  * blocks on-disk.
14841da177e4SLinus Torvalds  */
14852ff28e22SNeilBrown void block_invalidatepage(struct page *page, unsigned long offset)
14861da177e4SLinus Torvalds {
14871da177e4SLinus Torvalds 	struct buffer_head *head, *bh, *next;
14881da177e4SLinus Torvalds 	unsigned int curr_off = 0;
14891da177e4SLinus Torvalds 
14901da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
14911da177e4SLinus Torvalds 	if (!page_has_buffers(page))
14921da177e4SLinus Torvalds 		goto out;
14931da177e4SLinus Torvalds 
14941da177e4SLinus Torvalds 	head = page_buffers(page);
14951da177e4SLinus Torvalds 	bh = head;
14961da177e4SLinus Torvalds 	do {
14971da177e4SLinus Torvalds 		unsigned int next_off = curr_off + bh->b_size;
14981da177e4SLinus Torvalds 		next = bh->b_this_page;
14991da177e4SLinus Torvalds 
15001da177e4SLinus Torvalds 		/*
15011da177e4SLinus Torvalds 		 * is this block fully invalidated?
15021da177e4SLinus Torvalds 		 */
15031da177e4SLinus Torvalds 		if (offset <= curr_off)
15041da177e4SLinus Torvalds 			discard_buffer(bh);
15051da177e4SLinus Torvalds 		curr_off = next_off;
15061da177e4SLinus Torvalds 		bh = next;
15071da177e4SLinus Torvalds 	} while (bh != head);
15081da177e4SLinus Torvalds 
15091da177e4SLinus Torvalds 	/*
15101da177e4SLinus Torvalds 	 * We release buffers only if the entire page is being invalidated.
15111da177e4SLinus Torvalds 	 * The get_block cached value has been unconditionally invalidated,
15121da177e4SLinus Torvalds 	 * so real IO is not possible anymore.
15131da177e4SLinus Torvalds 	 */
15141da177e4SLinus Torvalds 	if (offset == 0)
15152ff28e22SNeilBrown 		try_to_release_page(page, 0);
15161da177e4SLinus Torvalds out:
15172ff28e22SNeilBrown 	return;
15181da177e4SLinus Torvalds }
15191da177e4SLinus Torvalds EXPORT_SYMBOL(block_invalidatepage);
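
/*
 * Worked example (illustrative): with 1024-byte buffers, a call with
 * offset == 2048 keeps the buffers covering bytes 0-1023 and 1024-2047
 * (curr_off < offset) and discards those at 2048 and 3072; only
 * offset == 0 goes on to try to release the buffers entirely.
 */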
15201da177e4SLinus Torvalds 
15211da177e4SLinus Torvalds /*
15221da177e4SLinus Torvalds  * We attach and possibly dirty the buffers atomically wrt
15231da177e4SLinus Torvalds  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
15241da177e4SLinus Torvalds  * is already excluded via the page lock.
15251da177e4SLinus Torvalds  */
15261da177e4SLinus Torvalds void create_empty_buffers(struct page *page,
15271da177e4SLinus Torvalds 			unsigned long blocksize, unsigned long b_state)
15281da177e4SLinus Torvalds {
15291da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *tail;
15301da177e4SLinus Torvalds 
15311da177e4SLinus Torvalds 	head = alloc_page_buffers(page, blocksize, 1);
15321da177e4SLinus Torvalds 	bh = head;
15331da177e4SLinus Torvalds 	do {
15341da177e4SLinus Torvalds 		bh->b_state |= b_state;
15351da177e4SLinus Torvalds 		tail = bh;
15361da177e4SLinus Torvalds 		bh = bh->b_this_page;
15371da177e4SLinus Torvalds 	} while (bh);
15381da177e4SLinus Torvalds 	tail->b_this_page = head;
15391da177e4SLinus Torvalds 
15401da177e4SLinus Torvalds 	spin_lock(&page->mapping->private_lock);
15411da177e4SLinus Torvalds 	if (PageUptodate(page) || PageDirty(page)) {
15421da177e4SLinus Torvalds 		bh = head;
15431da177e4SLinus Torvalds 		do {
15441da177e4SLinus Torvalds 			if (PageDirty(page))
15451da177e4SLinus Torvalds 				set_buffer_dirty(bh);
15461da177e4SLinus Torvalds 			if (PageUptodate(page))
15471da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
15481da177e4SLinus Torvalds 			bh = bh->b_this_page;
15491da177e4SLinus Torvalds 		} while (bh != head);
15501da177e4SLinus Torvalds 	}
15511da177e4SLinus Torvalds 	attach_page_buffers(page, head);
15521da177e4SLinus Torvalds 	spin_unlock(&page->mapping->private_lock);
15531da177e4SLinus Torvalds }
15541da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers);
15551da177e4SLinus Torvalds 
15561da177e4SLinus Torvalds /*
15571da177e4SLinus Torvalds  * We are taking a block for data and we don't want any output from any
15581da177e4SLinus Torvalds  * buffer-cache aliases starting from the return from this function and
15591da177e4SLinus Torvalds  * until the moment when something explicitly marks the buffer
15601da177e4SLinus Torvalds  * dirty (hopefully that will not happen until we free that block ;-)
15611da177e4SLinus Torvalds  * We don't even need to mark it not-uptodate - nobody can expect
15621da177e4SLinus Torvalds  * anything from a newly allocated buffer anyway. We used to use
15631da177e4SLinus Torvalds  * unmap_buffer() for such invalidation, but that was wrong. We definitely
15641da177e4SLinus Torvalds  * don't want to mark the alias unmapped, for example - it would confuse
15651da177e4SLinus Torvalds  * anyone who might pick it up with bread() afterwards...
15661da177e4SLinus Torvalds  *
15671da177e4SLinus Torvalds  * Also..  Note that bforget() doesn't lock the buffer.  So there can
15681da177e4SLinus Torvalds  * be writeout I/O going on against recently-freed buffers.  We don't
15691da177e4SLinus Torvalds  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
15701da177e4SLinus Torvalds  * only if we really need to.  That happens here.
15711da177e4SLinus Torvalds  */
15721da177e4SLinus Torvalds void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
15731da177e4SLinus Torvalds {
15741da177e4SLinus Torvalds 	struct buffer_head *old_bh;
15751da177e4SLinus Torvalds 
15761da177e4SLinus Torvalds 	might_sleep();
15771da177e4SLinus Torvalds 
1578385fd4c5SCoywolf Qi Hunt 	old_bh = __find_get_block_slow(bdev, block);
15791da177e4SLinus Torvalds 	if (old_bh) {
15801da177e4SLinus Torvalds 		clear_buffer_dirty(old_bh);
15811da177e4SLinus Torvalds 		wait_on_buffer(old_bh);
15821da177e4SLinus Torvalds 		clear_buffer_req(old_bh);
15831da177e4SLinus Torvalds 		__brelse(old_bh);
15841da177e4SLinus Torvalds 	}
15851da177e4SLinus Torvalds }
15861da177e4SLinus Torvalds EXPORT_SYMBOL(unmap_underlying_metadata);
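
/*
 * Illustrative sketch (not part of the original file): a filesystem
 * allocator exposing a freshly allocated block would kill any stale
 * blockdev-cache alias first.  example_alloc_data_block() and
 * example_allocator_get_block() are hypothetical.
 */
#if 0	/* example only */
static sector_t example_alloc_data_block(struct inode *inode)
{
	sector_t phys = example_allocator_get_block(inode);	/* hypothetical */

	/*
	 * A previous user of this block may still have a dirty bh
	 * aliasing it in the blockdev mapping; wait out and discard
	 * that I/O before handing the block over.
	 */
	unmap_underlying_metadata(inode->i_sb->s_bdev, phys);
	return phys;
}
#endif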
15871da177e4SLinus Torvalds 
15881da177e4SLinus Torvalds /*
15891da177e4SLinus Torvalds  * NOTE! All mapped/uptodate combinations are valid:
15901da177e4SLinus Torvalds  *
15911da177e4SLinus Torvalds  *	Mapped	Uptodate	Meaning
15921da177e4SLinus Torvalds  *
15931da177e4SLinus Torvalds  *	No	No		"unknown" - must do get_block()
15941da177e4SLinus Torvalds  *	No	Yes		"hole" - zero-filled
15951da177e4SLinus Torvalds  *	Yes	No		"allocated" - allocated on disk, not read in
15961da177e4SLinus Torvalds  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
15971da177e4SLinus Torvalds  *
15981da177e4SLinus Torvalds  * "Dirty" is valid only with the last case (mapped+uptodate).
15991da177e4SLinus Torvalds  */
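
/*
 * Illustrative sketch (not part of the original file): the table above
 * mapped onto the state bits.  The helper name is hypothetical.
 */
#if 0	/* example only */
static const char *example_bh_state_name(struct buffer_head *bh)
{
	if (!buffer_mapped(bh))
		return buffer_uptodate(bh) ? "hole" : "unknown";
	return buffer_uptodate(bh) ? "valid" : "allocated";
}
#endif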
16001da177e4SLinus Torvalds 
16011da177e4SLinus Torvalds /*
16021da177e4SLinus Torvalds  * While block_write_full_page is writing back the dirty buffers under
16031da177e4SLinus Torvalds  * the page lock, whoever dirtied the buffers may decide to clean them
16041da177e4SLinus Torvalds  * again at any time.  We handle that by only looking at the buffer
16051da177e4SLinus Torvalds  * state inside lock_buffer().
16061da177e4SLinus Torvalds  *
16071da177e4SLinus Torvalds  * If block_write_full_page() is called for regular writeback
16081da177e4SLinus Torvalds  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
16091da177e4SLinus Torvalds  * locked buffer.   This only can happen if someone has written the buffer
16101da177e4SLinus Torvalds  * directly, with submit_bh().  At the address_space level PageWriteback
16111da177e4SLinus Torvalds  * prevents this contention from occurring.
16126e34eeddSTheodore Ts'o  *
16136e34eeddSTheodore Ts'o  * If block_write_full_page() is called with wbc->sync_mode ==
16146e34eeddSTheodore Ts'o  * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
16156e34eeddSTheodore Ts'o  * causes the writes to be flagged as synchronous writes, but the
16166e34eeddSTheodore Ts'o  * block device queue will NOT be unplugged, since usually many pages
16176e34eeddSTheodore Ts'o  * will be pushed out before the higher-level caller actually
16186e34eeddSTheodore Ts'o  * waits for the writes to be completed.  The various wait functions,
16196e34eeddSTheodore Ts'o  * such as wait_on_writeback_range() will ultimately call sync_page()
16206e34eeddSTheodore Ts'o  * which will ultimately call blk_run_backing_dev(), which will end up
16216e34eeddSTheodore Ts'o  * unplugging the device queue.
16221da177e4SLinus Torvalds  */
16231da177e4SLinus Torvalds static int __block_write_full_page(struct inode *inode, struct page *page,
162435c80d5fSChris Mason 			get_block_t *get_block, struct writeback_control *wbc,
162535c80d5fSChris Mason 			bh_end_io_t *handler)
16261da177e4SLinus Torvalds {
16271da177e4SLinus Torvalds 	int err;
16281da177e4SLinus Torvalds 	sector_t block;
16291da177e4SLinus Torvalds 	sector_t last_block;
1630f0fbd5fcSAndrew Morton 	struct buffer_head *bh, *head;
1631b0cf2321SBadari Pulavarty 	const unsigned blocksize = 1 << inode->i_blkbits;
16321da177e4SLinus Torvalds 	int nr_underway = 0;
16336e34eeddSTheodore Ts'o 	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
16346e34eeddSTheodore Ts'o 			WRITE_SYNC_PLUG : WRITE);
16351da177e4SLinus Torvalds 
16361da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
16371da177e4SLinus Torvalds 
16381da177e4SLinus Torvalds 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
16391da177e4SLinus Torvalds 
16401da177e4SLinus Torvalds 	if (!page_has_buffers(page)) {
1641b0cf2321SBadari Pulavarty 		create_empty_buffers(page, blocksize,
16421da177e4SLinus Torvalds 					(1 << BH_Dirty)|(1 << BH_Uptodate));
16431da177e4SLinus Torvalds 	}
16441da177e4SLinus Torvalds 
16451da177e4SLinus Torvalds 	/*
16461da177e4SLinus Torvalds 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
16471da177e4SLinus Torvalds 	 * here, and the (potentially unmapped) buffers may become dirty at
16481da177e4SLinus Torvalds 	 * any time.  If a buffer becomes dirty here after we've inspected it
16491da177e4SLinus Torvalds 	 * then we just miss that fact, and the page stays dirty.
16501da177e4SLinus Torvalds 	 *
16511da177e4SLinus Torvalds 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
16521da177e4SLinus Torvalds 	 * handle that here by just cleaning them.
16531da177e4SLinus Torvalds 	 */
16541da177e4SLinus Torvalds 
165554b21a79SAndrew Morton 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
16561da177e4SLinus Torvalds 	head = page_buffers(page);
16571da177e4SLinus Torvalds 	bh = head;
16581da177e4SLinus Torvalds 
16591da177e4SLinus Torvalds 	/*
16601da177e4SLinus Torvalds 	 * Get all the dirty buffers mapped to disk addresses and
16611da177e4SLinus Torvalds 	 * handle any aliases from the underlying blockdev's mapping.
16621da177e4SLinus Torvalds 	 */
16631da177e4SLinus Torvalds 	do {
16641da177e4SLinus Torvalds 		if (block > last_block) {
16651da177e4SLinus Torvalds 			/*
16661da177e4SLinus Torvalds 			 * mapped buffers outside i_size will occur, because
16671da177e4SLinus Torvalds 			 * this page can be outside i_size when there is a
16681da177e4SLinus Torvalds 			 * truncate in progress.
16691da177e4SLinus Torvalds 			 */
16701da177e4SLinus Torvalds 			/*
16711da177e4SLinus Torvalds 			 * The buffer was zeroed by block_write_full_page()
16721da177e4SLinus Torvalds 			 */
16731da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
16741da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
167529a814d2SAlex Tomas 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
167629a814d2SAlex Tomas 			   buffer_dirty(bh)) {
1677b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
16781da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
16791da177e4SLinus Torvalds 			if (err)
16801da177e4SLinus Torvalds 				goto recover;
168129a814d2SAlex Tomas 			clear_buffer_delay(bh);
16821da177e4SLinus Torvalds 			if (buffer_new(bh)) {
16831da177e4SLinus Torvalds 				/* blockdev mappings never come here */
16841da177e4SLinus Torvalds 				clear_buffer_new(bh);
16851da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
16861da177e4SLinus Torvalds 							bh->b_blocknr);
16871da177e4SLinus Torvalds 			}
16881da177e4SLinus Torvalds 		}
16891da177e4SLinus Torvalds 		bh = bh->b_this_page;
16901da177e4SLinus Torvalds 		block++;
16911da177e4SLinus Torvalds 	} while (bh != head);
16921da177e4SLinus Torvalds 
16931da177e4SLinus Torvalds 	do {
16941da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
16951da177e4SLinus Torvalds 			continue;
16961da177e4SLinus Torvalds 		/*
16971da177e4SLinus Torvalds 		 * If it's a fully non-blocking write attempt and we cannot
16981da177e4SLinus Torvalds 		 * lock the buffer then redirty the page.  Note that this can
16991da177e4SLinus Torvalds 		 * potentially cause a busy-wait loop from pdflush and kswapd
17001da177e4SLinus Torvalds 		 * activity, but those code paths have their own higher-level
17011da177e4SLinus Torvalds 		 * throttling.
17021da177e4SLinus Torvalds 		 */
17031da177e4SLinus Torvalds 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
17041da177e4SLinus Torvalds 			lock_buffer(bh);
1705ca5de404SNick Piggin 		} else if (!trylock_buffer(bh)) {
17061da177e4SLinus Torvalds 			redirty_page_for_writepage(wbc, page);
17071da177e4SLinus Torvalds 			continue;
17081da177e4SLinus Torvalds 		}
17091da177e4SLinus Torvalds 		if (test_clear_buffer_dirty(bh)) {
171035c80d5fSChris Mason 			mark_buffer_async_write_endio(bh, handler);
17111da177e4SLinus Torvalds 		} else {
17121da177e4SLinus Torvalds 			unlock_buffer(bh);
17131da177e4SLinus Torvalds 		}
17141da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
17151da177e4SLinus Torvalds 
17161da177e4SLinus Torvalds 	/*
17171da177e4SLinus Torvalds 	 * The page and its buffers are protected by PageWriteback(), so we can
17181da177e4SLinus Torvalds 	 * drop the bh refcounts early.
17191da177e4SLinus Torvalds 	 */
17201da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
17211da177e4SLinus Torvalds 	set_page_writeback(page);
17221da177e4SLinus Torvalds 
17231da177e4SLinus Torvalds 	do {
17241da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
17251da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
1726a64c8610STheodore Ts'o 			submit_bh(write_op, bh);
17271da177e4SLinus Torvalds 			nr_underway++;
1728ad576e63SNick Piggin 		}
17291da177e4SLinus Torvalds 		bh = next;
17301da177e4SLinus Torvalds 	} while (bh != head);
173105937baaSAndrew Morton 	unlock_page(page);
17321da177e4SLinus Torvalds 
17331da177e4SLinus Torvalds 	err = 0;
17341da177e4SLinus Torvalds done:
17351da177e4SLinus Torvalds 	if (nr_underway == 0) {
17361da177e4SLinus Torvalds 		/*
17371da177e4SLinus Torvalds 		 * The page was marked dirty, but the buffers were
17381da177e4SLinus Torvalds 		 * clean.  Someone wrote them back by hand with
17391da177e4SLinus Torvalds 		 * ll_rw_block/submit_bh.  A rare case.
17401da177e4SLinus Torvalds 		 */
17411da177e4SLinus Torvalds 		end_page_writeback(page);
17423d67f2d7SNick Piggin 
17431da177e4SLinus Torvalds 		/*
17441da177e4SLinus Torvalds 		 * The page and buffer_heads can be released at any time from
17451da177e4SLinus Torvalds 		 * here on.
17461da177e4SLinus Torvalds 		 */
17471da177e4SLinus Torvalds 	}
17481da177e4SLinus Torvalds 	return err;
17491da177e4SLinus Torvalds 
17501da177e4SLinus Torvalds recover:
17511da177e4SLinus Torvalds 	/*
17521da177e4SLinus Torvalds 	 * ENOSPC, or some other error.  We may already have added some
17531da177e4SLinus Torvalds 	 * blocks to the file, so we need to write these out to avoid
17541da177e4SLinus Torvalds 	 * exposing stale data.
17551da177e4SLinus Torvalds 	 * The page is currently locked and not marked for writeback
17561da177e4SLinus Torvalds 	 */
17571da177e4SLinus Torvalds 	bh = head;
17581da177e4SLinus Torvalds 	/* Recovery: lock and submit the mapped buffers */
17591da177e4SLinus Torvalds 	do {
176029a814d2SAlex Tomas 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
176129a814d2SAlex Tomas 		    !buffer_delay(bh)) {
17621da177e4SLinus Torvalds 			lock_buffer(bh);
176335c80d5fSChris Mason 			mark_buffer_async_write_endio(bh, handler);
17641da177e4SLinus Torvalds 		} else {
17651da177e4SLinus Torvalds 			/*
17661da177e4SLinus Torvalds 			 * The buffer may have been set dirty during
17671da177e4SLinus Torvalds 			 * attachment to a dirty page.
17681da177e4SLinus Torvalds 			 */
17691da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
17701da177e4SLinus Torvalds 		}
17711da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
17721da177e4SLinus Torvalds 	SetPageError(page);
17731da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
17747e4c3690SAndrew Morton 	mapping_set_error(page->mapping, err);
17751da177e4SLinus Torvalds 	set_page_writeback(page);
17761da177e4SLinus Torvalds 	do {
17771da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
17781da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
17791da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
1780a64c8610STheodore Ts'o 			submit_bh(write_op, bh);
17811da177e4SLinus Torvalds 			nr_underway++;
1782ad576e63SNick Piggin 		}
17831da177e4SLinus Torvalds 		bh = next;
17841da177e4SLinus Torvalds 	} while (bh != head);
1785ffda9d30SNick Piggin 	unlock_page(page);
17861da177e4SLinus Torvalds 	goto done;
17871da177e4SLinus Torvalds }
17881da177e4SLinus Torvalds 
1789afddba49SNick Piggin /*
1790afddba49SNick Piggin  * If a page has any new buffers, zero them out here, and mark them uptodate
1791afddba49SNick Piggin  * and dirty so they'll be written out (in order to prevent uninitialised
1792afddba49SNick Piggin  * block data from leaking). And clear the new bit.
1793afddba49SNick Piggin  */
1794afddba49SNick Piggin void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1795afddba49SNick Piggin {
1796afddba49SNick Piggin 	unsigned int block_start, block_end;
1797afddba49SNick Piggin 	struct buffer_head *head, *bh;
1798afddba49SNick Piggin 
1799afddba49SNick Piggin 	BUG_ON(!PageLocked(page));
1800afddba49SNick Piggin 	if (!page_has_buffers(page))
1801afddba49SNick Piggin 		return;
1802afddba49SNick Piggin 
1803afddba49SNick Piggin 	bh = head = page_buffers(page);
1804afddba49SNick Piggin 	block_start = 0;
1805afddba49SNick Piggin 	do {
1806afddba49SNick Piggin 		block_end = block_start + bh->b_size;
1807afddba49SNick Piggin 
1808afddba49SNick Piggin 		if (buffer_new(bh)) {
1809afddba49SNick Piggin 			if (block_end > from && block_start < to) {
1810afddba49SNick Piggin 				if (!PageUptodate(page)) {
1811afddba49SNick Piggin 					unsigned start, size;
1812afddba49SNick Piggin 
1813afddba49SNick Piggin 					start = max(from, block_start);
1814afddba49SNick Piggin 					size = min(to, block_end) - start;
1815afddba49SNick Piggin 
1816eebd2aa3SChristoph Lameter 					zero_user(page, start, size);
1817afddba49SNick Piggin 					set_buffer_uptodate(bh);
1818afddba49SNick Piggin 				}
1819afddba49SNick Piggin 
1820afddba49SNick Piggin 				clear_buffer_new(bh);
1821afddba49SNick Piggin 				mark_buffer_dirty(bh);
1822afddba49SNick Piggin 			}
1823afddba49SNick Piggin 		}
1824afddba49SNick Piggin 
1825afddba49SNick Piggin 		block_start = block_end;
1826afddba49SNick Piggin 		bh = bh->b_this_page;
1827afddba49SNick Piggin 	} while (bh != head);
1828afddba49SNick Piggin }
1829afddba49SNick Piggin EXPORT_SYMBOL(page_zero_new_buffers);
1830afddba49SNick Piggin 
18311da177e4SLinus Torvalds static int __block_prepare_write(struct inode *inode, struct page *page,
18321da177e4SLinus Torvalds 		unsigned from, unsigned to, get_block_t *get_block)
18331da177e4SLinus Torvalds {
18341da177e4SLinus Torvalds 	unsigned block_start, block_end;
18351da177e4SLinus Torvalds 	sector_t block;
18361da177e4SLinus Torvalds 	int err = 0;
18371da177e4SLinus Torvalds 	unsigned blocksize, bbits;
18381da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
18391da177e4SLinus Torvalds 
18401da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
18411da177e4SLinus Torvalds 	BUG_ON(from > PAGE_CACHE_SIZE);
18421da177e4SLinus Torvalds 	BUG_ON(to > PAGE_CACHE_SIZE);
18431da177e4SLinus Torvalds 	BUG_ON(from > to);
18441da177e4SLinus Torvalds 
18451da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
18461da177e4SLinus Torvalds 	if (!page_has_buffers(page))
18471da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
18481da177e4SLinus Torvalds 	head = page_buffers(page);
18491da177e4SLinus Torvalds 
18501da177e4SLinus Torvalds 	bbits = inode->i_blkbits;
18511da177e4SLinus Torvalds 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
18521da177e4SLinus Torvalds 
18531da177e4SLinus Torvalds 	for(bh = head, block_start = 0; bh != head || !block_start;
18541da177e4SLinus Torvalds 	    block++, block_start=block_end, bh = bh->b_this_page) {
18551da177e4SLinus Torvalds 		block_end = block_start + blocksize;
18561da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
18571da177e4SLinus Torvalds 			if (PageUptodate(page)) {
18581da177e4SLinus Torvalds 				if (!buffer_uptodate(bh))
18591da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
18601da177e4SLinus Torvalds 			}
18611da177e4SLinus Torvalds 			continue;
18621da177e4SLinus Torvalds 		}
18631da177e4SLinus Torvalds 		if (buffer_new(bh))
18641da177e4SLinus Torvalds 			clear_buffer_new(bh);
18651da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
1866b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
18671da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
18681da177e4SLinus Torvalds 			if (err)
1869f3ddbdc6SNick Piggin 				break;
18701da177e4SLinus Torvalds 			if (buffer_new(bh)) {
18711da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
18721da177e4SLinus Torvalds 							bh->b_blocknr);
18731da177e4SLinus Torvalds 				if (PageUptodate(page)) {
1874637aff46SNick Piggin 					clear_buffer_new(bh);
18751da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
1876637aff46SNick Piggin 					mark_buffer_dirty(bh);
18771da177e4SLinus Torvalds 					continue;
18781da177e4SLinus Torvalds 				}
1879eebd2aa3SChristoph Lameter 				if (block_end > to || block_start < from)
1880eebd2aa3SChristoph Lameter 					zero_user_segments(page,
1881eebd2aa3SChristoph Lameter 						to, block_end,
1882eebd2aa3SChristoph Lameter 						block_start, from);
18831da177e4SLinus Torvalds 				continue;
18841da177e4SLinus Torvalds 			}
18851da177e4SLinus Torvalds 		}
18861da177e4SLinus Torvalds 		if (PageUptodate(page)) {
18871da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
18881da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
18891da177e4SLinus Torvalds 			continue;
18901da177e4SLinus Torvalds 		}
18911da177e4SLinus Torvalds 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
189233a266ddSDavid Chinner 		    !buffer_unwritten(bh) &&
18931da177e4SLinus Torvalds 		     (block_start < from || block_end > to)) {
18941da177e4SLinus Torvalds 			ll_rw_block(READ, 1, &bh);
18951da177e4SLinus Torvalds 			*wait_bh++=bh;
18961da177e4SLinus Torvalds 		}
18971da177e4SLinus Torvalds 	}
18981da177e4SLinus Torvalds 	/*
18991da177e4SLinus Torvalds 	 * If we issued read requests - let them complete.
19001da177e4SLinus Torvalds 	 */
19011da177e4SLinus Torvalds 	while(wait_bh > wait) {
19021da177e4SLinus Torvalds 		wait_on_buffer(*--wait_bh);
19031da177e4SLinus Torvalds 		if (!buffer_uptodate(*wait_bh))
1904f3ddbdc6SNick Piggin 			err = -EIO;
19051da177e4SLinus Torvalds 	}
1906afddba49SNick Piggin 	if (unlikely(err))
1907afddba49SNick Piggin 		page_zero_new_buffers(page, from, to);
19081da177e4SLinus Torvalds 	return err;
19091da177e4SLinus Torvalds }
19101da177e4SLinus Torvalds 
19111da177e4SLinus Torvalds static int __block_commit_write(struct inode *inode, struct page *page,
19121da177e4SLinus Torvalds 		unsigned from, unsigned to)
19131da177e4SLinus Torvalds {
19141da177e4SLinus Torvalds 	unsigned block_start, block_end;
19151da177e4SLinus Torvalds 	int partial = 0;
19161da177e4SLinus Torvalds 	unsigned blocksize;
19171da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
19181da177e4SLinus Torvalds 
19191da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
19201da177e4SLinus Torvalds 
19211da177e4SLinus Torvalds 	for(bh = head = page_buffers(page), block_start = 0;
19221da177e4SLinus Torvalds 	    bh != head || !block_start;
19231da177e4SLinus Torvalds 	    block_start=block_end, bh = bh->b_this_page) {
19241da177e4SLinus Torvalds 		block_end = block_start + blocksize;
19251da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
19261da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
19271da177e4SLinus Torvalds 				partial = 1;
19281da177e4SLinus Torvalds 		} else {
19291da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
19301da177e4SLinus Torvalds 			mark_buffer_dirty(bh);
19311da177e4SLinus Torvalds 		}
1932afddba49SNick Piggin 		clear_buffer_new(bh);
19331da177e4SLinus Torvalds 	}
19341da177e4SLinus Torvalds 
19351da177e4SLinus Torvalds 	/*
19361da177e4SLinus Torvalds 	 * If this is a partial write which happened to make all buffers
19371da177e4SLinus Torvalds 	 * uptodate then we can optimize away a bogus readpage() for
19381da177e4SLinus Torvalds 	 * the next read(). Here we 'discover' whether the page went
19391da177e4SLinus Torvalds 	 * uptodate as a result of this (potentially partial) write.
19401da177e4SLinus Torvalds 	 */
19411da177e4SLinus Torvalds 	if (!partial)
19421da177e4SLinus Torvalds 		SetPageUptodate(page);
19431da177e4SLinus Torvalds 	return 0;
19441da177e4SLinus Torvalds }
19451da177e4SLinus Torvalds 
19461da177e4SLinus Torvalds /*
1947afddba49SNick Piggin  * block_write_begin takes care of the basic task of block allocation and
1948afddba49SNick Piggin  * bringing partial write blocks uptodate first.
1949afddba49SNick Piggin  *
1950afddba49SNick Piggin  * If *pagep is not NULL, then block_write_begin uses the locked page
1951afddba49SNick Piggin  * at *pagep rather than allocating its own. In this case, the page will
1952afddba49SNick Piggin  * not be unlocked or deallocated on failure.
1953afddba49SNick Piggin  */
1954afddba49SNick Piggin int block_write_begin(struct file *file, struct address_space *mapping,
1955afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
1956afddba49SNick Piggin 			struct page **pagep, void **fsdata,
1957afddba49SNick Piggin 			get_block_t *get_block)
1958afddba49SNick Piggin {
1959afddba49SNick Piggin 	struct inode *inode = mapping->host;
1960afddba49SNick Piggin 	int status = 0;
1961afddba49SNick Piggin 	struct page *page;
1962afddba49SNick Piggin 	pgoff_t index;
1963afddba49SNick Piggin 	unsigned start, end;
1964afddba49SNick Piggin 	int ownpage = 0;
1965afddba49SNick Piggin 
1966afddba49SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
1967afddba49SNick Piggin 	start = pos & (PAGE_CACHE_SIZE - 1);
1968afddba49SNick Piggin 	end = start + len;
1969afddba49SNick Piggin 
1970afddba49SNick Piggin 	page = *pagep;
1971afddba49SNick Piggin 	if (page == NULL) {
1972afddba49SNick Piggin 		ownpage = 1;
197354566b2cSNick Piggin 		page = grab_cache_page_write_begin(mapping, index, flags);
1974afddba49SNick Piggin 		if (!page) {
1975afddba49SNick Piggin 			status = -ENOMEM;
1976afddba49SNick Piggin 			goto out;
1977afddba49SNick Piggin 		}
1978afddba49SNick Piggin 		*pagep = page;
1979afddba49SNick Piggin 	} else
1980afddba49SNick Piggin 		BUG_ON(!PageLocked(page));
1981afddba49SNick Piggin 
1982afddba49SNick Piggin 	status = __block_prepare_write(inode, page, start, end, get_block);
1983afddba49SNick Piggin 	if (unlikely(status)) {
1984afddba49SNick Piggin 		ClearPageUptodate(page);
1985afddba49SNick Piggin 
1986afddba49SNick Piggin 		if (ownpage) {
1987afddba49SNick Piggin 			unlock_page(page);
1988afddba49SNick Piggin 			page_cache_release(page);
1989afddba49SNick Piggin 			*pagep = NULL;
1990afddba49SNick Piggin 
1991afddba49SNick Piggin 			/*
1992afddba49SNick Piggin 			 * prepare_write() may have instantiated a few blocks
1993afddba49SNick Piggin 			 * outside i_size.  Trim these off again. Don't need
1994afddba49SNick Piggin 			 * i_size_read because we hold i_mutex.
1995afddba49SNick Piggin 			 */
1996afddba49SNick Piggin 			if (pos + len > inode->i_size)
1997afddba49SNick Piggin 				vmtruncate(inode, inode->i_size);
1998afddba49SNick Piggin 		}
1999afddba49SNick Piggin 	}
2000afddba49SNick Piggin 
2001afddba49SNick Piggin out:
2002afddba49SNick Piggin 	return status;
2003afddba49SNick Piggin }
2004afddba49SNick Piggin EXPORT_SYMBOL(block_write_begin);
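
/*
 * Illustrative sketch (not part of the original file): the usual way a
 * simple filesystem delegates to block_write_begin().  example_get_block
 * and example_write_begin are hypothetical stand-ins for the
 * filesystem's own get_block and ->write_begin.
 */
#if 0	/* example only */
static int example_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;	/* let block_write_begin allocate and lock the page */
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, example_get_block);
}
#endif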
2005afddba49SNick Piggin 
2006afddba49SNick Piggin int block_write_end(struct file *file, struct address_space *mapping,
2007afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2008afddba49SNick Piggin 			struct page *page, void *fsdata)
2009afddba49SNick Piggin {
2010afddba49SNick Piggin 	struct inode *inode = mapping->host;
2011afddba49SNick Piggin 	unsigned start;
2012afddba49SNick Piggin 
2013afddba49SNick Piggin 	start = pos & (PAGE_CACHE_SIZE - 1);
2014afddba49SNick Piggin 
2015afddba49SNick Piggin 	if (unlikely(copied < len)) {
2016afddba49SNick Piggin 		/*
2017afddba49SNick Piggin 		 * The buffers that were written will now be uptodate, so we
2018afddba49SNick Piggin 		 * don't have to worry about a readpage reading them and
2019afddba49SNick Piggin 		 * overwriting a partial write. However if we have encountered
2020afddba49SNick Piggin 		 * a short write and only partially written into a buffer, it
2021afddba49SNick Piggin 		 * will not be marked uptodate, so a readpage might come in and
2022afddba49SNick Piggin 		 * destroy our partial write.
2023afddba49SNick Piggin 		 *
2024afddba49SNick Piggin 		 * Do the simplest thing, and just treat any short write to a
2025afddba49SNick Piggin 		 * non uptodate page as a zero-length write, and force the
2026afddba49SNick Piggin 		 * caller to redo the whole thing.
2027afddba49SNick Piggin 		 */
2028afddba49SNick Piggin 		if (!PageUptodate(page))
2029afddba49SNick Piggin 			copied = 0;
2030afddba49SNick Piggin 
2031afddba49SNick Piggin 		page_zero_new_buffers(page, start+copied, start+len);
2032afddba49SNick Piggin 	}
2033afddba49SNick Piggin 	flush_dcache_page(page);
2034afddba49SNick Piggin 
2035afddba49SNick Piggin 	/* This could be a short (even 0-length) commit */
2036afddba49SNick Piggin 	__block_commit_write(inode, page, start, start+copied);
2037afddba49SNick Piggin 
2038afddba49SNick Piggin 	return copied;
2039afddba49SNick Piggin }
2040afddba49SNick Piggin EXPORT_SYMBOL(block_write_end);
2041afddba49SNick Piggin 
2042afddba49SNick Piggin int generic_write_end(struct file *file, struct address_space *mapping,
2043afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2044afddba49SNick Piggin 			struct page *page, void *fsdata)
2045afddba49SNick Piggin {
2046afddba49SNick Piggin 	struct inode *inode = mapping->host;
2047c7d206b3SJan Kara 	int i_size_changed = 0;
2048afddba49SNick Piggin 
2049afddba49SNick Piggin 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2050afddba49SNick Piggin 
2051afddba49SNick Piggin 	/*
2052afddba49SNick Piggin 	 * No need to use i_size_read() here, the i_size
2053afddba49SNick Piggin 	 * cannot change under us because we hold i_mutex.
2054afddba49SNick Piggin 	 *
2055afddba49SNick Piggin 	 * But it's important to update i_size while still holding page lock:
2056afddba49SNick Piggin 	 * page writeout could otherwise come in and zero beyond i_size.
2057afddba49SNick Piggin 	 */
2058afddba49SNick Piggin 	if (pos+copied > inode->i_size) {
2059afddba49SNick Piggin 		i_size_write(inode, pos+copied);
2060c7d206b3SJan Kara 		i_size_changed = 1;
2061afddba49SNick Piggin 	}
2062afddba49SNick Piggin 
2063afddba49SNick Piggin 	unlock_page(page);
2064afddba49SNick Piggin 	page_cache_release(page);
2065afddba49SNick Piggin 
2066c7d206b3SJan Kara 	/*
2067c7d206b3SJan Kara 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
2068c7d206b3SJan Kara 	 * makes the holding time of page lock longer. Second, it forces lock
2069c7d206b3SJan Kara 	 * ordering of page lock and transaction start for journaling
2070c7d206b3SJan Kara 	 * filesystems.
2071c7d206b3SJan Kara 	 */
2072c7d206b3SJan Kara 	if (i_size_changed)
2073c7d206b3SJan Kara 		mark_inode_dirty(inode);
2074c7d206b3SJan Kara 
2075afddba49SNick Piggin 	return copied;
2076afddba49SNick Piggin }
2077afddba49SNick Piggin EXPORT_SYMBOL(generic_write_end);
2078afddba49SNick Piggin 
2079afddba49SNick Piggin /*
20808ab22b9aSHisashi Hifumi  * block_is_partially_uptodate checks whether buffers within a page are
20818ab22b9aSHisashi Hifumi  * uptodate or not.
20828ab22b9aSHisashi Hifumi  *
20838ab22b9aSHisashi Hifumi  * Returns true if all buffers which correspond to a file portion
20848ab22b9aSHisashi Hifumi  * we want to read are uptodate.
20858ab22b9aSHisashi Hifumi  */
20868ab22b9aSHisashi Hifumi int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
20878ab22b9aSHisashi Hifumi 					unsigned long from)
20888ab22b9aSHisashi Hifumi {
20898ab22b9aSHisashi Hifumi 	struct inode *inode = page->mapping->host;
20908ab22b9aSHisashi Hifumi 	unsigned block_start, block_end, blocksize;
20918ab22b9aSHisashi Hifumi 	unsigned to;
20928ab22b9aSHisashi Hifumi 	struct buffer_head *bh, *head;
20938ab22b9aSHisashi Hifumi 	int ret = 1;
20948ab22b9aSHisashi Hifumi 
20958ab22b9aSHisashi Hifumi 	if (!page_has_buffers(page))
20968ab22b9aSHisashi Hifumi 		return 0;
20978ab22b9aSHisashi Hifumi 
20988ab22b9aSHisashi Hifumi 	blocksize = 1 << inode->i_blkbits;
20998ab22b9aSHisashi Hifumi 	to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
21008ab22b9aSHisashi Hifumi 	to = from + to;
21018ab22b9aSHisashi Hifumi 	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
21028ab22b9aSHisashi Hifumi 		return 0;
21038ab22b9aSHisashi Hifumi 
21048ab22b9aSHisashi Hifumi 	head = page_buffers(page);
21058ab22b9aSHisashi Hifumi 	bh = head;
21068ab22b9aSHisashi Hifumi 	block_start = 0;
21078ab22b9aSHisashi Hifumi 	do {
21088ab22b9aSHisashi Hifumi 		block_end = block_start + blocksize;
21098ab22b9aSHisashi Hifumi 		if (block_end > from && block_start < to) {
21108ab22b9aSHisashi Hifumi 			if (!buffer_uptodate(bh)) {
21118ab22b9aSHisashi Hifumi 				ret = 0;
21128ab22b9aSHisashi Hifumi 				break;
21138ab22b9aSHisashi Hifumi 			}
21148ab22b9aSHisashi Hifumi 			if (block_end >= to)
21158ab22b9aSHisashi Hifumi 				break;
21168ab22b9aSHisashi Hifumi 		}
21178ab22b9aSHisashi Hifumi 		block_start = block_end;
21188ab22b9aSHisashi Hifumi 		bh = bh->b_this_page;
21198ab22b9aSHisashi Hifumi 	} while (bh != head);
21208ab22b9aSHisashi Hifumi 
21218ab22b9aSHisashi Hifumi 	return ret;
21228ab22b9aSHisashi Hifumi }
21238ab22b9aSHisashi Hifumi EXPORT_SYMBOL(block_is_partially_uptodate);
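
/*
 * Example: this is wired through ->is_partially_uptodate so the generic
 * read path can satisfy a small read from a partially uptodate page
 * without issuing a full readpage.  A sketch with a hypothetical
 * myfs_readpage; not compiled.
 */
#if 0
static const struct address_space_operations myfs_aops = {
	.readpage		= myfs_readpage,
	.is_partially_uptodate	= block_is_partially_uptodate,
};
#endif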
21248ab22b9aSHisashi Hifumi 
21258ab22b9aSHisashi Hifumi /*
21261da177e4SLinus Torvalds  * Generic "read page" function for block devices that have the normal
21271da177e4SLinus Torvalds  * get_block functionality; this covers most block-device-based filesystems.
21281da177e4SLinus Torvalds  * Reads the page asynchronously --- the unlock_buffer() and
21291da177e4SLinus Torvalds  * set/clear_buffer_uptodate() functions propagate buffer state into the
21301da177e4SLinus Torvalds  * page struct once IO has completed.
21311da177e4SLinus Torvalds  */
21321da177e4SLinus Torvalds int block_read_full_page(struct page *page, get_block_t *get_block)
21331da177e4SLinus Torvalds {
21341da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
21351da177e4SLinus Torvalds 	sector_t iblock, lblock;
21361da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
21371da177e4SLinus Torvalds 	unsigned int blocksize;
21381da177e4SLinus Torvalds 	int nr, i;
21391da177e4SLinus Torvalds 	int fully_mapped = 1;
21401da177e4SLinus Torvalds 
2141cd7619d6SMatt Mackall 	BUG_ON(!PageLocked(page));
21421da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
21431da177e4SLinus Torvalds 	if (!page_has_buffers(page))
21441da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
21451da177e4SLinus Torvalds 	head = page_buffers(page);
21461da177e4SLinus Torvalds 
21471da177e4SLinus Torvalds 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
21481da177e4SLinus Torvalds 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
21491da177e4SLinus Torvalds 	bh = head;
21501da177e4SLinus Torvalds 	nr = 0;
21511da177e4SLinus Torvalds 	i = 0;
21521da177e4SLinus Torvalds 
21531da177e4SLinus Torvalds 	do {
21541da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
21551da177e4SLinus Torvalds 			continue;
21561da177e4SLinus Torvalds 
21571da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
2158c64610baSAndrew Morton 			int err = 0;
2159c64610baSAndrew Morton 
21601da177e4SLinus Torvalds 			fully_mapped = 0;
21611da177e4SLinus Torvalds 			if (iblock < lblock) {
2162b0cf2321SBadari Pulavarty 				WARN_ON(bh->b_size != blocksize);
2163c64610baSAndrew Morton 				err = get_block(inode, iblock, bh, 0);
2164c64610baSAndrew Morton 				if (err)
21651da177e4SLinus Torvalds 					SetPageError(page);
21661da177e4SLinus Torvalds 			}
21671da177e4SLinus Torvalds 			if (!buffer_mapped(bh)) {
2168eebd2aa3SChristoph Lameter 				zero_user(page, i * blocksize, blocksize);
2169c64610baSAndrew Morton 				if (!err)
21701da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
21711da177e4SLinus Torvalds 				continue;
21721da177e4SLinus Torvalds 			}
21731da177e4SLinus Torvalds 			/*
21741da177e4SLinus Torvalds 			 * get_block() might have updated the buffer
21751da177e4SLinus Torvalds 			 * synchronously
21761da177e4SLinus Torvalds 			 */
21771da177e4SLinus Torvalds 			if (buffer_uptodate(bh))
21781da177e4SLinus Torvalds 				continue;
21791da177e4SLinus Torvalds 		}
21801da177e4SLinus Torvalds 		arr[nr++] = bh;
21811da177e4SLinus Torvalds 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
21821da177e4SLinus Torvalds 
21831da177e4SLinus Torvalds 	if (fully_mapped)
21841da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
21851da177e4SLinus Torvalds 
21861da177e4SLinus Torvalds 	if (!nr) {
21871da177e4SLinus Torvalds 		/*
21881da177e4SLinus Torvalds 		 * All buffers are uptodate - we can set the page uptodate
21891da177e4SLinus Torvalds 		 * as well. But not if get_block() returned an error.
21901da177e4SLinus Torvalds 		 */
21911da177e4SLinus Torvalds 		if (!PageError(page))
21921da177e4SLinus Torvalds 			SetPageUptodate(page);
21931da177e4SLinus Torvalds 		unlock_page(page);
21941da177e4SLinus Torvalds 		return 0;
21951da177e4SLinus Torvalds 	}
21961da177e4SLinus Torvalds 
21971da177e4SLinus Torvalds 	/* Stage two: lock the buffers */
21981da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
21991da177e4SLinus Torvalds 		bh = arr[i];
22001da177e4SLinus Torvalds 		lock_buffer(bh);
22011da177e4SLinus Torvalds 		mark_buffer_async_read(bh);
22021da177e4SLinus Torvalds 	}
22031da177e4SLinus Torvalds 
22041da177e4SLinus Torvalds 	/*
22051da177e4SLinus Torvalds 	 * Stage 3: start the IO.  Check for uptodateness
22061da177e4SLinus Torvalds 	 * inside the buffer lock in case another process reading
22071da177e4SLinus Torvalds 	 * the underlying blockdev brought it uptodate (the sct fix).
22081da177e4SLinus Torvalds 	 */
22091da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
22101da177e4SLinus Torvalds 		bh = arr[i];
22111da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
22121da177e4SLinus Torvalds 			end_buffer_async_read(bh, 1);
22131da177e4SLinus Torvalds 		else
22141da177e4SLinus Torvalds 			submit_bh(READ, bh);
22151da177e4SLinus Torvalds 	}
22161da177e4SLinus Torvalds 	return 0;
22171da177e4SLinus Torvalds }
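
/*
 * Example: filesystems normally consume this via a one-line ->readpage
 * wrapper that supplies their own get_block routine.  A sketch with a
 * hypothetical myfs_get_block; not compiled.
 */
#if 0
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}
#endif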
22181da177e4SLinus Torvalds 
22191da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding
222089e10787SNick Piggin  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
22211da177e4SLinus Torvalds  * deal with the hole.
22221da177e4SLinus Torvalds  */
222389e10787SNick Piggin int generic_cont_expand_simple(struct inode *inode, loff_t size)
22241da177e4SLinus Torvalds {
22251da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
22261da177e4SLinus Torvalds 	struct page *page;
222789e10787SNick Piggin 	void *fsdata;
222805eb0b51SOGAWA Hirofumi 	unsigned long limit;
22291da177e4SLinus Torvalds 	int err;
22301da177e4SLinus Torvalds 
22311da177e4SLinus Torvalds 	err = -EFBIG;
22321da177e4SLinus Torvalds 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
22331da177e4SLinus Torvalds 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
22341da177e4SLinus Torvalds 		send_sig(SIGXFSZ, current, 0);
22351da177e4SLinus Torvalds 		goto out;
22361da177e4SLinus Torvalds 	}
22371da177e4SLinus Torvalds 	if (size > inode->i_sb->s_maxbytes)
22381da177e4SLinus Torvalds 		goto out;
22391da177e4SLinus Torvalds 
224089e10787SNick Piggin 	err = pagecache_write_begin(NULL, mapping, size, 0,
224189e10787SNick Piggin 				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
224289e10787SNick Piggin 				&page, &fsdata);
224389e10787SNick Piggin 	if (err)
224405eb0b51SOGAWA Hirofumi 		goto out;
224505eb0b51SOGAWA Hirofumi 
224689e10787SNick Piggin 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
224789e10787SNick Piggin 	BUG_ON(err > 0);
224805eb0b51SOGAWA Hirofumi 
224905eb0b51SOGAWA Hirofumi out:
225005eb0b51SOGAWA Hirofumi 	return err;
225105eb0b51SOGAWA Hirofumi }
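
/*
 * Example: a typical caller is a ->setattr implementation that must grow
 * the file before applying the new size.  A sketch with hypothetical
 * "myfs" names and trimmed error handling; not compiled.
 */
#if 0
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
		err = generic_cont_expand_simple(inode, attr->ia_size);
		if (err)
			return err;
	}
	return inode_setattr(inode, attr);
}
#endif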
225205eb0b51SOGAWA Hirofumi 
2253f1e3af72SAdrian Bunk static int cont_expand_zero(struct file *file, struct address_space *mapping,
225489e10787SNick Piggin 			    loff_t pos, loff_t *bytes)
225505eb0b51SOGAWA Hirofumi {
225689e10787SNick Piggin 	struct inode *inode = mapping->host;
225789e10787SNick Piggin 	unsigned blocksize = 1 << inode->i_blkbits;
225889e10787SNick Piggin 	struct page *page;
225989e10787SNick Piggin 	void *fsdata;
226089e10787SNick Piggin 	pgoff_t index, curidx;
226189e10787SNick Piggin 	loff_t curpos;
226289e10787SNick Piggin 	unsigned zerofrom, offset, len;
226389e10787SNick Piggin 	int err = 0;
226405eb0b51SOGAWA Hirofumi 
226589e10787SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
226689e10787SNick Piggin 	offset = pos & ~PAGE_CACHE_MASK;
226789e10787SNick Piggin 
226889e10787SNick Piggin 	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
226989e10787SNick Piggin 		zerofrom = curpos & ~PAGE_CACHE_MASK;
227089e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
227189e10787SNick Piggin 			*bytes |= (blocksize-1);
227289e10787SNick Piggin 			(*bytes)++;
227389e10787SNick Piggin 		}
227489e10787SNick Piggin 		len = PAGE_CACHE_SIZE - zerofrom;
227589e10787SNick Piggin 
227689e10787SNick Piggin 		err = pagecache_write_begin(file, mapping, curpos, len,
227789e10787SNick Piggin 						AOP_FLAG_UNINTERRUPTIBLE,
227889e10787SNick Piggin 						&page, &fsdata);
227989e10787SNick Piggin 		if (err)
228089e10787SNick Piggin 			goto out;
2281eebd2aa3SChristoph Lameter 		zero_user(page, zerofrom, len);
228289e10787SNick Piggin 		err = pagecache_write_end(file, mapping, curpos, len, len,
228389e10787SNick Piggin 						page, fsdata);
228489e10787SNick Piggin 		if (err < 0)
228589e10787SNick Piggin 			goto out;
228689e10787SNick Piggin 		BUG_ON(err != len);
228789e10787SNick Piggin 		err = 0;
2288061e9746SOGAWA Hirofumi 
2289061e9746SOGAWA Hirofumi 		balance_dirty_pages_ratelimited(mapping);
229089e10787SNick Piggin 	}
229189e10787SNick Piggin 
229289e10787SNick Piggin 	/* page covers the boundary, find the boundary offset */
229389e10787SNick Piggin 	if (index == curidx) {
229489e10787SNick Piggin 		zerofrom = curpos & ~PAGE_CACHE_MASK;
229589e10787SNick Piggin 		/* if we will expand the file, the last block will be filled */
229689e10787SNick Piggin 		if (offset <= zerofrom)
229789e10787SNick Piggin 			goto out;
229989e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
230089e10787SNick Piggin 			*bytes |= (blocksize-1);
230189e10787SNick Piggin 			(*bytes)++;
230289e10787SNick Piggin 		}
230389e10787SNick Piggin 		len = offset - zerofrom;
230489e10787SNick Piggin 
230589e10787SNick Piggin 		err = pagecache_write_begin(file, mapping, curpos, len,
230689e10787SNick Piggin 						AOP_FLAG_UNINTERRUPTIBLE,
230789e10787SNick Piggin 						&page, &fsdata);
230889e10787SNick Piggin 		if (err)
230989e10787SNick Piggin 			goto out;
2310eebd2aa3SChristoph Lameter 		zero_user(page, zerofrom, len);
231189e10787SNick Piggin 		err = pagecache_write_end(file, mapping, curpos, len, len,
231289e10787SNick Piggin 						page, fsdata);
231389e10787SNick Piggin 		if (err < 0)
231489e10787SNick Piggin 			goto out;
231589e10787SNick Piggin 		BUG_ON(err != len);
231689e10787SNick Piggin 		err = 0;
231789e10787SNick Piggin 	}
231889e10787SNick Piggin out:
231989e10787SNick Piggin 	return err;
23201da177e4SLinus Torvalds }
23211da177e4SLinus Torvalds 
23221da177e4SLinus Torvalds /*
23231da177e4SLinus Torvalds  * For moronic filesystems that do not allow holes in files.
23241da177e4SLinus Torvalds  * We may have to extend the file.
23251da177e4SLinus Torvalds  */
232689e10787SNick Piggin int cont_write_begin(struct file *file, struct address_space *mapping,
232789e10787SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
232889e10787SNick Piggin 			struct page **pagep, void **fsdata,
232989e10787SNick Piggin 			get_block_t *get_block, loff_t *bytes)
23301da177e4SLinus Torvalds {
23311da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
23321da177e4SLinus Torvalds 	unsigned blocksize = 1 << inode->i_blkbits;
233389e10787SNick Piggin 	unsigned zerofrom;
233489e10787SNick Piggin 	int err;
23351da177e4SLinus Torvalds 
233689e10787SNick Piggin 	err = cont_expand_zero(file, mapping, pos, bytes);
233789e10787SNick Piggin 	if (err)
23381da177e4SLinus Torvalds 		goto out;
23391da177e4SLinus Torvalds 
23401da177e4SLinus Torvalds 	zerofrom = *bytes & ~PAGE_CACHE_MASK;
234189e10787SNick Piggin 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
23421da177e4SLinus Torvalds 		*bytes |= (blocksize-1);
23431da177e4SLinus Torvalds 		(*bytes)++;
23441da177e4SLinus Torvalds 	}
23451da177e4SLinus Torvalds 
234689e10787SNick Piggin 	*pagep = NULL;
234789e10787SNick Piggin 	err = block_write_begin(file, mapping, pos, len,
234889e10787SNick Piggin 				flags, pagep, fsdata, get_block);
23491da177e4SLinus Torvalds out:
235089e10787SNick Piggin 	return err;
23511da177e4SLinus Torvalds }
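
/*
 * Example: a hole-less filesystem passes a pointer to its per-inode
 * "valid size" counter so cont_write_begin() can zero-fill from there up
 * to the write position.  A sketch in the style of fat; MYFS_I() and its
 * mmu_private field are hypothetical.  Not compiled.
 */
#if 0
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block,
				&MYFS_I(mapping->host)->mmu_private);
}
#endif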
23521da177e4SLinus Torvalds 
23531da177e4SLinus Torvalds int block_prepare_write(struct page *page, unsigned from, unsigned to,
23541da177e4SLinus Torvalds 			get_block_t *get_block)
23551da177e4SLinus Torvalds {
23561da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
23571da177e4SLinus Torvalds 	int err = __block_prepare_write(inode, page, from, to, get_block);
23581da177e4SLinus Torvalds 	if (err)
23591da177e4SLinus Torvalds 		ClearPageUptodate(page);
23601da177e4SLinus Torvalds 	return err;
23611da177e4SLinus Torvalds }
23621da177e4SLinus Torvalds 
23631da177e4SLinus Torvalds int block_commit_write(struct page *page, unsigned from, unsigned to)
23641da177e4SLinus Torvalds {
23651da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
23661da177e4SLinus Torvalds 	__block_commit_write(inode, page, from, to);
23671da177e4SLinus Torvalds 	return 0;
23681da177e4SLinus Torvalds }
23691da177e4SLinus Torvalds 
237054171690SDavid Chinner /*
237154171690SDavid Chinner  * block_page_mkwrite() is not allowed to change the file size as it gets
237254171690SDavid Chinner  * called from a page fault handler when a page is first dirtied. Hence we must
237354171690SDavid Chinner  * be careful to check for EOF conditions here. We set the page up correctly
237454171690SDavid Chinner  * for a written page which means we get ENOSPC checking when writing into
237554171690SDavid Chinner  * holes and correct delalloc and unwritten extent mapping on filesystems that
237654171690SDavid Chinner  * support these features.
237754171690SDavid Chinner  *
237854171690SDavid Chinner  * We are not allowed to take the i_mutex here so we have to play games to
237954171690SDavid Chinner  * protect against truncate races as the page could now be beyond EOF.  Because
238054171690SDavid Chinner  * vmtruncate() writes the inode size before removing pages, once we have the
238154171690SDavid Chinner  * page lock we can determine safely if the page is beyond EOF. If it is not
238254171690SDavid Chinner  * beyond EOF, then the page is guaranteed safe against truncation until we
238354171690SDavid Chinner  * unlock the page.
238454171690SDavid Chinner  */
238554171690SDavid Chinner int
2386c2ec175cSNick Piggin block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
238754171690SDavid Chinner 		   get_block_t get_block)
238854171690SDavid Chinner {
2389c2ec175cSNick Piggin 	struct page *page = vmf->page;
239054171690SDavid Chinner 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
239154171690SDavid Chinner 	unsigned long end;
239254171690SDavid Chinner 	loff_t size;
239356a76f82SNick Piggin 	int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
239454171690SDavid Chinner 
239554171690SDavid Chinner 	lock_page(page);
239654171690SDavid Chinner 	size = i_size_read(inode);
239754171690SDavid Chinner 	if ((page->mapping != inode->i_mapping) ||
239818336338SNick Piggin 	    (page_offset(page) > size)) {
239954171690SDavid Chinner 		/* page got truncated out from underneath us */
2400b827e496SNick Piggin 		unlock_page(page);
2401b827e496SNick Piggin 		goto out;
240254171690SDavid Chinner 	}
240354171690SDavid Chinner 
240454171690SDavid Chinner 	/* page is wholly or partially inside EOF */
240554171690SDavid Chinner 	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
240654171690SDavid Chinner 		end = size & ~PAGE_CACHE_MASK;
240754171690SDavid Chinner 	else
240854171690SDavid Chinner 		end = PAGE_CACHE_SIZE;
240954171690SDavid Chinner 
241054171690SDavid Chinner 	ret = block_prepare_write(page, 0, end, get_block);
241154171690SDavid Chinner 	if (!ret)
241254171690SDavid Chinner 		ret = block_commit_write(page, 0, end);
241354171690SDavid Chinner 
241456a76f82SNick Piggin 	if (unlikely(ret)) {
2415b827e496SNick Piggin 		unlock_page(page);
241656a76f82SNick Piggin 		if (ret == -ENOMEM)
241756a76f82SNick Piggin 			ret = VM_FAULT_OOM;
241856a76f82SNick Piggin 		else /* -ENOSPC, -EIO, etc */
2419c2ec175cSNick Piggin 			ret = VM_FAULT_SIGBUS;
2420b827e496SNick Piggin 	} else
2421b827e496SNick Piggin 		ret = VM_FAULT_LOCKED;
2422c2ec175cSNick Piggin 
2423b827e496SNick Piggin out:
242454171690SDavid Chinner 	return ret;
242554171690SDavid Chinner }
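
/*
 * Example: a filesystem hooks this up through vm_operations_struct for
 * its ->mmap path.  A sketch with hypothetical "myfs" names; not
 * compiled.
 */
#if 0
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, myfs_get_block);
}

static const struct vm_operations_struct myfs_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};
#endif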
24261da177e4SLinus Torvalds 
24271da177e4SLinus Torvalds /*
242803158cd7SNick Piggin  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
24291da177e4SLinus Torvalds  * immediately, while under the page lock.  So it needs a special end_io
24301da177e4SLinus Torvalds  * handler which does not touch the bh after unlocking it.
24311da177e4SLinus Torvalds  */
24321da177e4SLinus Torvalds static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
24331da177e4SLinus Torvalds {
243468671f35SDmitry Monakhov 	__end_buffer_read_notouch(bh, uptodate);
24351da177e4SLinus Torvalds }
24361da177e4SLinus Torvalds 
24371da177e4SLinus Torvalds /*
243803158cd7SNick Piggin  * Attach the singly-linked list of buffers created by nobh_write_begin, to
244003158cd7SNick Piggin  * the page (converting it to a circular linked list and taking care of page
244003158cd7SNick Piggin  * dirty races).
244103158cd7SNick Piggin  */
244203158cd7SNick Piggin static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
244303158cd7SNick Piggin {
244403158cd7SNick Piggin 	struct buffer_head *bh;
244503158cd7SNick Piggin 
244603158cd7SNick Piggin 	BUG_ON(!PageLocked(page));
244703158cd7SNick Piggin 
244803158cd7SNick Piggin 	spin_lock(&page->mapping->private_lock);
244903158cd7SNick Piggin 	bh = head;
245003158cd7SNick Piggin 	do {
245103158cd7SNick Piggin 		if (PageDirty(page))
245203158cd7SNick Piggin 			set_buffer_dirty(bh);
245303158cd7SNick Piggin 		if (!bh->b_this_page)
245403158cd7SNick Piggin 			bh->b_this_page = head;
245503158cd7SNick Piggin 		bh = bh->b_this_page;
245603158cd7SNick Piggin 	} while (bh != head);
245703158cd7SNick Piggin 	attach_page_buffers(page, head);
245803158cd7SNick Piggin 	spin_unlock(&page->mapping->private_lock);
245903158cd7SNick Piggin }
246003158cd7SNick Piggin 
246103158cd7SNick Piggin /*
24621da177e4SLinus Torvalds  * On entry, no part of the page is uptodate.
24631da177e4SLinus Torvalds  * On exit, the page is fully uptodate in the areas outside (from, to).
24641da177e4SLinus Torvalds  */
246503158cd7SNick Piggin int nobh_write_begin(struct file *file, struct address_space *mapping,
246603158cd7SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
246703158cd7SNick Piggin 			struct page **pagep, void **fsdata,
24681da177e4SLinus Torvalds 			get_block_t *get_block)
24691da177e4SLinus Torvalds {
247003158cd7SNick Piggin 	struct inode *inode = mapping->host;
24711da177e4SLinus Torvalds 	const unsigned blkbits = inode->i_blkbits;
24721da177e4SLinus Torvalds 	const unsigned blocksize = 1 << blkbits;
2473a4b0672dSNick Piggin 	struct buffer_head *head, *bh;
247403158cd7SNick Piggin 	struct page *page;
247503158cd7SNick Piggin 	pgoff_t index;
247603158cd7SNick Piggin 	unsigned from, to;
24771da177e4SLinus Torvalds 	unsigned block_in_page;
2478a4b0672dSNick Piggin 	unsigned block_start, block_end;
24791da177e4SLinus Torvalds 	sector_t block_in_file;
24801da177e4SLinus Torvalds 	int nr_reads = 0;
24811da177e4SLinus Torvalds 	int ret = 0;
24821da177e4SLinus Torvalds 	int is_mapped_to_disk = 1;
24831da177e4SLinus Torvalds 
248403158cd7SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
248503158cd7SNick Piggin 	from = pos & (PAGE_CACHE_SIZE - 1);
248603158cd7SNick Piggin 	to = from + len;
248703158cd7SNick Piggin 
248854566b2cSNick Piggin 	page = grab_cache_page_write_begin(mapping, index, flags);
248903158cd7SNick Piggin 	if (!page)
249003158cd7SNick Piggin 		return -ENOMEM;
249103158cd7SNick Piggin 	*pagep = page;
249203158cd7SNick Piggin 	*fsdata = NULL;
249303158cd7SNick Piggin 
249403158cd7SNick Piggin 	if (page_has_buffers(page)) {
249503158cd7SNick Piggin 		unlock_page(page);
249603158cd7SNick Piggin 		page_cache_release(page);
249703158cd7SNick Piggin 		*pagep = NULL;
249803158cd7SNick Piggin 		return block_write_begin(file, mapping, pos, len, flags, pagep,
249903158cd7SNick Piggin 					fsdata, get_block);
250003158cd7SNick Piggin 	}
2501a4b0672dSNick Piggin 
25021da177e4SLinus Torvalds 	if (PageMappedToDisk(page))
25031da177e4SLinus Torvalds 		return 0;
25041da177e4SLinus Torvalds 
2505a4b0672dSNick Piggin 	/*
2506a4b0672dSNick Piggin 	 * Allocate buffers so that we can keep track of state, and potentially
2507a4b0672dSNick Piggin 	 * attach them to the page if an error occurs. In the common case of
2508a4b0672dSNick Piggin 	 * no error, they will just be freed again without ever being attached
2509a4b0672dSNick Piggin 	 * to the page (which is all OK, because we're under the page lock).
2510a4b0672dSNick Piggin 	 *
2511a4b0672dSNick Piggin 	 * Be careful: the buffer linked list is a NULL terminated one, rather
2512a4b0672dSNick Piggin 	 * than the circular one we're used to.
2513a4b0672dSNick Piggin 	 */
2514a4b0672dSNick Piggin 	head = alloc_page_buffers(page, blocksize, 0);
251503158cd7SNick Piggin 	if (!head) {
251603158cd7SNick Piggin 		ret = -ENOMEM;
251703158cd7SNick Piggin 		goto out_release;
251803158cd7SNick Piggin 	}
2519a4b0672dSNick Piggin 
25201da177e4SLinus Torvalds 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
25211da177e4SLinus Torvalds 
25221da177e4SLinus Torvalds 	/*
25231da177e4SLinus Torvalds 	 * We loop across all blocks in the page, whether or not they are
25241da177e4SLinus Torvalds 	 * part of the affected region.  This is so we can discover if the
25251da177e4SLinus Torvalds 	 * page is fully mapped-to-disk.
25261da177e4SLinus Torvalds 	 */
2527a4b0672dSNick Piggin 	for (block_start = 0, block_in_page = 0, bh = head;
25281da177e4SLinus Torvalds 		  block_start < PAGE_CACHE_SIZE;
2529a4b0672dSNick Piggin 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
25301da177e4SLinus Torvalds 		int create;
25311da177e4SLinus Torvalds 
2532a4b0672dSNick Piggin 		block_end = block_start + blocksize;
2533a4b0672dSNick Piggin 		bh->b_state = 0;
25341da177e4SLinus Torvalds 		create = 1;
25351da177e4SLinus Torvalds 		if (block_start >= to)
25361da177e4SLinus Torvalds 			create = 0;
25371da177e4SLinus Torvalds 		ret = get_block(inode, block_in_file + block_in_page,
2538a4b0672dSNick Piggin 					bh, create);
25391da177e4SLinus Torvalds 		if (ret)
25401da177e4SLinus Torvalds 			goto failed;
2541a4b0672dSNick Piggin 		if (!buffer_mapped(bh))
25421da177e4SLinus Torvalds 			is_mapped_to_disk = 0;
2543a4b0672dSNick Piggin 		if (buffer_new(bh))
2544a4b0672dSNick Piggin 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2545a4b0672dSNick Piggin 		if (PageUptodate(page)) {
2546a4b0672dSNick Piggin 			set_buffer_uptodate(bh);
25471da177e4SLinus Torvalds 			continue;
2548a4b0672dSNick Piggin 		}
2549a4b0672dSNick Piggin 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2550eebd2aa3SChristoph Lameter 			zero_user_segments(page, block_start, from,
2551eebd2aa3SChristoph Lameter 							to, block_end);
25521da177e4SLinus Torvalds 			continue;
25531da177e4SLinus Torvalds 		}
2554a4b0672dSNick Piggin 		if (buffer_uptodate(bh))
25551da177e4SLinus Torvalds 			continue;	/* reiserfs does this */
25561da177e4SLinus Torvalds 		if (block_start < from || block_end > to) {
2557a4b0672dSNick Piggin 			lock_buffer(bh);
2558a4b0672dSNick Piggin 			bh->b_end_io = end_buffer_read_nobh;
2559a4b0672dSNick Piggin 			submit_bh(READ, bh);
2560a4b0672dSNick Piggin 			nr_reads++;
25611da177e4SLinus Torvalds 		}
25621da177e4SLinus Torvalds 	}
25631da177e4SLinus Torvalds 
25641da177e4SLinus Torvalds 	if (nr_reads) {
25651da177e4SLinus Torvalds 		/*
25661da177e4SLinus Torvalds 		 * The page is locked, so these buffers are protected from
25671da177e4SLinus Torvalds 		 * any VM or truncate activity.  Hence we don't need to care
25681da177e4SLinus Torvalds 		 * for the buffer_head refcounts.
25691da177e4SLinus Torvalds 		 */
2570a4b0672dSNick Piggin 		for (bh = head; bh; bh = bh->b_this_page) {
25711da177e4SLinus Torvalds 			wait_on_buffer(bh);
25721da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
25731da177e4SLinus Torvalds 				ret = -EIO;
25741da177e4SLinus Torvalds 		}
25751da177e4SLinus Torvalds 		if (ret)
25761da177e4SLinus Torvalds 			goto failed;
25771da177e4SLinus Torvalds 	}
25781da177e4SLinus Torvalds 
25791da177e4SLinus Torvalds 	if (is_mapped_to_disk)
25801da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
25811da177e4SLinus Torvalds 
258203158cd7SNick Piggin 	*fsdata = head; /* to be released by nobh_write_end */
2583a4b0672dSNick Piggin 
25841da177e4SLinus Torvalds 	return 0;
25851da177e4SLinus Torvalds 
25861da177e4SLinus Torvalds failed:
258703158cd7SNick Piggin 	BUG_ON(!ret);
25881da177e4SLinus Torvalds 	/*
2589a4b0672dSNick Piggin 	 * Error recovery is a bit difficult. We need to zero out blocks that
2590a4b0672dSNick Piggin 	 * were newly allocated, and dirty them to ensure they get written out.
2591a4b0672dSNick Piggin 	 * Buffers need to be attached to the page at this point, otherwise
2592a4b0672dSNick Piggin 	 * the handling of potential IO errors during writeout would be hard
2593a4b0672dSNick Piggin 	 * (could try doing synchronous writeout, but what if that fails too?)
25941da177e4SLinus Torvalds 	 */
259503158cd7SNick Piggin 	attach_nobh_buffers(page, head);
259603158cd7SNick Piggin 	page_zero_new_buffers(page, from, to);
2597a4b0672dSNick Piggin 
259803158cd7SNick Piggin out_release:
259903158cd7SNick Piggin 	unlock_page(page);
260003158cd7SNick Piggin 	page_cache_release(page);
260103158cd7SNick Piggin 	*pagep = NULL;
2602a4b0672dSNick Piggin 
260303158cd7SNick Piggin 	if (pos + len > inode->i_size)
260403158cd7SNick Piggin 		vmtruncate(inode, inode->i_size);
2605a4b0672dSNick Piggin 
26061da177e4SLinus Torvalds 	return ret;
26071da177e4SLinus Torvalds }
260803158cd7SNick Piggin EXPORT_SYMBOL(nobh_write_begin);
26091da177e4SLinus Torvalds 
261003158cd7SNick Piggin int nobh_write_end(struct file *file, struct address_space *mapping,
261103158cd7SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
261203158cd7SNick Piggin 			struct page *page, void *fsdata)
26131da177e4SLinus Torvalds {
26141da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
2615efdc3131SNick Piggin 	struct buffer_head *head = fsdata;
261603158cd7SNick Piggin 	struct buffer_head *bh;
26175b41e74aSDmitri Monakhov 	BUG_ON(fsdata != NULL && page_has_buffers(page));
26181da177e4SLinus Torvalds 
2619d4cf109fSDave Kleikamp 	if (unlikely(copied < len) && head)
262003158cd7SNick Piggin 		attach_nobh_buffers(page, head);
2621a4b0672dSNick Piggin 	if (page_has_buffers(page))
262203158cd7SNick Piggin 		return generic_write_end(file, mapping, pos, len,
262303158cd7SNick Piggin 					copied, page, fsdata);
2624a4b0672dSNick Piggin 
262522c8ca78SNick Piggin 	SetPageUptodate(page);
26261da177e4SLinus Torvalds 	set_page_dirty(page);
262703158cd7SNick Piggin 	if (pos+copied > inode->i_size) {
262803158cd7SNick Piggin 		i_size_write(inode, pos+copied);
26291da177e4SLinus Torvalds 		mark_inode_dirty(inode);
26301da177e4SLinus Torvalds 	}
263103158cd7SNick Piggin 
263203158cd7SNick Piggin 	unlock_page(page);
263303158cd7SNick Piggin 	page_cache_release(page);
263403158cd7SNick Piggin 
263503158cd7SNick Piggin 	while (head) {
263603158cd7SNick Piggin 		bh = head;
263703158cd7SNick Piggin 		head = head->b_this_page;
263803158cd7SNick Piggin 		free_buffer_head(bh);
26391da177e4SLinus Torvalds 	}
264003158cd7SNick Piggin 
264103158cd7SNick Piggin 	return copied;
264203158cd7SNick Piggin }
264303158cd7SNick Piggin EXPORT_SYMBOL(nobh_write_end);
26441da177e4SLinus Torvalds 
26451da177e4SLinus Torvalds /*
26461da177e4SLinus Torvalds  * nobh_writepage() - based on block_write_full_page() except
26471da177e4SLinus Torvalds  * that it tries to operate without attaching bufferheads to
26481da177e4SLinus Torvalds  * the page.
26491da177e4SLinus Torvalds  */
26501da177e4SLinus Torvalds int nobh_writepage(struct page *page, get_block_t *get_block,
26511da177e4SLinus Torvalds 			struct writeback_control *wbc)
26521da177e4SLinus Torvalds {
26531da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
26541da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
26551da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
26561da177e4SLinus Torvalds 	unsigned offset;
26571da177e4SLinus Torvalds 	int ret;
26581da177e4SLinus Torvalds 
26591da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
26601da177e4SLinus Torvalds 	if (page->index < end_index)
26611da177e4SLinus Torvalds 		goto out;
26621da177e4SLinus Torvalds 
26631da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
26641da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
26651da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
26661da177e4SLinus Torvalds 		/*
26671da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
26681da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
26691da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
26701da177e4SLinus Torvalds 		 */
26711da177e4SLinus Torvalds #if 0
26721da177e4SLinus Torvalds 		/* Not really sure about this - do we need it? */
26731da177e4SLinus Torvalds 		if (page->mapping->a_ops->invalidatepage)
26741da177e4SLinus Torvalds 			page->mapping->a_ops->invalidatepage(page, offset);
26751da177e4SLinus Torvalds #endif
26761da177e4SLinus Torvalds 		unlock_page(page);
26771da177e4SLinus Torvalds 		return 0; /* don't care */
26781da177e4SLinus Torvalds 	}
26791da177e4SLinus Torvalds 
26801da177e4SLinus Torvalds 	/*
26811da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
26821da177e4SLinus Torvalds 	 * writepage invocation because it may be mmapped.  "A file is mapped
26831da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
26841da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
26851da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
26861da177e4SLinus Torvalds 	 */
2687eebd2aa3SChristoph Lameter 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
26881da177e4SLinus Torvalds out:
26891da177e4SLinus Torvalds 	ret = mpage_writepage(page, get_block, wbc);
26901da177e4SLinus Torvalds 	if (ret == -EAGAIN)
269135c80d5fSChris Mason 		ret = __block_write_full_page(inode, page, get_block, wbc,
269235c80d5fSChris Mason 					      end_buffer_async_write);
26931da177e4SLinus Torvalds 	return ret;
26941da177e4SLinus Torvalds }
26951da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_writepage);
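
/*
 * Example: a filesystem opts into the nobh path by wiring the three nobh
 * entry points together in one address_space_operations, much as ext2's
 * nobh mode does.  A sketch with hypothetical "myfs" names; not compiled.
 */
#if 0
static int myfs_nobh_writepage(struct page *page,
				struct writeback_control *wbc)
{
	return nobh_writepage(page, myfs_get_block, wbc);
}

static int myfs_nobh_write_begin(struct file *file,
		struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, void **fsdata)
{
	return nobh_write_begin(file, mapping, pos, len, flags, pagep,
				fsdata, myfs_get_block);
}

static const struct address_space_operations myfs_nobh_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_nobh_writepage,
	.write_begin	= myfs_nobh_write_begin,
	.write_end	= nobh_write_end,
};
#endif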
26961da177e4SLinus Torvalds 
269703158cd7SNick Piggin int nobh_truncate_page(struct address_space *mapping,
269803158cd7SNick Piggin 			loff_t from, get_block_t *get_block)
26991da177e4SLinus Torvalds {
27001da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
27011da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
270203158cd7SNick Piggin 	unsigned blocksize;
270303158cd7SNick Piggin 	sector_t iblock;
270403158cd7SNick Piggin 	unsigned length, pos;
270503158cd7SNick Piggin 	struct inode *inode = mapping->host;
27061da177e4SLinus Torvalds 	struct page *page;
270703158cd7SNick Piggin 	struct buffer_head map_bh;
270803158cd7SNick Piggin 	int err;
27091da177e4SLinus Torvalds 
271003158cd7SNick Piggin 	blocksize = 1 << inode->i_blkbits;
271103158cd7SNick Piggin 	length = offset & (blocksize - 1);
27121da177e4SLinus Torvalds 
271303158cd7SNick Piggin 	/* Block boundary? Nothing to do */
271403158cd7SNick Piggin 	if (!length)
271503158cd7SNick Piggin 		return 0;
271603158cd7SNick Piggin 
271703158cd7SNick Piggin 	length = blocksize - length;
271803158cd7SNick Piggin 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
271903158cd7SNick Piggin 
27201da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
272103158cd7SNick Piggin 	err = -ENOMEM;
27221da177e4SLinus Torvalds 	if (!page)
27231da177e4SLinus Torvalds 		goto out;
27241da177e4SLinus Torvalds 
272503158cd7SNick Piggin 	if (page_has_buffers(page)) {
272603158cd7SNick Piggin has_buffers:
272703158cd7SNick Piggin 		unlock_page(page);
272803158cd7SNick Piggin 		page_cache_release(page);
272903158cd7SNick Piggin 		return block_truncate_page(mapping, from, get_block);
27301da177e4SLinus Torvalds 	}
273103158cd7SNick Piggin 
273203158cd7SNick Piggin 	/* Find the buffer that contains "offset" */
273303158cd7SNick Piggin 	pos = blocksize;
273403158cd7SNick Piggin 	while (offset >= pos) {
273503158cd7SNick Piggin 		iblock++;
273603158cd7SNick Piggin 		pos += blocksize;
273703158cd7SNick Piggin 	}
273803158cd7SNick Piggin 
273903158cd7SNick Piggin 	err = get_block(inode, iblock, &map_bh, 0);
274003158cd7SNick Piggin 	if (err)
274103158cd7SNick Piggin 		goto unlock;
274203158cd7SNick Piggin 	/* unmapped? It's a hole - nothing to do */
274303158cd7SNick Piggin 	if (!buffer_mapped(&map_bh))
274403158cd7SNick Piggin 		goto unlock;
274503158cd7SNick Piggin 
274603158cd7SNick Piggin 	/* Ok, it's mapped. Make sure it's up-to-date */
274703158cd7SNick Piggin 	if (!PageUptodate(page)) {
274803158cd7SNick Piggin 		err = mapping->a_ops->readpage(NULL, page);
274903158cd7SNick Piggin 		if (err) {
275003158cd7SNick Piggin 			page_cache_release(page);
275103158cd7SNick Piggin 			goto out;
275203158cd7SNick Piggin 		}
275303158cd7SNick Piggin 		lock_page(page);
275403158cd7SNick Piggin 		if (!PageUptodate(page)) {
275503158cd7SNick Piggin 			err = -EIO;
275603158cd7SNick Piggin 			goto unlock;
275703158cd7SNick Piggin 		}
275803158cd7SNick Piggin 		if (page_has_buffers(page))
275903158cd7SNick Piggin 			goto has_buffers;
276003158cd7SNick Piggin 	}
2761eebd2aa3SChristoph Lameter 	zero_user(page, offset, length);
276203158cd7SNick Piggin 	set_page_dirty(page);
276303158cd7SNick Piggin 	err = 0;
276403158cd7SNick Piggin 
276503158cd7SNick Piggin unlock:
27661da177e4SLinus Torvalds 	unlock_page(page);
27671da177e4SLinus Torvalds 	page_cache_release(page);
27681da177e4SLinus Torvalds out:
276903158cd7SNick Piggin 	return err;
27701da177e4SLinus Torvalds }
27711da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_truncate_page);
27721da177e4SLinus Torvalds 
27731da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping,
27741da177e4SLinus Torvalds 			loff_t from, get_block_t *get_block)
27751da177e4SLinus Torvalds {
27761da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
27771da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
27781da177e4SLinus Torvalds 	unsigned blocksize;
277954b21a79SAndrew Morton 	sector_t iblock;
27801da177e4SLinus Torvalds 	unsigned length, pos;
27811da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
27821da177e4SLinus Torvalds 	struct page *page;
27831da177e4SLinus Torvalds 	struct buffer_head *bh;
27841da177e4SLinus Torvalds 	int err;
27851da177e4SLinus Torvalds 
27861da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
27871da177e4SLinus Torvalds 	length = offset & (blocksize - 1);
27881da177e4SLinus Torvalds 
27891da177e4SLinus Torvalds 	/* Block boundary? Nothing to do */
27901da177e4SLinus Torvalds 	if (!length)
27911da177e4SLinus Torvalds 		return 0;
27921da177e4SLinus Torvalds 
27931da177e4SLinus Torvalds 	length = blocksize - length;
279454b21a79SAndrew Morton 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
27951da177e4SLinus Torvalds 
27961da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
27971da177e4SLinus Torvalds 	err = -ENOMEM;
27981da177e4SLinus Torvalds 	if (!page)
27991da177e4SLinus Torvalds 		goto out;
28001da177e4SLinus Torvalds 
28011da177e4SLinus Torvalds 	if (!page_has_buffers(page))
28021da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
28031da177e4SLinus Torvalds 
28041da177e4SLinus Torvalds 	/* Find the buffer that contains "offset" */
28051da177e4SLinus Torvalds 	bh = page_buffers(page);
28061da177e4SLinus Torvalds 	pos = blocksize;
28071da177e4SLinus Torvalds 	while (offset >= pos) {
28081da177e4SLinus Torvalds 		bh = bh->b_this_page;
28091da177e4SLinus Torvalds 		iblock++;
28101da177e4SLinus Torvalds 		pos += blocksize;
28111da177e4SLinus Torvalds 	}
28121da177e4SLinus Torvalds 
28131da177e4SLinus Torvalds 	err = 0;
28141da177e4SLinus Torvalds 	if (!buffer_mapped(bh)) {
2815b0cf2321SBadari Pulavarty 		WARN_ON(bh->b_size != blocksize);
28161da177e4SLinus Torvalds 		err = get_block(inode, iblock, bh, 0);
28171da177e4SLinus Torvalds 		if (err)
28181da177e4SLinus Torvalds 			goto unlock;
28191da177e4SLinus Torvalds 		/* unmapped? It's a hole - nothing to do */
28201da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
28211da177e4SLinus Torvalds 			goto unlock;
28221da177e4SLinus Torvalds 	}
28231da177e4SLinus Torvalds 
28241da177e4SLinus Torvalds 	/* Ok, it's mapped. Make sure it's up-to-date */
28251da177e4SLinus Torvalds 	if (PageUptodate(page))
28261da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
28271da177e4SLinus Torvalds 
282833a266ddSDavid Chinner 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
28291da177e4SLinus Torvalds 		err = -EIO;
28301da177e4SLinus Torvalds 		ll_rw_block(READ, 1, &bh);
28311da177e4SLinus Torvalds 		wait_on_buffer(bh);
28321da177e4SLinus Torvalds 		/* Uhhuh. Read error. Complain and punt. */
28331da177e4SLinus Torvalds 		if (!buffer_uptodate(bh))
28341da177e4SLinus Torvalds 			goto unlock;
28351da177e4SLinus Torvalds 	}
28361da177e4SLinus Torvalds 
2837eebd2aa3SChristoph Lameter 	zero_user(page, offset, length);
28381da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
28391da177e4SLinus Torvalds 	err = 0;
28401da177e4SLinus Torvalds 
28411da177e4SLinus Torvalds unlock:
28421da177e4SLinus Torvalds 	unlock_page(page);
28431da177e4SLinus Torvalds 	page_cache_release(page);
28441da177e4SLinus Torvalds out:
28451da177e4SLinus Torvalds 	return err;
28461da177e4SLinus Torvalds }
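
/*
 * Example: a truncate path zeroes the tail of the (possibly mmapped)
 * final block before releasing the block mapping beyond the new size.  A
 * sketch with hypothetical "myfs" names; not compiled.
 */
#if 0
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	/* ... now free the blocks beyond the new i_size ... */
}
#endif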
28471da177e4SLinus Torvalds 
28481da177e4SLinus Torvalds /*
28491da177e4SLinus Torvalds  * The generic ->writepage function for buffer-backed address_spaces
285035c80d5fSChris Mason  * this form passes in the end_io handler used to finish the IO.
28511da177e4SLinus Torvalds  */
285235c80d5fSChris Mason int block_write_full_page_endio(struct page *page, get_block_t *get_block,
285335c80d5fSChris Mason 			struct writeback_control *wbc, bh_end_io_t *handler)
28541da177e4SLinus Torvalds {
28551da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
28561da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
28571da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
28581da177e4SLinus Torvalds 	unsigned offset;
28591da177e4SLinus Torvalds 
28601da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
28611da177e4SLinus Torvalds 	if (page->index < end_index)
286235c80d5fSChris Mason 		return __block_write_full_page(inode, page, get_block, wbc,
286335c80d5fSChris Mason 					       handler);
28641da177e4SLinus Torvalds 
28651da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
28661da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
28671da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
28681da177e4SLinus Torvalds 		/*
28691da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
28701da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
28711da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
28721da177e4SLinus Torvalds 		 */
2873aaa4059bSJan Kara 		do_invalidatepage(page, 0);
28741da177e4SLinus Torvalds 		unlock_page(page);
28751da177e4SLinus Torvalds 		return 0; /* don't care */
28761da177e4SLinus Torvalds 	}
28771da177e4SLinus Torvalds 
28781da177e4SLinus Torvalds 	/*
28791da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
28801da177e4SLinus Torvalds 	 * writepage invocation because it may be mmapped.  "A file is mapped
28811da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
28821da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
28831da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
28841da177e4SLinus Torvalds 	 */
2885eebd2aa3SChristoph Lameter 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
288635c80d5fSChris Mason 	return __block_write_full_page(inode, page, get_block, wbc, handler);
28871da177e4SLinus Torvalds }
28881da177e4SLinus Torvalds 
288935c80d5fSChris Mason /*
289035c80d5fSChris Mason  * The generic ->writepage function for buffer-backed address_spaces
289135c80d5fSChris Mason  */
289235c80d5fSChris Mason int block_write_full_page(struct page *page, get_block_t *get_block,
289335c80d5fSChris Mason 			struct writeback_control *wbc)
289435c80d5fSChris Mason {
289535c80d5fSChris Mason 	return block_write_full_page_endio(page, get_block, wbc,
289635c80d5fSChris Mason 					   end_buffer_async_write);
289735c80d5fSChris Mason }
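
/*
 * Example: the conventional ->writepage wrapper.  A sketch with a
 * hypothetical myfs_get_block; not compiled.
 */
#if 0
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}
#endif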
289835c80d5fSChris Mason 
289935c80d5fSChris Mason 
29001da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
29011da177e4SLinus Torvalds 			    get_block_t *get_block)
29021da177e4SLinus Torvalds {
29031da177e4SLinus Torvalds 	struct buffer_head tmp;
29041da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
29051da177e4SLinus Torvalds 	tmp.b_state = 0;
29061da177e4SLinus Torvalds 	tmp.b_blocknr = 0;
2907b0cf2321SBadari Pulavarty 	tmp.b_size = 1 << inode->i_blkbits;
29081da177e4SLinus Torvalds 	get_block(inode, block, &tmp, 0);
29091da177e4SLinus Torvalds 	return tmp.b_blocknr;
29101da177e4SLinus Torvalds }
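
/*
 * Example: generic_block_bmap() typically backs the FIBMAP ioctl through
 * the ->bmap hook.  A sketch with a hypothetical myfs_get_block; not
 * compiled.
 */
#if 0
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif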
29111da177e4SLinus Torvalds 
29126712ecf8SNeilBrown static void end_bio_bh_io_sync(struct bio *bio, int err)
29131da177e4SLinus Torvalds {
29141da177e4SLinus Torvalds 	struct buffer_head *bh = bio->bi_private;
29151da177e4SLinus Torvalds 
29161da177e4SLinus Torvalds 	if (err == -EOPNOTSUPP) {
29171da177e4SLinus Torvalds 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
29181da177e4SLinus Torvalds 		set_bit(BH_Eopnotsupp, &bh->b_state);
29191da177e4SLinus Torvalds 	}
29201da177e4SLinus Torvalds 
292108bafc03SKeith Mannthey 	if (unlikely(test_bit(BIO_QUIET, &bio->bi_flags)))
292208bafc03SKeith Mannthey 		set_bit(BH_Quiet, &bh->b_state);
292308bafc03SKeith Mannthey 
29241da177e4SLinus Torvalds 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
29251da177e4SLinus Torvalds 	bio_put(bio);
29261da177e4SLinus Torvalds }
29271da177e4SLinus Torvalds 
29281da177e4SLinus Torvalds int submit_bh(int rw, struct buffer_head * bh)
29291da177e4SLinus Torvalds {
29301da177e4SLinus Torvalds 	struct bio *bio;
29311da177e4SLinus Torvalds 	int ret = 0;
29321da177e4SLinus Torvalds 
29331da177e4SLinus Torvalds 	BUG_ON(!buffer_locked(bh));
29341da177e4SLinus Torvalds 	BUG_ON(!buffer_mapped(bh));
29351da177e4SLinus Torvalds 	BUG_ON(!bh->b_end_io);
29361da177e4SLinus Torvalds 
293748fd4f93SJens Axboe 	/*
293848fd4f93SJens Axboe 	 * Mask in the barrier bit for a write (could be either a WRITE or a
293948fd4f93SJens Axboe 	 * WRITE_SYNC).
294048fd4f93SJens Axboe 	 */
294148fd4f93SJens Axboe 	if (buffer_ordered(bh) && (rw & WRITE))
294248fd4f93SJens Axboe 		rw |= WRITE_BARRIER;
29431da177e4SLinus Torvalds 
29441da177e4SLinus Torvalds 	/*
294548fd4f93SJens Axboe 	 * Only clear out a write error when rewriting
29461da177e4SLinus Torvalds 	 */
294748fd4f93SJens Axboe 	if (test_set_buffer_req(bh) && (rw & WRITE))
29481da177e4SLinus Torvalds 		clear_buffer_write_io_error(bh);
29491da177e4SLinus Torvalds 
29501da177e4SLinus Torvalds 	/*
29511da177e4SLinus Torvalds 	 * from here on down, it's all bio -- do the initial mapping,
29521da177e4SLinus Torvalds 	 * submit_bio -> generic_make_request may further map this bio around
29531da177e4SLinus Torvalds 	 */
29541da177e4SLinus Torvalds 	bio = bio_alloc(GFP_NOIO, 1);
29551da177e4SLinus Torvalds 
29561da177e4SLinus Torvalds 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
29571da177e4SLinus Torvalds 	bio->bi_bdev = bh->b_bdev;
29581da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_page = bh->b_page;
29591da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_len = bh->b_size;
29601da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
29611da177e4SLinus Torvalds 
29621da177e4SLinus Torvalds 	bio->bi_vcnt = 1;
29631da177e4SLinus Torvalds 	bio->bi_idx = 0;
29641da177e4SLinus Torvalds 	bio->bi_size = bh->b_size;
29651da177e4SLinus Torvalds 
29661da177e4SLinus Torvalds 	bio->bi_end_io = end_bio_bh_io_sync;
29671da177e4SLinus Torvalds 	bio->bi_private = bh;
29681da177e4SLinus Torvalds 
29691da177e4SLinus Torvalds 	bio_get(bio);
29701da177e4SLinus Torvalds 	submit_bio(rw, bio);
29711da177e4SLinus Torvalds 
29721da177e4SLinus Torvalds 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
29731da177e4SLinus Torvalds 		ret = -EOPNOTSUPP;
29741da177e4SLinus Torvalds 
29751da177e4SLinus Torvalds 	bio_put(bio);
29761da177e4SLinus Torvalds 	return ret;
29771da177e4SLinus Torvalds }
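
/*
 * Example: a caller that wants a single buffer read synchronously locks
 * the bh, takes a reference for the completion handler, submits, and
 * waits -- essentially what ll_rw_block() below does per buffer.  A
 * sketch; not compiled.
 */
#if 0
static int read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;	/* unlocks bh, drops our ref */
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif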
29781da177e4SLinus Torvalds 
29791da177e4SLinus Torvalds /**
29801da177e4SLinus Torvalds  * ll_rw_block: low-level access to block devices (DEPRECATED)
2981a7662236SJan Kara  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
29821da177e4SLinus Torvalds  * @nr: number of &struct buffer_heads in the array
29831da177e4SLinus Torvalds  * @bhs: array of pointers to &struct buffer_head
29841da177e4SLinus Torvalds  *
2985a7662236SJan Kara  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2986a7662236SJan Kara  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2987a7662236SJan Kara  * %SWRITE is like %WRITE, only we make sure that the *current* data in the
2988a7662236SJan Kara  * buffers is sent to disk. The fourth %READA option is described in the
2989a7662236SJan Kara  * documentation for generic_make_request(), which ll_rw_block() calls.
29901da177e4SLinus Torvalds  *
29911da177e4SLinus Torvalds  * This function drops any buffer that it cannot get a lock on (with the
2992a7662236SJan Kara  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2993a7662236SJan Kara  * clean when doing a write request, and any buffer that appears to be
2994a7662236SJan Kara  * up-to-date when doing a read request.  Further, it marks as clean buffers that
2995a7662236SJan Kara  * are processed for writing (the buffer cache won't assume that they are
2996a7662236SJan Kara  * actually clean until the buffer gets unlocked).
29971da177e4SLinus Torvalds  *
29981da177e4SLinus Torvalds  * ll_rw_block sets b_end_io to a simple completion handler that marks
29991da177e4SLinus Torvalds  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
30001da177e4SLinus Torvalds  * any waiters.
30011da177e4SLinus Torvalds  *
30021da177e4SLinus Torvalds  * All of the buffers must be for the same device, and must also be a
30031da177e4SLinus Torvalds  * multiple of the current approved size for the device.
30041da177e4SLinus Torvalds  */
30051da177e4SLinus Torvalds void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
30061da177e4SLinus Torvalds {
30071da177e4SLinus Torvalds 	int i;
30081da177e4SLinus Torvalds 
30091da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
30101da177e4SLinus Torvalds 		struct buffer_head *bh = bhs[i];
30111da177e4SLinus Torvalds 
30129cf6b720SJens Axboe 		if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
3013a7662236SJan Kara 			lock_buffer(bh);
3014ca5de404SNick Piggin 		else if (!trylock_buffer(bh))
30151da177e4SLinus Torvalds 			continue;
30161da177e4SLinus Torvalds 
30179cf6b720SJens Axboe 		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
30189cf6b720SJens Axboe 		    rw == SWRITE_SYNC_PLUG) {
30191da177e4SLinus Torvalds 			if (test_clear_buffer_dirty(bh)) {
302076c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_write_sync;
3021e60e5c50SOGAWA Hirofumi 				get_bh(bh);
302218ce3751SJens Axboe 				if (rw == SWRITE_SYNC)
302318ce3751SJens Axboe 					submit_bh(WRITE_SYNC, bh);
302418ce3751SJens Axboe 				else
30251da177e4SLinus Torvalds 					submit_bh(WRITE, bh);
30261da177e4SLinus Torvalds 				continue;
30271da177e4SLinus Torvalds 			}
30281da177e4SLinus Torvalds 		} else {
30291da177e4SLinus Torvalds 			if (!buffer_uptodate(bh)) {
303076c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_read_sync;
3031e60e5c50SOGAWA Hirofumi 				get_bh(bh);
30321da177e4SLinus Torvalds 				submit_bh(rw, bh);
30331da177e4SLinus Torvalds 				continue;
30341da177e4SLinus Torvalds 			}
30351da177e4SLinus Torvalds 		}
30361da177e4SLinus Torvalds 		unlock_buffer(bh);
30371da177e4SLinus Torvalds 	}
30381da177e4SLinus Torvalds }
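
/*
 * Example: the classic pattern (also visible in block_truncate_page()
 * above) starts reads on a batch of buffers and then waits on each one,
 * checking uptodate afterwards.  A sketch; not compiled.
 */
#if 0
static int read_buffers_sync(struct buffer_head *bhs[], int nr)
{
	int i, err = 0;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}
#endif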
30391da177e4SLinus Torvalds 
30401da177e4SLinus Torvalds /*
30411da177e4SLinus Torvalds  * For a data-integrity writeout, we need to wait upon any in-progress I/O
30421da177e4SLinus Torvalds  * and then start new I/O and wait upon that.  The caller must have a ref on
30431da177e4SLinus Torvalds  * the buffer_head.
30441da177e4SLinus Torvalds  */
30451da177e4SLinus Torvalds int sync_dirty_buffer(struct buffer_head *bh)
30461da177e4SLinus Torvalds {
30471da177e4SLinus Torvalds 	int ret = 0;
30481da177e4SLinus Torvalds 
30491da177e4SLinus Torvalds 	WARN_ON(atomic_read(&bh->b_count) < 1);
30501da177e4SLinus Torvalds 	lock_buffer(bh);
30511da177e4SLinus Torvalds 	if (test_clear_buffer_dirty(bh)) {
30521da177e4SLinus Torvalds 		get_bh(bh);
30531da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_write_sync;
30541aa2a7ccSJens Axboe 		ret = submit_bh(WRITE_SYNC, bh);
30551da177e4SLinus Torvalds 		wait_on_buffer(bh);
30561da177e4SLinus Torvalds 		if (buffer_eopnotsupp(bh)) {
30571da177e4SLinus Torvalds 			clear_buffer_eopnotsupp(bh);
30581da177e4SLinus Torvalds 			ret = -EOPNOTSUPP;
30591da177e4SLinus Torvalds 		}
30601da177e4SLinus Torvalds 		if (!ret && !buffer_uptodate(bh))
30611da177e4SLinus Torvalds 			ret = -EIO;
30621da177e4SLinus Torvalds 	} else {
30631da177e4SLinus Torvalds 		unlock_buffer(bh);
30641da177e4SLinus Torvalds 	}
30651da177e4SLinus Torvalds 	return ret;
30661da177e4SLinus Torvalds }
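
/*
 * Example: a metadata update that must reach disk before the caller
 * proceeds (say, a superblock field) is typically written with
 * mark_buffer_dirty() + sync_dirty_buffer().  A sketch; MYFS_SB_BLOCK is
 * hypothetical.  Not compiled.
 */
#if 0
static int myfs_sync_super(struct super_block *sb)
{
	struct buffer_head *bh = sb_bread(sb, MYFS_SB_BLOCK);
	int err;

	if (!bh)
		return -EIO;
	/* ... modify the on-disk superblock image in bh->b_data ... */
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);
	brelse(bh);
	return err;
}
#endif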
30671da177e4SLinus Torvalds 
30681da177e4SLinus Torvalds /*
30691da177e4SLinus Torvalds  * try_to_free_buffers() checks if all the buffers on this particular page
30701da177e4SLinus Torvalds  * are unused, and releases them if so.
30711da177e4SLinus Torvalds  *
30721da177e4SLinus Torvalds  * Exclusion against try_to_free_buffers may be obtained by either
30731da177e4SLinus Torvalds  * locking the page or by holding its mapping's private_lock.
30741da177e4SLinus Torvalds  *
30751da177e4SLinus Torvalds  * If the page is dirty but all the buffers are clean then we need to
30761da177e4SLinus Torvalds  * be sure to mark the page clean as well.  This is because the page
30771da177e4SLinus Torvalds  * may be against a block device, and a later reattachment of buffers
30781da177e4SLinus Torvalds  * to a dirty page will set *all* buffers dirty, which would corrupt
30791da177e4SLinus Torvalds  * filesystem data on the same device.
30801da177e4SLinus Torvalds  *
30811da177e4SLinus Torvalds  * The same applies to regular filesystem pages: if all the buffers are
30821da177e4SLinus Torvalds  * clean then we set the page clean and proceed.  To do that, we require
30831da177e4SLinus Torvalds  * total exclusion from __set_page_dirty_buffers().  That is obtained with
30841da177e4SLinus Torvalds  * private_lock.
30851da177e4SLinus Torvalds  *
30861da177e4SLinus Torvalds  * try_to_free_buffers() is non-blocking.
30871da177e4SLinus Torvalds  */
30881da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh)
30891da177e4SLinus Torvalds {
30901da177e4SLinus Torvalds 	return atomic_read(&bh->b_count) |
30911da177e4SLinus Torvalds 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
30921da177e4SLinus Torvalds }
30931da177e4SLinus Torvalds 
30941da177e4SLinus Torvalds static int
30951da177e4SLinus Torvalds drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
30961da177e4SLinus Torvalds {
30971da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);
30981da177e4SLinus Torvalds 	struct buffer_head *bh;
30991da177e4SLinus Torvalds 
31001da177e4SLinus Torvalds 	bh = head;
31011da177e4SLinus Torvalds 	do {
3102de7d5a3bSakpm@osdl.org 		if (buffer_write_io_error(bh) && page->mapping)
31031da177e4SLinus Torvalds 			set_bit(AS_EIO, &page->mapping->flags);
31041da177e4SLinus Torvalds 		if (buffer_busy(bh))
31051da177e4SLinus Torvalds 			goto failed;
31061da177e4SLinus Torvalds 		bh = bh->b_this_page;
31071da177e4SLinus Torvalds 	} while (bh != head);
31081da177e4SLinus Torvalds 
31091da177e4SLinus Torvalds 	do {
31101da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
31111da177e4SLinus Torvalds 
3112535ee2fbSJan Kara 		if (bh->b_assoc_map)
31131da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
31141da177e4SLinus Torvalds 		bh = next;
31151da177e4SLinus Torvalds 	} while (bh != head);
31161da177e4SLinus Torvalds 	*buffers_to_free = head;
31171da177e4SLinus Torvalds 	__clear_page_buffers(page);
31181da177e4SLinus Torvalds 	return 1;
31191da177e4SLinus Torvalds failed:
31201da177e4SLinus Torvalds 	return 0;
31211da177e4SLinus Torvalds }
31221da177e4SLinus Torvalds 
31231da177e4SLinus Torvalds int try_to_free_buffers(struct page *page)
31241da177e4SLinus Torvalds {
31251da177e4SLinus Torvalds 	struct address_space * const mapping = page->mapping;
31261da177e4SLinus Torvalds 	struct buffer_head *buffers_to_free = NULL;
31271da177e4SLinus Torvalds 	int ret = 0;
31281da177e4SLinus Torvalds 
31291da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
3130ecdfc978SLinus Torvalds 	if (PageWriteback(page))
31311da177e4SLinus Torvalds 		return 0;
31321da177e4SLinus Torvalds 
31331da177e4SLinus Torvalds 	if (mapping == NULL) {		/* can this still happen? */
31341da177e4SLinus Torvalds 		ret = drop_buffers(page, &buffers_to_free);
31351da177e4SLinus Torvalds 		goto out;
31361da177e4SLinus Torvalds 	}
31371da177e4SLinus Torvalds 
31381da177e4SLinus Torvalds 	spin_lock(&mapping->private_lock);
31391da177e4SLinus Torvalds 	ret = drop_buffers(page, &buffers_to_free);
3140ecdfc978SLinus Torvalds 
3141ecdfc978SLinus Torvalds 	/*
3142ecdfc978SLinus Torvalds 	 * If the filesystem writes its buffers by hand (eg ext3)
3143ecdfc978SLinus Torvalds 	 * then we can have clean buffers against a dirty page.  We
3144ecdfc978SLinus Torvalds 	 * clean the page here; otherwise the VM will never notice
3145ecdfc978SLinus Torvalds 	 * that the filesystem did any IO at all.
3146ecdfc978SLinus Torvalds 	 *
3147ecdfc978SLinus Torvalds 	 * Also, during truncate, discard_buffer will have marked all
3148ecdfc978SLinus Torvalds 	 * the page's buffers clean.  We discover that here and clean
3149ecdfc978SLinus Torvalds 	 * the page also.
315087df7241SNick Piggin 	 *
315187df7241SNick Piggin 	 * private_lock must be held over this entire operation in order
315287df7241SNick Piggin 	 * to synchronise against __set_page_dirty_buffers and prevent the
315387df7241SNick Piggin 	 * dirty bit from being lost.
3154ecdfc978SLinus Torvalds 	 */
3155ecdfc978SLinus Torvalds 	if (ret)
3156ecdfc978SLinus Torvalds 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
315787df7241SNick Piggin 	spin_unlock(&mapping->private_lock);
31581da177e4SLinus Torvalds out:
31591da177e4SLinus Torvalds 	if (buffers_to_free) {
31601da177e4SLinus Torvalds 		struct buffer_head *bh = buffers_to_free;
31611da177e4SLinus Torvalds 
31621da177e4SLinus Torvalds 		do {
31631da177e4SLinus Torvalds 			struct buffer_head *next = bh->b_this_page;
31641da177e4SLinus Torvalds 			free_buffer_head(bh);
31651da177e4SLinus Torvalds 			bh = next;
31661da177e4SLinus Torvalds 		} while (bh != buffers_to_free);
31671da177e4SLinus Torvalds 	}
31681da177e4SLinus Torvalds 	return ret;
31691da177e4SLinus Torvalds }
31701da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers);
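/*
 * Example (a sketch, not part of this file): a simple buffer-head
 * based filesystem can implement ->releasepage() as a thin wrapper
 * around try_to_free_buffers().  The VM calls ->releasepage() with
 * the page locked; "example_releasepage" is a hypothetical name, and
 * the gfp mask is unused because nothing is allocated on this path.
 */
static int example_releasepage(struct page *page, gfp_t gfp_mask)
{
	if (!page_has_buffers(page))
		return 0;
	return try_to_free_buffers(page);
}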
31711da177e4SLinus Torvalds 
31723978d717SNeilBrown void block_sync_page(struct page *page)
31731da177e4SLinus Torvalds {
31741da177e4SLinus Torvalds 	struct address_space *mapping;
31751da177e4SLinus Torvalds 
31761da177e4SLinus Torvalds 	smp_mb();
31771da177e4SLinus Torvalds 	mapping = page_mapping(page);
31781da177e4SLinus Torvalds 	if (mapping)
31791da177e4SLinus Torvalds 		blk_run_backing_dev(mapping->backing_dev_info, page);
31801da177e4SLinus Torvalds }
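/*
 * Example (a sketch, not part of this file): filesystems that use
 * buffer heads typically wire block_sync_page into their
 * address_space_operations so a waiter on a page can unplug the
 * underlying request queue.  example_get_block and example_readpage
 * are hypothetical placeholders.
 */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create);

static int example_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, example_get_block);
}

static const struct address_space_operations example_aops = {
	.readpage	= example_readpage,
	.sync_page	= block_sync_page,
	/* .writepage, .write_begin, .write_end, .bmap, ... */
};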
31811da177e4SLinus Torvalds 
31821da177e4SLinus Torvalds /*
31831da177e4SLinus Torvalds  * There are no bdflush tunables left.  But distributions are
31841da177e4SLinus Torvalds  * still running obsolete flush daemons, so we terminate them here.
31851da177e4SLinus Torvalds  *
31861da177e4SLinus Torvalds  * Use of bdflush() is deprecated and will be removed in a future kernel.
31871da177e4SLinus Torvalds  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
31881da177e4SLinus Torvalds  */
3189bdc480e3SHeiko Carstens SYSCALL_DEFINE2(bdflush, int, func, long, data)
31901da177e4SLinus Torvalds {
31911da177e4SLinus Torvalds 	static int msg_count;
31921da177e4SLinus Torvalds 
31931da177e4SLinus Torvalds 	if (!capable(CAP_SYS_ADMIN))
31941da177e4SLinus Torvalds 		return -EPERM;
31951da177e4SLinus Torvalds 
31961da177e4SLinus Torvalds 	if (msg_count < 5) {
31971da177e4SLinus Torvalds 		msg_count++;
31981da177e4SLinus Torvalds 		printk(KERN_INFO
31991da177e4SLinus Torvalds 			"warning: process `%s' used the obsolete bdflush"
32001da177e4SLinus Torvalds 			" system call\n", current->comm);
32011da177e4SLinus Torvalds 		printk(KERN_INFO "Fix your initscripts?\n");
32021da177e4SLinus Torvalds 	}
32031da177e4SLinus Torvalds 
32041da177e4SLinus Torvalds 	if (func == 1)
32051da177e4SLinus Torvalds 		do_exit(0);
32061da177e4SLinus Torvalds 	return 0;
32071da177e4SLinus Torvalds }
32081da177e4SLinus Torvalds 
32091da177e4SLinus Torvalds /*
32101da177e4SLinus Torvalds  * Buffer-head allocation
32111da177e4SLinus Torvalds  */
3212e18b890bSChristoph Lameter static struct kmem_cache *bh_cachep;
32131da177e4SLinus Torvalds 
32141da177e4SLinus Torvalds /*
32151da177e4SLinus Torvalds  * Once the number of bh's in the machine exceeds this level, we start
32161da177e4SLinus Torvalds  * stripping them in writeback.
32171da177e4SLinus Torvalds  */
32181da177e4SLinus Torvalds static int max_buffer_heads;
32191da177e4SLinus Torvalds 
32201da177e4SLinus Torvalds int buffer_heads_over_limit;
32211da177e4SLinus Torvalds 
32221da177e4SLinus Torvalds struct bh_accounting {
32231da177e4SLinus Torvalds 	int nr;			/* Number of live bh's */
32241da177e4SLinus Torvalds 	int ratelimit;		/* Limit cacheline bouncing */
32251da177e4SLinus Torvalds };
32261da177e4SLinus Torvalds 
32271da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
32281da177e4SLinus Torvalds 
32291da177e4SLinus Torvalds static void recalc_bh_state(void)
32301da177e4SLinus Torvalds {
32311da177e4SLinus Torvalds 	int i;
32321da177e4SLinus Torvalds 	int tot = 0;
32331da177e4SLinus Torvalds 
32341da177e4SLinus Torvalds 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
32351da177e4SLinus Torvalds 		return;
32361da177e4SLinus Torvalds 	__get_cpu_var(bh_accounting).ratelimit = 0;
32378a143426SEric Dumazet 	for_each_online_cpu(i)
32381da177e4SLinus Torvalds 		tot += per_cpu(bh_accounting, i).nr;
32391da177e4SLinus Torvalds 	buffer_heads_over_limit = (tot > max_buffer_heads);
32401da177e4SLinus Torvalds }
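/*
 * Example (a sketch of the consumer side, which lives in memory
 * reclaim rather than in this file): when buffer_heads_over_limit is
 * set, reclaim opportunistically strips buffers from clean pages it
 * scans; try_to_release_page() ends up back in try_to_free_buffers()
 * for buffer-head based mappings.  "example_strip_buffers" is a
 * hypothetical name.
 */
static void example_strip_buffers(struct page *page)
{
	if (buffer_heads_over_limit && page_has_buffers(page) &&
	    trylock_page(page)) {
		if (page_has_buffers(page))
			try_to_release_page(page, 0);
		unlock_page(page);
	}
}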
32411da177e4SLinus Torvalds 
3242dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
32431da177e4SLinus Torvalds {
3244488514d1SChristoph Lameter 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
32451da177e4SLinus Torvalds 	if (ret) {
3246a35afb83SChristoph Lameter 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3247736c7b80SCoywolf Qi Hunt 		get_cpu_var(bh_accounting).nr++;
32481da177e4SLinus Torvalds 		recalc_bh_state();
3249736c7b80SCoywolf Qi Hunt 		put_cpu_var(bh_accounting);
32501da177e4SLinus Torvalds 	}
32511da177e4SLinus Torvalds 	return ret;
32521da177e4SLinus Torvalds }
32531da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head);
32541da177e4SLinus Torvalds 
32551da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh)
32561da177e4SLinus Torvalds {
32571da177e4SLinus Torvalds 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
32581da177e4SLinus Torvalds 	kmem_cache_free(bh_cachep, bh);
3259736c7b80SCoywolf Qi Hunt 	get_cpu_var(bh_accounting).nr--;
32601da177e4SLinus Torvalds 	recalc_bh_state();
3261736c7b80SCoywolf Qi Hunt 	put_cpu_var(bh_accounting);
32621da177e4SLinus Torvalds }
32631da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head);
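/*
 * Example (a sketch, not part of this file): a caller that needs a
 * standalone buffer_head pairs alloc_buffer_head() with
 * free_buffer_head().  GFP_NOFS is a typical choice from filesystem
 * context; "example_make_bh" and the zero offset are illustrative.
 */
static struct buffer_head *example_make_bh(struct page *page)
{
	struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);

	if (!bh)
		return NULL;
	set_bh_page(bh, page, 0);	/* point b_page/b_data at the page */
	bh->b_size = PAGE_SIZE;
	return bh;			/* release with free_buffer_head() */
}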
32641da177e4SLinus Torvalds 
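/*
 * A CPU is going away: drop its buffer_head LRU so those references
 * are released, and fold its bh_accounting count into the current
 * CPU's so the global total stays exact.
 */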
32651da177e4SLinus Torvalds static void buffer_exit_cpu(int cpu)
32661da177e4SLinus Torvalds {
32671da177e4SLinus Torvalds 	int i;
32681da177e4SLinus Torvalds 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
32691da177e4SLinus Torvalds 
32701da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
32711da177e4SLinus Torvalds 		brelse(b->bhs[i]);
32721da177e4SLinus Torvalds 		b->bhs[i] = NULL;
32731da177e4SLinus Torvalds 	}
32748a143426SEric Dumazet 	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
32758a143426SEric Dumazet 	per_cpu(bh_accounting, cpu).nr = 0;
32768a143426SEric Dumazet 	put_cpu_var(bh_accounting);
32771da177e4SLinus Torvalds }
32781da177e4SLinus Torvalds 
32791da177e4SLinus Torvalds static int buffer_cpu_notify(struct notifier_block *self,
32801da177e4SLinus Torvalds 			      unsigned long action, void *hcpu)
32811da177e4SLinus Torvalds {
32828bb78442SRafael J. Wysocki 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
32831da177e4SLinus Torvalds 		buffer_exit_cpu((unsigned long)hcpu);
32841da177e4SLinus Torvalds 	return NOTIFY_OK;
32851da177e4SLinus Torvalds }
32861da177e4SLinus Torvalds 
3287389d1b08SAneesh Kumar K.V /**
3288a6b91919SRandy Dunlap  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3289389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head to test
3290389d1b08SAneesh Kumar K.V  *
3291389d1b08SAneesh Kumar K.V  * Return 1 if the buffer is up-to-date (buffer left unlocked);
3292389d1b08SAneesh Kumar K.V  * otherwise return 0 with the buffer locked for the caller to read in.
3293389d1b08SAneesh Kumar K.V  */
3294389d1b08SAneesh Kumar K.V int bh_uptodate_or_lock(struct buffer_head *bh)
3295389d1b08SAneesh Kumar K.V {
3296389d1b08SAneesh Kumar K.V 	if (!buffer_uptodate(bh)) {
3297389d1b08SAneesh Kumar K.V 		lock_buffer(bh);
3298389d1b08SAneesh Kumar K.V 		if (!buffer_uptodate(bh))
3299389d1b08SAneesh Kumar K.V 			return 0;
3300389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3301389d1b08SAneesh Kumar K.V 	}
3302389d1b08SAneesh Kumar K.V 	return 1;
3303389d1b08SAneesh Kumar K.V }
3304389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_uptodate_or_lock);
3305389d1b08SAneesh Kumar K.V 
3306389d1b08SAneesh Kumar K.V /**
3307a6b91919SRandy Dunlap  * bh_submit_read - Submit a locked buffer for reading
3308389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head; must be locked on entry
3309389d1b08SAneesh Kumar K.V  *
3310389d1b08SAneesh Kumar K.V  * Returns zero on success and -EIO on error; the buffer is unlocked on return.
3311389d1b08SAneesh Kumar K.V  */
3312389d1b08SAneesh Kumar K.V int bh_submit_read(struct buffer_head *bh)
3313389d1b08SAneesh Kumar K.V {
3314389d1b08SAneesh Kumar K.V 	BUG_ON(!buffer_locked(bh));
3315389d1b08SAneesh Kumar K.V 
3316389d1b08SAneesh Kumar K.V 	if (buffer_uptodate(bh)) {
3317389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3318389d1b08SAneesh Kumar K.V 		return 0;
3319389d1b08SAneesh Kumar K.V 	}
3320389d1b08SAneesh Kumar K.V 
3321389d1b08SAneesh Kumar K.V 	get_bh(bh);
3322389d1b08SAneesh Kumar K.V 	bh->b_end_io = end_buffer_read_sync;
3323389d1b08SAneesh Kumar K.V 	submit_bh(READ, bh);
3324389d1b08SAneesh Kumar K.V 	wait_on_buffer(bh);
3325389d1b08SAneesh Kumar K.V 	if (buffer_uptodate(bh))
3326389d1b08SAneesh Kumar K.V 		return 0;
3327389d1b08SAneesh Kumar K.V 	return -EIO;
3328389d1b08SAneesh Kumar K.V }
3329389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_submit_read);
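/*
 * Example (a sketch, not part of this file): the two helpers above are
 * designed to be used together, skipping the lock entirely in the
 * common already-uptodate case.  "example_read_bh" is hypothetical.
 */
static int example_read_bh(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* uptodate, buffer not locked */
	return bh_submit_read(bh);	/* consumes the lock; 0 or -EIO */
}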
3330389d1b08SAneesh Kumar K.V 
3331b98938c3SChristoph Lameter static void
333251cc5068SAlexey Dobriyan init_buffer_head(void *data)
3333b98938c3SChristoph Lameter {
3334b98938c3SChristoph Lameter 	struct buffer_head *bh = data;
3335b98938c3SChristoph Lameter 
3336b98938c3SChristoph Lameter 	memset(bh, 0, sizeof(*bh));
3337b98938c3SChristoph Lameter 	INIT_LIST_HEAD(&bh->b_assoc_buffers);
3338b98938c3SChristoph Lameter }
3339b98938c3SChristoph Lameter 
33401da177e4SLinus Torvalds void __init buffer_init(void)
33411da177e4SLinus Torvalds {
33421da177e4SLinus Torvalds 	int nrpages;
33431da177e4SLinus Torvalds 
3344b98938c3SChristoph Lameter 	bh_cachep = kmem_cache_create("buffer_head",
3345b98938c3SChristoph Lameter 			sizeof(struct buffer_head), 0,
3346b98938c3SChristoph Lameter 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3347b98938c3SChristoph Lameter 				SLAB_MEM_SPREAD),
3348b98938c3SChristoph Lameter 				init_buffer_head);
33491da177e4SLinus Torvalds 
33501da177e4SLinus Torvalds 	/*
33511da177e4SLinus Torvalds 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
33521da177e4SLinus Torvalds 	 */
33531da177e4SLinus Torvalds 	nrpages = (nr_free_buffer_pages() * 10) / 100;
33541da177e4SLinus Torvalds 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
33551da177e4SLinus Torvalds 	hotcpu_notifier(buffer_cpu_notify, 0);
33561da177e4SLinus Torvalds }
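/*
 * Worked example (illustrative numbers only; sizeof(struct buffer_head)
 * is roughly 56 bytes on a 32-bit build and varies with config): with
 * 4KiB pages, PAGE_SIZE / sizeof(struct buffer_head) = 4096 / 56 = 73.
 * On a machine with 1,000,000 free buffer pages (~4GiB), nrpages is
 * 100,000 and max_buffer_heads = 100,000 * 73 = 7,300,000 -- about
 * 390MiB worth of buffer heads, i.e. the 10% occupancy cap above.
 */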
33571da177e4SLinus Torvalds 
33581da177e4SLinus Torvalds EXPORT_SYMBOL(__bforget);
33591da177e4SLinus Torvalds EXPORT_SYMBOL(__brelse);
33601da177e4SLinus Torvalds EXPORT_SYMBOL(__wait_on_buffer);
33611da177e4SLinus Torvalds EXPORT_SYMBOL(block_commit_write);
33621da177e4SLinus Torvalds EXPORT_SYMBOL(block_prepare_write);
336354171690SDavid Chinner EXPORT_SYMBOL(block_page_mkwrite);
33641da177e4SLinus Torvalds EXPORT_SYMBOL(block_read_full_page);
33651da177e4SLinus Torvalds EXPORT_SYMBOL(block_sync_page);
33661da177e4SLinus Torvalds EXPORT_SYMBOL(block_truncate_page);
33671da177e4SLinus Torvalds EXPORT_SYMBOL(block_write_full_page);
336835c80d5fSChris Mason EXPORT_SYMBOL(block_write_full_page_endio);
336989e10787SNick Piggin EXPORT_SYMBOL(cont_write_begin);
33701da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_read_sync);
33711da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_write_sync);
337235c80d5fSChris Mason EXPORT_SYMBOL(end_buffer_async_write);
33731da177e4SLinus Torvalds EXPORT_SYMBOL(file_fsync);
33741da177e4SLinus Torvalds EXPORT_SYMBOL(generic_block_bmap);
337505eb0b51SOGAWA Hirofumi EXPORT_SYMBOL(generic_cont_expand_simple);
33761da177e4SLinus Torvalds EXPORT_SYMBOL(init_buffer);
33771da177e4SLinus Torvalds EXPORT_SYMBOL(invalidate_bdev);
33781da177e4SLinus Torvalds EXPORT_SYMBOL(ll_rw_block);
33791da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_dirty);
33801da177e4SLinus Torvalds EXPORT_SYMBOL(submit_bh);
33811da177e4SLinus Torvalds EXPORT_SYMBOL(sync_dirty_buffer);
33821da177e4SLinus Torvalds EXPORT_SYMBOL(unlock_buffer);
3383