xref: /linux/fs/buffer.c (revision 327c0e968645f2601a43f5ea7c19c7b3a5fa0a34)
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}

static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
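
/*
 * Illustrative sketch, not part of the original file: because
 * __wait_on_buffer() returns with the buffer unlocked, a caller that
 * needs a stable view must take the lock itself afterwards, as the
 * comment above says.  The function name example_stable_view() is
 * hypothetical.
 */
static void example_stable_view(struct buffer_head *bh)
{
	wait_on_buffer(bh);	/* returns unlocked - state may change again */
	lock_buffer(bh);	/* take the lock to actually freeze the state */
	/* bh's state is stable here */
	unlock_buffer(bh);
}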

static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}


static int quiet_error(struct buffer_head *bh)
{
	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
		return 0;
	return 1;
}


static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
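
/*
 * Illustrative sketch, not part of the original file: the usual way a
 * caller pairs end_buffer_read_sync() with submit_bh() to read one
 * buffer synchronously (this mirrors what __bread() ends up doing).
 * The function name example_read_bh() is hypothetical.
 */
static int example_read_bh(struct buffer_head *bh)
{
	lock_buffer(bh);
	get_bh(bh);			/* reference dropped by the handler */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);		/* handler unlocked it on completion */
	return buffer_uptodate(bh) ? 0 : -EIO;
}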

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		if (!buffer_mapped(bh))
			all_mapped = 0;
		bh = bh->b_this_page;
	} while (bh != head);

	/* We might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file I/O on the block device and getblk.  It gets dealt with
	 * elsewhere; don't buffer_error if we had some unmapped buffers.
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media disk while
   there was still dirty data not synced to disk (due to a bug in the
   device driver or an error by the user), destroying the dirty buffers
   could generate corruption on the next media inserted as well; thus a
   parameter is necessary to handle this case in the safest way possible
   (trying not to corrupt the newly inserted disk with data belonging to
   the old, now-corrupted one). Also, for the ramdisk the natural thing
   to do in order to release the ramdisk memory is to destroy dirty buffers.

   These are two special cases. Normal usage implies that the device driver
   issues a sync on the device (without waiting for I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	invalidate_mapping_pages(mapping, 0, -1);
}
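
/*
 * Illustrative sketch, not part of the original file: the "normal usage"
 * described in the comment above -- a driver syncs the device and then
 * invalidates its cached buffers, e.g. on a media change.  The function
 * name example_media_change() is hypothetical.
 */
static void example_media_change(struct block_device *bdev)
{
	sync_blockdev(bdev);	/* write out dirty buffers first */
	invalidate_bdev(bdev);	/* then drop the now-clean cached pages */
}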

/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone *zone;
	int nid;

	wakeup_pdflush(1024);
	yield();

	for_each_online_node(nid) {
		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
						gfp_zone(GFP_NOFS), NULL,
						&zone);
		if (zone)
			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
						GFP_NOFS, NULL);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (!quiet_error(bh))
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!quiet_error(bh)) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_write;
	set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);
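
/*
 * Illustrative sketch, not part of the original file: the mark-then-submit
 * pattern page writeout uses -- every dirty buffer on the page is marked
 * async first, and only then is the I/O submitted, so the completion
 * handler above can tell when the whole page is done.  The function name
 * example_write_page_buffers() is hypothetical; __block_write_full_page()
 * does the real thing.
 */
static void example_write_page_buffers(struct buffer_head *head)
{
	struct buffer_head *bh = head;

	do {				/* pass 1: mark */
		lock_buffer(bh);
		if (test_clear_buffer_dirty(bh))
			mark_buffer_async_write(bh);
		else
			unlock_buffer(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	do {				/* pass 2: submit */
		if (buffer_async_write(bh))
			submit_bh(WRITE, bh);
		bh = bh->b_this_page;
	} while (bh != head);
}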


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}
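
/*
 * Illustrative sketch, not part of the original file: the O_SYNC pattern
 * the comment above describes -- queue each write as the buffer is
 * dirtied, then wait only for the already-submitted I/O.  The function
 * name example_osync_write() is hypothetical.
 */
static int example_osync_write(struct buffer_head *bh)
{
	mark_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);	/* queue the write immediately */
	wait_on_buffer(bh);		/* wait only for submitted I/O */
	return buffer_uptodate(bh) ? 0 : -EIO;
}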

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
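
/*
 * Illustrative sketch, not part of the original file: a minimal ->fsync()
 * for a simple buffer-backed filesystem, built on the helper above
 * (ext2's fsync is essentially this plus error propagation).  The
 * function name example_fsync() is hypothetical.
 */
static int example_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;

	return sync_mapping_buffers(inode->i_mapping);
}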

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO, so that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
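
/*
 * Illustrative sketch, not part of the original file: how a filesystem
 * ties a metadata buffer (e.g. an ext2 indirect block) to the file's
 * inode so that something like example_fsync() above will write and wait
 * on it.  The function name example_dirty_indirect() is hypothetical.
 */
static void example_dirty_indirect(struct buffer_head *bh, struct inode *inode)
{
	/* update the indirect block's contents here, then... */
	mark_buffer_dirty_inode(bh, inode);	/* queue on ->private_list */
}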

/*
 * Mark the page dirty, set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static void __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));
		account_page_dirtied(page, mapping);
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	spin_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	int newly_dirty;
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__set_page_dirty(page, mapping, 1);
	return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid a race with mark_buffer_dirty_inode(), which does a
		 * lockless check; we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				ll_rw_block(SWRITE_SYNC, 1, &bh);
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid a race with mark_buffer_dirty_inode(), which does a
		 * lockless check; we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers for a data page, given the size of each
 * buffer.  Use the bh->b_this_page linked list to follow the buffers
 * created.  Return NULL if unable to create more buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping),
 * which may not fail, from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_private = NULL;
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);
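
/*
 * Illustrative sketch, not part of the original file: attaching freshly
 * allocated buffers to a locked page, as grow_dev_page() below does.
 * link_dev_buffers() is the helper defined just after this sketch; the
 * function name example_attach_buffers() is hypothetical.
 */
static void example_attach_buffers(struct page *page, unsigned long blocksize)
{
	struct buffer_head *head;

	head = alloc_page_buffers(page, blocksize, 1);	/* retry == 1: loops
							   until it succeeds */
	link_dev_buffers(page, head);	/* close the ring, attach to page */
}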
8911da177e4SLinus Torvalds 
8921da177e4SLinus Torvalds static inline void
8931da177e4SLinus Torvalds link_dev_buffers(struct page *page, struct buffer_head *head)
8941da177e4SLinus Torvalds {
8951da177e4SLinus Torvalds 	struct buffer_head *bh, *tail;
8961da177e4SLinus Torvalds 
8971da177e4SLinus Torvalds 	bh = head;
8981da177e4SLinus Torvalds 	do {
8991da177e4SLinus Torvalds 		tail = bh;
9001da177e4SLinus Torvalds 		bh = bh->b_this_page;
9011da177e4SLinus Torvalds 	} while (bh);
9021da177e4SLinus Torvalds 	tail->b_this_page = head;
9031da177e4SLinus Torvalds 	attach_page_buffers(page, head);
9041da177e4SLinus Torvalds }
9051da177e4SLinus Torvalds 
9061da177e4SLinus Torvalds /*
9071da177e4SLinus Torvalds  * Initialise the state of a blockdev page's buffers.
9081da177e4SLinus Torvalds  */
9091da177e4SLinus Torvalds static void
9101da177e4SLinus Torvalds init_page_buffers(struct page *page, struct block_device *bdev,
9111da177e4SLinus Torvalds 			sector_t block, int size)
9121da177e4SLinus Torvalds {
9131da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);
9141da177e4SLinus Torvalds 	struct buffer_head *bh = head;
9151da177e4SLinus Torvalds 	int uptodate = PageUptodate(page);
9161da177e4SLinus Torvalds 
9171da177e4SLinus Torvalds 	do {
9181da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
9191da177e4SLinus Torvalds 			init_buffer(bh, NULL, NULL);
9201da177e4SLinus Torvalds 			bh->b_bdev = bdev;
9211da177e4SLinus Torvalds 			bh->b_blocknr = block;
9221da177e4SLinus Torvalds 			if (uptodate)
9231da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
9241da177e4SLinus Torvalds 			set_buffer_mapped(bh);
9251da177e4SLinus Torvalds 		}
9261da177e4SLinus Torvalds 		block++;
9271da177e4SLinus Torvalds 		bh = bh->b_this_page;
9281da177e4SLinus Torvalds 	} while (bh != head);
9291da177e4SLinus Torvalds }
9301da177e4SLinus Torvalds 
9311da177e4SLinus Torvalds /*
9321da177e4SLinus Torvalds  * Create the page-cache page that contains the requested block.
9331da177e4SLinus Torvalds  *
9341da177e4SLinus Torvalds  * This is user purely for blockdev mappings.
9351da177e4SLinus Torvalds  */
9361da177e4SLinus Torvalds static struct page *
9371da177e4SLinus Torvalds grow_dev_page(struct block_device *bdev, sector_t block,
9381da177e4SLinus Torvalds 		pgoff_t index, int size)
9391da177e4SLinus Torvalds {
9401da177e4SLinus Torvalds 	struct inode *inode = bdev->bd_inode;
9411da177e4SLinus Torvalds 	struct page *page;
9421da177e4SLinus Torvalds 	struct buffer_head *bh;
9431da177e4SLinus Torvalds 
944ea125892SChristoph Lameter 	page = find_or_create_page(inode->i_mapping, index,
945769848c0SMel Gorman 		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
9461da177e4SLinus Torvalds 	if (!page)
9471da177e4SLinus Torvalds 		return NULL;
9481da177e4SLinus Torvalds 
949e827f923SEric Sesterhenn 	BUG_ON(!PageLocked(page));
9501da177e4SLinus Torvalds 
9511da177e4SLinus Torvalds 	if (page_has_buffers(page)) {
9521da177e4SLinus Torvalds 		bh = page_buffers(page);
9531da177e4SLinus Torvalds 		if (bh->b_size == size) {
9541da177e4SLinus Torvalds 			init_page_buffers(page, bdev, block, size);
9551da177e4SLinus Torvalds 			return page;
9561da177e4SLinus Torvalds 		}
9571da177e4SLinus Torvalds 		if (!try_to_free_buffers(page))
9581da177e4SLinus Torvalds 			goto failed;
9591da177e4SLinus Torvalds 	}
9601da177e4SLinus Torvalds 
9611da177e4SLinus Torvalds 	/*
9621da177e4SLinus Torvalds 	 * Allocate some buffers for this page
9631da177e4SLinus Torvalds 	 */
9641da177e4SLinus Torvalds 	bh = alloc_page_buffers(page, size, 0);
9651da177e4SLinus Torvalds 	if (!bh)
9661da177e4SLinus Torvalds 		goto failed;
9671da177e4SLinus Torvalds 
9681da177e4SLinus Torvalds 	/*
9691da177e4SLinus Torvalds 	 * Link the page to the buffers and initialise them.  Take the
9701da177e4SLinus Torvalds 	 * lock to be atomic wrt __find_get_block(), which does not
9711da177e4SLinus Torvalds 	 * run under the page lock.
9721da177e4SLinus Torvalds 	 */
9731da177e4SLinus Torvalds 	spin_lock(&inode->i_mapping->private_lock);
9741da177e4SLinus Torvalds 	link_dev_buffers(page, bh);
9751da177e4SLinus Torvalds 	init_page_buffers(page, bdev, block, size);
9761da177e4SLinus Torvalds 	spin_unlock(&inode->i_mapping->private_lock);
9771da177e4SLinus Torvalds 	return page;
9781da177e4SLinus Torvalds 
9791da177e4SLinus Torvalds failed:
9801da177e4SLinus Torvalds 	BUG();
9811da177e4SLinus Torvalds 	unlock_page(page);
9821da177e4SLinus Torvalds 	page_cache_release(page);
9831da177e4SLinus Torvalds 	return NULL;
9841da177e4SLinus Torvalds }
9851da177e4SLinus Torvalds 
9861da177e4SLinus Torvalds /*
9871da177e4SLinus Torvalds  * Create buffers for the specified block device block's page.  If
9881da177e4SLinus Torvalds  * that page was dirty, the buffers are set dirty also.
9891da177e4SLinus Torvalds  */
990858119e1SArjan van de Ven static int
9911da177e4SLinus Torvalds grow_buffers(struct block_device *bdev, sector_t block, int size)
9921da177e4SLinus Torvalds {
9931da177e4SLinus Torvalds 	struct page *page;
9941da177e4SLinus Torvalds 	pgoff_t index;
9951da177e4SLinus Torvalds 	int sizebits;
9961da177e4SLinus Torvalds 
9971da177e4SLinus Torvalds 	sizebits = -1;
9981da177e4SLinus Torvalds 	do {
9991da177e4SLinus Torvalds 		sizebits++;
10001da177e4SLinus Torvalds 	} while ((size << sizebits) < PAGE_SIZE);
10011da177e4SLinus Torvalds 
10021da177e4SLinus Torvalds 	index = block >> sizebits;
10031da177e4SLinus Torvalds 
1004e5657933SAndrew Morton 	/*
1005e5657933SAndrew Morton 	 * Check for a block which wants to lie outside our maximum possible
1006e5657933SAndrew Morton 	 * pagecache index.  (this comparison is done using sector_t types).
1007e5657933SAndrew Morton 	 */
1008e5657933SAndrew Morton 	if (unlikely(index != block >> sizebits)) {
1009e5657933SAndrew Morton 		char b[BDEVNAME_SIZE];
1010e5657933SAndrew Morton 
1011e5657933SAndrew Morton 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1012e5657933SAndrew Morton 			"device %s\n",
10138e24eea7SHarvey Harrison 			__func__, (unsigned long long)block,
1014e5657933SAndrew Morton 			bdevname(bdev, b));
1015e5657933SAndrew Morton 		return -EIO;
1016e5657933SAndrew Morton 	}
1017e5657933SAndrew Morton 	block = index << sizebits;
10181da177e4SLinus Torvalds 	/* Create a page with the proper size buffers.. */
10191da177e4SLinus Torvalds 	page = grow_dev_page(bdev, block, index, size);
10201da177e4SLinus Torvalds 	if (!page)
10211da177e4SLinus Torvalds 		return 0;
10221da177e4SLinus Torvalds 	unlock_page(page);
10231da177e4SLinus Torvalds 	page_cache_release(page);
10241da177e4SLinus Torvalds 	return 1;
10251da177e4SLinus Torvalds }
10261da177e4SLinus Torvalds 
102775c96f85SAdrian Bunk static struct buffer_head *
10281da177e4SLinus Torvalds __getblk_slow(struct block_device *bdev, sector_t block, int size)
10291da177e4SLinus Torvalds {
10301da177e4SLinus Torvalds 	/* Size must be a multiple of the hard sector size */
10311da177e4SLinus Torvalds 	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
10321da177e4SLinus Torvalds 			(size < 512 || size > PAGE_SIZE))) {
10331da177e4SLinus Torvalds 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
10341da177e4SLinus Torvalds 					size);
10351da177e4SLinus Torvalds 		printk(KERN_ERR "hardsect size: %d\n",
10361da177e4SLinus Torvalds 					bdev_hardsect_size(bdev));
10371da177e4SLinus Torvalds 
10381da177e4SLinus Torvalds 		dump_stack();
10391da177e4SLinus Torvalds 		return NULL;
10401da177e4SLinus Torvalds 	}
10411da177e4SLinus Torvalds 
10421da177e4SLinus Torvalds 	for (;;) {
10431da177e4SLinus Torvalds 		struct buffer_head * bh;
1044e5657933SAndrew Morton 		int ret;
10451da177e4SLinus Torvalds 
10461da177e4SLinus Torvalds 		bh = __find_get_block(bdev, block, size);
10471da177e4SLinus Torvalds 		if (bh)
10481da177e4SLinus Torvalds 			return bh;
10491da177e4SLinus Torvalds 
1050e5657933SAndrew Morton 		ret = grow_buffers(bdev, block, size);
1051e5657933SAndrew Morton 		if (ret < 0)
1052e5657933SAndrew Morton 			return NULL;
1053e5657933SAndrew Morton 		if (ret == 0)
10541da177e4SLinus Torvalds 			free_more_memory();
10551da177e4SLinus Torvalds 	}
10561da177e4SLinus Torvalds }
10571da177e4SLinus Torvalds 
10581da177e4SLinus Torvalds /*
10591da177e4SLinus Torvalds  * The relationship between dirty buffers and dirty pages:
10601da177e4SLinus Torvalds  *
10611da177e4SLinus Torvalds  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
10621da177e4SLinus Torvalds  * the page is tagged dirty in its radix tree.
10631da177e4SLinus Torvalds  *
10641da177e4SLinus Torvalds  * At all times, the dirtiness of the buffers represents the dirtiness of
10651da177e4SLinus Torvalds  * subsections of the page.  If the page has buffers, the page dirty bit is
10661da177e4SLinus Torvalds  * merely a hint about the true dirty state.
10671da177e4SLinus Torvalds  *
10681da177e4SLinus Torvalds  * When a page is set dirty in its entirety, all its buffers are marked dirty
10691da177e4SLinus Torvalds  * (if the page has buffers).
10701da177e4SLinus Torvalds  *
10711da177e4SLinus Torvalds  * When a buffer is marked dirty, its page is dirtied, but the page's other
10721da177e4SLinus Torvalds  * buffers are not.
10731da177e4SLinus Torvalds  *
10741da177e4SLinus Torvalds  * Also.  When blockdev buffers are explicitly read with bread(), they
10751da177e4SLinus Torvalds  * individually become uptodate.  But their backing page remains not
10761da177e4SLinus Torvalds  * uptodate - even if all of its buffers are uptodate.  A subsequent
10771da177e4SLinus Torvalds  * block_read_full_page() against that page will discover all the uptodate
10781da177e4SLinus Torvalds  * buffers, will set the page uptodate and will perform no I/O.
10791da177e4SLinus Torvalds  */
10801da177e4SLinus Torvalds 
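/*
 * Illustrative sketch of the bread() case above (not kernel code;
 * "blocknr" is an arbitrary example block):
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *	if (bh) {
 *		... examine bh->b_data: the buffer is uptodate ...
 *		brelse(bh);
 *	}
 *
 * The backing page remains !PageUptodate() afterwards; a later
 * block_read_full_page() against it will find the uptodate buffer and
 * complete without re-issuing I/O for that block.
 */
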
10811da177e4SLinus Torvalds /**
10821da177e4SLinus Torvalds  * mark_buffer_dirty - mark a buffer_head as needing writeout
108367be2dd1SMartin Waitz  * @bh: the buffer_head to mark dirty
10841da177e4SLinus Torvalds  *
10851da177e4SLinus Torvalds  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
10861da177e4SLinus Torvalds  * backing page dirty, then tag the page as dirty in its address_space's radix
10871da177e4SLinus Torvalds  * tree and then attach the address_space's inode to its superblock's dirty
10881da177e4SLinus Torvalds  * inode list.
10891da177e4SLinus Torvalds  *
10901da177e4SLinus Torvalds  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
10911da177e4SLinus Torvalds  * mapping->tree_lock and the global inode_lock.
10921da177e4SLinus Torvalds  */
1093fc9b52cdSHarvey Harrison void mark_buffer_dirty(struct buffer_head *bh)
10941da177e4SLinus Torvalds {
1095787d2214SNick Piggin 	WARN_ON_ONCE(!buffer_uptodate(bh));
10961be62dc1SLinus Torvalds 
10971be62dc1SLinus Torvalds 	/*
10981be62dc1SLinus Torvalds 	 * Very *carefully* optimize the it-is-already-dirty case.
10991be62dc1SLinus Torvalds 	 *
11001be62dc1SLinus Torvalds 	 * Don't let the final "is it dirty" check be reordered to before
11011be62dc1SLinus Torvalds 	 * any modification we may have made to the buffer.
11021be62dc1SLinus Torvalds 	 */
11031be62dc1SLinus Torvalds 	if (buffer_dirty(bh)) {
11041be62dc1SLinus Torvalds 		smp_mb();
11051be62dc1SLinus Torvalds 		if (buffer_dirty(bh))
11061be62dc1SLinus Torvalds 			return;
11071be62dc1SLinus Torvalds 	}
11081be62dc1SLinus Torvalds 
1109a8e7d49aSLinus Torvalds 	if (!test_set_buffer_dirty(bh)) {
1110a8e7d49aSLinus Torvalds 		struct page *page = bh->b_page;
1111a8e7d49aSLinus Torvalds 		if (!TestSetPageDirty(page))
1112a8e7d49aSLinus Torvalds 			__set_page_dirty(page, page_mapping(page), 0);
1113a8e7d49aSLinus Torvalds 	}
11141da177e4SLinus Torvalds }
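
/*
 * Sketch of typical use (illustrative, not taken from a particular
 * filesystem): fill a freshly obtained buffer with metadata and
 * schedule it for writeback:
 *
 *	bh = sb_getblk(sb, blocknr);
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 *
 * Dropping the reference with brelse() does not lose the data: the
 * dirty bit keeps the buffer around until writeback gets to it.
 */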
11151da177e4SLinus Torvalds 
11161da177e4SLinus Torvalds /*
11171da177e4SLinus Torvalds  * Decrement a buffer_head's reference count.  If all buffers against a page
11181da177e4SLinus Torvalds  * have zero reference count, are clean and unlocked, and if the page is clean
11191da177e4SLinus Torvalds  * and unlocked then try_to_free_buffers() may strip the buffers from the page
11201da177e4SLinus Torvalds  * in preparation for freeing it (sometimes, rarely, buffers are removed from
11211da177e4SLinus Torvalds  * a page but it ends up not being freed, and buffers may later be reattached).
11221da177e4SLinus Torvalds  */
11231da177e4SLinus Torvalds void __brelse(struct buffer_head * buf)
11241da177e4SLinus Torvalds {
11251da177e4SLinus Torvalds 	if (atomic_read(&buf->b_count)) {
11261da177e4SLinus Torvalds 		put_bh(buf);
11271da177e4SLinus Torvalds 		return;
11281da177e4SLinus Torvalds 	}
11295c752ad9SArjan van de Ven 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
11301da177e4SLinus Torvalds }
11311da177e4SLinus Torvalds 
11321da177e4SLinus Torvalds /*
11331da177e4SLinus Torvalds  * bforget() is like brelse(), except it discards any
11341da177e4SLinus Torvalds  * potentially dirty data.
11351da177e4SLinus Torvalds  */
11361da177e4SLinus Torvalds void __bforget(struct buffer_head *bh)
11371da177e4SLinus Torvalds {
11381da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
1139535ee2fbSJan Kara 	if (bh->b_assoc_map) {
11401da177e4SLinus Torvalds 		struct address_space *buffer_mapping = bh->b_page->mapping;
11411da177e4SLinus Torvalds 
11421da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
11431da177e4SLinus Torvalds 		list_del_init(&bh->b_assoc_buffers);
114458ff407bSJan Kara 		bh->b_assoc_map = NULL;
11451da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
11461da177e4SLinus Torvalds 	}
11471da177e4SLinus Torvalds 	__brelse(bh);
11481da177e4SLinus Torvalds }
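
/*
 * Illustrative contrast with brelse() ("block_was_freed" is a
 * hypothetical filesystem condition, not kernel code):
 *
 *	if (block_was_freed)
 *		bforget(bh);	... discard the potentially dirty data
 *	else
 *		brelse(bh);	... dirty data will be written back later
 */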
11491da177e4SLinus Torvalds 
11501da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh)
11511da177e4SLinus Torvalds {
11521da177e4SLinus Torvalds 	lock_buffer(bh);
11531da177e4SLinus Torvalds 	if (buffer_uptodate(bh)) {
11541da177e4SLinus Torvalds 		unlock_buffer(bh);
11551da177e4SLinus Torvalds 		return bh;
11561da177e4SLinus Torvalds 	} else {
11571da177e4SLinus Torvalds 		get_bh(bh);
11581da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_read_sync;
11591da177e4SLinus Torvalds 		submit_bh(READ, bh);
11601da177e4SLinus Torvalds 		wait_on_buffer(bh);
11611da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
11621da177e4SLinus Torvalds 			return bh;
11631da177e4SLinus Torvalds 	}
11641da177e4SLinus Torvalds 	brelse(bh);
11651da177e4SLinus Torvalds 	return NULL;
11661da177e4SLinus Torvalds }
11671da177e4SLinus Torvalds 
11681da177e4SLinus Torvalds /*
11691da177e4SLinus Torvalds  * Per-CPU buffer LRU implementation, used to reduce the cost of __find_get_block().
11701da177e4SLinus Torvalds  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
11711da177e4SLinus Torvalds  * refcount elevated by one when they're in an LRU.  A buffer can only appear
11721da177e4SLinus Torvalds  * once in a particular CPU's LRU.  A single buffer can be present in multiple
11731da177e4SLinus Torvalds  * CPUs' LRUs at the same time.
11741da177e4SLinus Torvalds  *
11751da177e4SLinus Torvalds  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
11761da177e4SLinus Torvalds  * sb_find_get_block().
11771da177e4SLinus Torvalds  *
11781da177e4SLinus Torvalds  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
11791da177e4SLinus Torvalds  * a local interrupt disable for that.
11801da177e4SLinus Torvalds  */
11811da177e4SLinus Torvalds 
11821da177e4SLinus Torvalds #define BH_LRU_SIZE	8
11831da177e4SLinus Torvalds 
11841da177e4SLinus Torvalds struct bh_lru {
11851da177e4SLinus Torvalds 	struct buffer_head *bhs[BH_LRU_SIZE];
11861da177e4SLinus Torvalds };
11871da177e4SLinus Torvalds 
11881da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
11891da177e4SLinus Torvalds 
11901da177e4SLinus Torvalds #ifdef CONFIG_SMP
11911da177e4SLinus Torvalds #define bh_lru_lock()	local_irq_disable()
11921da177e4SLinus Torvalds #define bh_lru_unlock()	local_irq_enable()
11931da177e4SLinus Torvalds #else
11941da177e4SLinus Torvalds #define bh_lru_lock()	preempt_disable()
11951da177e4SLinus Torvalds #define bh_lru_unlock()	preempt_enable()
11961da177e4SLinus Torvalds #endif
11971da177e4SLinus Torvalds 
11981da177e4SLinus Torvalds static inline void check_irqs_on(void)
11991da177e4SLinus Torvalds {
12001da177e4SLinus Torvalds #ifdef irqs_disabled
12011da177e4SLinus Torvalds 	BUG_ON(irqs_disabled());
12021da177e4SLinus Torvalds #endif
12031da177e4SLinus Torvalds }
12041da177e4SLinus Torvalds 
12051da177e4SLinus Torvalds /*
12061da177e4SLinus Torvalds  * The LRU management algorithm is dopey-but-simple.  Sorry.
12071da177e4SLinus Torvalds  */
12081da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh)
12091da177e4SLinus Torvalds {
12101da177e4SLinus Torvalds 	struct buffer_head *evictee = NULL;
12111da177e4SLinus Torvalds 	struct bh_lru *lru;
12121da177e4SLinus Torvalds 
12131da177e4SLinus Torvalds 	check_irqs_on();
12141da177e4SLinus Torvalds 	bh_lru_lock();
12151da177e4SLinus Torvalds 	lru = &__get_cpu_var(bh_lrus);
12161da177e4SLinus Torvalds 	if (lru->bhs[0] != bh) {
12171da177e4SLinus Torvalds 		struct buffer_head *bhs[BH_LRU_SIZE];
12181da177e4SLinus Torvalds 		int in;
12191da177e4SLinus Torvalds 		int out = 0;
12201da177e4SLinus Torvalds 
12211da177e4SLinus Torvalds 		get_bh(bh);
12221da177e4SLinus Torvalds 		bhs[out++] = bh;
12231da177e4SLinus Torvalds 		for (in = 0; in < BH_LRU_SIZE; in++) {
12241da177e4SLinus Torvalds 			struct buffer_head *bh2 = lru->bhs[in];
12251da177e4SLinus Torvalds 
12261da177e4SLinus Torvalds 			if (bh2 == bh) {
12271da177e4SLinus Torvalds 				__brelse(bh2);
12281da177e4SLinus Torvalds 			} else {
12291da177e4SLinus Torvalds 				if (out >= BH_LRU_SIZE) {
12301da177e4SLinus Torvalds 					BUG_ON(evictee != NULL);
12311da177e4SLinus Torvalds 					evictee = bh2;
12321da177e4SLinus Torvalds 				} else {
12331da177e4SLinus Torvalds 					bhs[out++] = bh2;
12341da177e4SLinus Torvalds 				}
12351da177e4SLinus Torvalds 			}
12361da177e4SLinus Torvalds 		}
12371da177e4SLinus Torvalds 		while (out < BH_LRU_SIZE)
12381da177e4SLinus Torvalds 			bhs[out++] = NULL;
12391da177e4SLinus Torvalds 		memcpy(lru->bhs, bhs, sizeof(bhs));
12401da177e4SLinus Torvalds 	}
12411da177e4SLinus Torvalds 	bh_lru_unlock();
12421da177e4SLinus Torvalds 
12431da177e4SLinus Torvalds 	if (evictee)
12441da177e4SLinus Torvalds 		__brelse(evictee);
12451da177e4SLinus Torvalds }
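
/*
 * Example: installing a buffer into a full LRU shifts the existing
 * entries down one slot and evicts the old bhs[BH_LRU_SIZE - 1]; the
 * final __brelse() drops the reference the LRU held on the evictee.
 */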
12461da177e4SLinus Torvalds 
12471da177e4SLinus Torvalds /*
12481da177e4SLinus Torvalds  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
12491da177e4SLinus Torvalds  */
1250858119e1SArjan van de Ven static struct buffer_head *
12513991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
12521da177e4SLinus Torvalds {
12531da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
12541da177e4SLinus Torvalds 	struct bh_lru *lru;
12553991d3bdSTomasz Kvarsin 	unsigned int i;
12561da177e4SLinus Torvalds 
12571da177e4SLinus Torvalds 	check_irqs_on();
12581da177e4SLinus Torvalds 	bh_lru_lock();
12591da177e4SLinus Torvalds 	lru = &__get_cpu_var(bh_lrus);
12601da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
12611da177e4SLinus Torvalds 		struct buffer_head *bh = lru->bhs[i];
12621da177e4SLinus Torvalds 
12631da177e4SLinus Torvalds 		if (bh && bh->b_bdev == bdev &&
12641da177e4SLinus Torvalds 				bh->b_blocknr == block && bh->b_size == size) {
12651da177e4SLinus Torvalds 			if (i) {
12661da177e4SLinus Torvalds 				while (i) {
12671da177e4SLinus Torvalds 					lru->bhs[i] = lru->bhs[i - 1];
12681da177e4SLinus Torvalds 					i--;
12691da177e4SLinus Torvalds 				}
12701da177e4SLinus Torvalds 				lru->bhs[0] = bh;
12711da177e4SLinus Torvalds 			}
12721da177e4SLinus Torvalds 			get_bh(bh);
12731da177e4SLinus Torvalds 			ret = bh;
12741da177e4SLinus Torvalds 			break;
12751da177e4SLinus Torvalds 		}
12761da177e4SLinus Torvalds 	}
12771da177e4SLinus Torvalds 	bh_lru_unlock();
12781da177e4SLinus Torvalds 	return ret;
12791da177e4SLinus Torvalds }
12801da177e4SLinus Torvalds 
12811da177e4SLinus Torvalds /*
12821da177e4SLinus Torvalds  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
12831da177e4SLinus Torvalds  * it in the LRU and mark it as accessed.  If it is not present then return
12841da177e4SLinus Torvalds  * NULL.
12851da177e4SLinus Torvalds  */
12861da177e4SLinus Torvalds struct buffer_head *
12873991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
12881da177e4SLinus Torvalds {
12891da177e4SLinus Torvalds 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
12901da177e4SLinus Torvalds 
12911da177e4SLinus Torvalds 	if (bh == NULL) {
1292385fd4c5SCoywolf Qi Hunt 		bh = __find_get_block_slow(bdev, block);
12931da177e4SLinus Torvalds 		if (bh)
12941da177e4SLinus Torvalds 			bh_lru_install(bh);
12951da177e4SLinus Torvalds 	}
12961da177e4SLinus Torvalds 	if (bh)
12971da177e4SLinus Torvalds 		touch_buffer(bh);
12981da177e4SLinus Torvalds 	return bh;
12991da177e4SLinus Torvalds }
13001da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block);
13011da177e4SLinus Torvalds 
13021da177e4SLinus Torvalds /*
13031da177e4SLinus Torvalds  * __getblk will locate (and, if necessary, create) the buffer_head
13041da177e4SLinus Torvalds  * which corresponds to the passed block_device, block and size. The
13051da177e4SLinus Torvalds  * returned buffer has its reference count incremented.
13061da177e4SLinus Torvalds  *
13071da177e4SLinus Torvalds  * __getblk() cannot fail - it just keeps trying.  If you pass it an
13081da177e4SLinus Torvalds  * illegal block number, __getblk() will happily return a buffer_head
13091da177e4SLinus Torvalds  * which represents the non-existent block.  Very weird.
13101da177e4SLinus Torvalds  *
13111da177e4SLinus Torvalds  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
13121da177e4SLinus Torvalds  * attempt keeps failing.  FIXME, perhaps?
13131da177e4SLinus Torvalds  */
13141da177e4SLinus Torvalds struct buffer_head *
13153991d3bdSTomasz Kvarsin __getblk(struct block_device *bdev, sector_t block, unsigned size)
13161da177e4SLinus Torvalds {
13171da177e4SLinus Torvalds 	struct buffer_head *bh = __find_get_block(bdev, block, size);
13181da177e4SLinus Torvalds 
13191da177e4SLinus Torvalds 	might_sleep();
13201da177e4SLinus Torvalds 	if (bh == NULL)
13211da177e4SLinus Torvalds 		bh = __getblk_slow(bdev, block, size);
13221da177e4SLinus Torvalds 	return bh;
13231da177e4SLinus Torvalds }
13241da177e4SLinus Torvalds EXPORT_SYMBOL(__getblk);
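
/*
 * Callers normally reach __find_get_block() and __getblk() through the
 * sb_* wrappers in <linux/buffer_head.h>, which supply the device and
 * block size from the superblock, roughly:
 *
 *	sb_getblk(sb, block) == __getblk(sb->s_bdev, block, sb->s_blocksize)
 */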
13251da177e4SLinus Torvalds 
13261da177e4SLinus Torvalds /*
13271da177e4SLinus Torvalds  * Do async read-ahead on a buffer.
13281da177e4SLinus Torvalds  */
13293991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
13301da177e4SLinus Torvalds {
13311da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
1332a3e713b5SAndrew Morton 	if (likely(bh)) {
13331da177e4SLinus Torvalds 		ll_rw_block(READA, 1, &bh);
13341da177e4SLinus Torvalds 		brelse(bh);
13351da177e4SLinus Torvalds 	}
1336a3e713b5SAndrew Morton }
13371da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead);
13381da177e4SLinus Torvalds 
13391da177e4SLinus Torvalds /**
13401da177e4SLinus Torvalds  *  __bread() - reads a specified block and returns the bh
134167be2dd1SMartin Waitz  *  @bdev: the block_device to read from
13421da177e4SLinus Torvalds  *  @block: number of block
13431da177e4SLinus Torvalds  *  @size: size (in bytes) to read
13441da177e4SLinus Torvalds  *
13451da177e4SLinus Torvalds  *  Reads a specified block, and returns the buffer head that contains it.
13461da177e4SLinus Torvalds  *  It returns NULL if the block was unreadable.
13471da177e4SLinus Torvalds  */
13481da177e4SLinus Torvalds struct buffer_head *
13493991d3bdSTomasz Kvarsin __bread(struct block_device *bdev, sector_t block, unsigned size)
13501da177e4SLinus Torvalds {
13511da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
13521da177e4SLinus Torvalds 
1353a3e713b5SAndrew Morton 	if (likely(bh) && !buffer_uptodate(bh))
13541da177e4SLinus Torvalds 		bh = __bread_slow(bh);
13551da177e4SLinus Torvalds 	return bh;
13561da177e4SLinus Torvalds }
13571da177e4SLinus Torvalds EXPORT_SYMBOL(__bread);
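
/*
 * Sketch of typical use (illustrative: the 1024-byte size and the
 * on-disk structure are assumptions, not taken from this file):
 *
 *	struct buffer_head *bh = __bread(bdev, blocknr, 1024);
 *	if (!bh)
 *		return -EIO;	... the block was unreadable
 *	memcpy(&disk_super, bh->b_data, sizeof(disk_super));
 *	brelse(bh);
 */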
13581da177e4SLinus Torvalds 
13591da177e4SLinus Torvalds /*
13601da177e4SLinus Torvalds  * invalidate_bh_lrus() is called rarely - but not only at unmount.
13611da177e4SLinus Torvalds  * This doesn't race because it runs on each CPU either in IRQ context
13621da177e4SLinus Torvalds  * or with preemption disabled.
13631da177e4SLinus Torvalds  */
13641da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg)
13651da177e4SLinus Torvalds {
13661da177e4SLinus Torvalds 	struct bh_lru *b = &get_cpu_var(bh_lrus);
13671da177e4SLinus Torvalds 	int i;
13681da177e4SLinus Torvalds 
13691da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
13701da177e4SLinus Torvalds 		brelse(b->bhs[i]);
13711da177e4SLinus Torvalds 		b->bhs[i] = NULL;
13721da177e4SLinus Torvalds 	}
13731da177e4SLinus Torvalds 	put_cpu_var(bh_lrus);
13741da177e4SLinus Torvalds }
13751da177e4SLinus Torvalds 
1376f9a14399SPeter Zijlstra void invalidate_bh_lrus(void)
13771da177e4SLinus Torvalds {
137815c8b6c1SJens Axboe 	on_each_cpu(invalidate_bh_lru, NULL, 1);
13791da177e4SLinus Torvalds }
13809db5579bSNick Piggin EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
13811da177e4SLinus Torvalds 
13821da177e4SLinus Torvalds void set_bh_page(struct buffer_head *bh,
13831da177e4SLinus Torvalds 		struct page *page, unsigned long offset)
13841da177e4SLinus Torvalds {
13851da177e4SLinus Torvalds 	bh->b_page = page;
1386e827f923SEric Sesterhenn 	BUG_ON(offset >= PAGE_SIZE);
13871da177e4SLinus Torvalds 	if (PageHighMem(page))
13881da177e4SLinus Torvalds 		/*
13891da177e4SLinus Torvalds 		 * This catches illegal uses and preserves the offset:
13901da177e4SLinus Torvalds 		 */
13911da177e4SLinus Torvalds 		bh->b_data = (char *)(0 + offset);
13921da177e4SLinus Torvalds 	else
13931da177e4SLinus Torvalds 		bh->b_data = page_address(page) + offset;
13941da177e4SLinus Torvalds }
13951da177e4SLinus Torvalds EXPORT_SYMBOL(set_bh_page);
13961da177e4SLinus Torvalds 
13971da177e4SLinus Torvalds /*
13981da177e4SLinus Torvalds  * Called when truncating a buffer on a page completely.
13991da177e4SLinus Torvalds  */
1400858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh)
14011da177e4SLinus Torvalds {
14021da177e4SLinus Torvalds 	lock_buffer(bh);
14031da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
14041da177e4SLinus Torvalds 	bh->b_bdev = NULL;
14051da177e4SLinus Torvalds 	clear_buffer_mapped(bh);
14061da177e4SLinus Torvalds 	clear_buffer_req(bh);
14071da177e4SLinus Torvalds 	clear_buffer_new(bh);
14081da177e4SLinus Torvalds 	clear_buffer_delay(bh);
140933a266ddSDavid Chinner 	clear_buffer_unwritten(bh);
14101da177e4SLinus Torvalds 	unlock_buffer(bh);
14111da177e4SLinus Torvalds }
14121da177e4SLinus Torvalds 
14131da177e4SLinus Torvalds /**
14141da177e4SLinus Torvalds  * block_invalidatepage - invalidate part or all of a buffer-backed page
14151da177e4SLinus Torvalds  *
14161da177e4SLinus Torvalds  * @page: the page which is affected
14171da177e4SLinus Torvalds  * @offset: the index of the truncation point
14181da177e4SLinus Torvalds  *
14191da177e4SLinus Torvalds  * block_invalidatepage() is called when all or part of the page has become
14201da177e4SLinus Torvalds  * invalidated by a truncate operation.
14211da177e4SLinus Torvalds  *
14221da177e4SLinus Torvalds  * block_invalidatepage() does not have to release all buffers, but it must
14231da177e4SLinus Torvalds  * ensure that no dirty buffer is left outside @offset and that no I/O
14241da177e4SLinus Torvalds  * is underway against any of the blocks which are outside the truncation
14251da177e4SLinus Torvalds  * point, because the caller is about to free (and possibly reuse) those
14261da177e4SLinus Torvalds  * blocks on disk.
14271da177e4SLinus Torvalds  */
14282ff28e22SNeilBrown void block_invalidatepage(struct page *page, unsigned long offset)
14291da177e4SLinus Torvalds {
14301da177e4SLinus Torvalds 	struct buffer_head *head, *bh, *next;
14311da177e4SLinus Torvalds 	unsigned int curr_off = 0;
14321da177e4SLinus Torvalds 
14331da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
14341da177e4SLinus Torvalds 	if (!page_has_buffers(page))
14351da177e4SLinus Torvalds 		goto out;
14361da177e4SLinus Torvalds 
14371da177e4SLinus Torvalds 	head = page_buffers(page);
14381da177e4SLinus Torvalds 	bh = head;
14391da177e4SLinus Torvalds 	do {
14401da177e4SLinus Torvalds 		unsigned int next_off = curr_off + bh->b_size;
14411da177e4SLinus Torvalds 		next = bh->b_this_page;
14421da177e4SLinus Torvalds 
14431da177e4SLinus Torvalds 		/*
14441da177e4SLinus Torvalds 		 * is this block fully invalidated?
14451da177e4SLinus Torvalds 		 */
14461da177e4SLinus Torvalds 		if (offset <= curr_off)
14471da177e4SLinus Torvalds 			discard_buffer(bh);
14481da177e4SLinus Torvalds 		curr_off = next_off;
14491da177e4SLinus Torvalds 		bh = next;
14501da177e4SLinus Torvalds 	} while (bh != head);
14511da177e4SLinus Torvalds 
14521da177e4SLinus Torvalds 	/*
14531da177e4SLinus Torvalds 	 * We release buffers only if the entire page is being invalidated.
14541da177e4SLinus Torvalds 	 * The get_block cached value has been unconditionally invalidated,
14551da177e4SLinus Torvalds 	 * so real IO is not possible anymore.
14561da177e4SLinus Torvalds 	 */
14571da177e4SLinus Torvalds 	if (offset == 0)
14582ff28e22SNeilBrown 		try_to_release_page(page, 0);
14591da177e4SLinus Torvalds out:
14602ff28e22SNeilBrown 	return;
14611da177e4SLinus Torvalds }
14621da177e4SLinus Torvalds EXPORT_SYMBOL(block_invalidatepage);
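
/*
 * Worked example: with 1K buffers on a 4K page, an @offset of 2048
 * discards the buffers at offsets 2048 and 3072 and leaves the first
 * two alone; only @offset == 0 attempts to release the whole page.
 */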
14631da177e4SLinus Torvalds 
14641da177e4SLinus Torvalds /*
14651da177e4SLinus Torvalds  * We attach and possibly dirty the buffers atomically wrt
14661da177e4SLinus Torvalds  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
14671da177e4SLinus Torvalds  * is already excluded via the page lock.
14681da177e4SLinus Torvalds  */
14691da177e4SLinus Torvalds void create_empty_buffers(struct page *page,
14701da177e4SLinus Torvalds 			unsigned long blocksize, unsigned long b_state)
14711da177e4SLinus Torvalds {
14721da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *tail;
14731da177e4SLinus Torvalds 
14741da177e4SLinus Torvalds 	head = alloc_page_buffers(page, blocksize, 1);
14751da177e4SLinus Torvalds 	bh = head;
14761da177e4SLinus Torvalds 	do {
14771da177e4SLinus Torvalds 		bh->b_state |= b_state;
14781da177e4SLinus Torvalds 		tail = bh;
14791da177e4SLinus Torvalds 		bh = bh->b_this_page;
14801da177e4SLinus Torvalds 	} while (bh);
14811da177e4SLinus Torvalds 	tail->b_this_page = head;
14821da177e4SLinus Torvalds 
14831da177e4SLinus Torvalds 	spin_lock(&page->mapping->private_lock);
14841da177e4SLinus Torvalds 	if (PageUptodate(page) || PageDirty(page)) {
14851da177e4SLinus Torvalds 		bh = head;
14861da177e4SLinus Torvalds 		do {
14871da177e4SLinus Torvalds 			if (PageDirty(page))
14881da177e4SLinus Torvalds 				set_buffer_dirty(bh);
14891da177e4SLinus Torvalds 			if (PageUptodate(page))
14901da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
14911da177e4SLinus Torvalds 			bh = bh->b_this_page;
14921da177e4SLinus Torvalds 		} while (bh != head);
14931da177e4SLinus Torvalds 	}
14941da177e4SLinus Torvalds 	attach_page_buffers(page, head);
14951da177e4SLinus Torvalds 	spin_unlock(&page->mapping->private_lock);
14961da177e4SLinus Torvalds }
14971da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers);
14981da177e4SLinus Torvalds 
14991da177e4SLinus Torvalds /*
15001da177e4SLinus Torvalds  * We are taking a block for data and we don't want any output from any
15011da177e4SLinus Torvalds  * buffer-cache aliases starting from return from that function and
15021da177e4SLinus Torvalds  * until the moment when something will explicitly mark the buffer
15031da177e4SLinus Torvalds  * dirty (hopefully that will not happen until we free that block ;-)
15041da177e4SLinus Torvalds  * We don't even need to mark it not-uptodate - nobody can expect
15051da177e4SLinus Torvalds  * anything from a newly allocated buffer anyway. We used to use
15061da177e4SLinus Torvalds  * unmap_buffer() for such invalidation, but that was wrong. We definitely
15071da177e4SLinus Torvalds  * don't want to mark the alias unmapped, for example - it would confuse
15081da177e4SLinus Torvalds  * anyone who might pick it up with bread() afterwards...
15091da177e4SLinus Torvalds  *
15101da177e4SLinus Torvalds  * Also..  Note that bforget() doesn't lock the buffer.  So there can
15111da177e4SLinus Torvalds  * be writeout I/O going on against recently-freed buffers.  We don't
15121da177e4SLinus Torvalds  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
15131da177e4SLinus Torvalds  * only if we really need to.  That happens here.
15141da177e4SLinus Torvalds  */
15151da177e4SLinus Torvalds void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
15161da177e4SLinus Torvalds {
15171da177e4SLinus Torvalds 	struct buffer_head *old_bh;
15181da177e4SLinus Torvalds 
15191da177e4SLinus Torvalds 	might_sleep();
15201da177e4SLinus Torvalds 
1521385fd4c5SCoywolf Qi Hunt 	old_bh = __find_get_block_slow(bdev, block);
15221da177e4SLinus Torvalds 	if (old_bh) {
15231da177e4SLinus Torvalds 		clear_buffer_dirty(old_bh);
15241da177e4SLinus Torvalds 		wait_on_buffer(old_bh);
15251da177e4SLinus Torvalds 		clear_buffer_req(old_bh);
15261da177e4SLinus Torvalds 		__brelse(old_bh);
15271da177e4SLinus Torvalds 	}
15281da177e4SLinus Torvalds }
15291da177e4SLinus Torvalds EXPORT_SYMBOL(unmap_underlying_metadata);
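
/*
 * The typical caller pattern, as used by __block_write_full_page() and
 * __block_prepare_write() below: when get_block() reports a freshly
 * allocated block, kill any stale alias in the blockdev mapping first:
 *
 *	if (buffer_new(bh))
 *		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
 */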
15301da177e4SLinus Torvalds 
15311da177e4SLinus Torvalds /*
15321da177e4SLinus Torvalds  * NOTE! All mapped/uptodate combinations are valid:
15331da177e4SLinus Torvalds  *
15341da177e4SLinus Torvalds  *	Mapped	Uptodate	Meaning
15351da177e4SLinus Torvalds  *
15361da177e4SLinus Torvalds  *	No	No		"unknown" - must do get_block()
15371da177e4SLinus Torvalds  *	No	Yes		"hole" - zero-filled
15381da177e4SLinus Torvalds  *	Yes	No		"allocated" - allocated on disk, not read in
15391da177e4SLinus Torvalds  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
15401da177e4SLinus Torvalds  *
15411da177e4SLinus Torvalds  * "Dirty" is valid only with the last case (mapped+uptodate).
15421da177e4SLinus Torvalds  */
15431da177e4SLinus Torvalds 
15441da177e4SLinus Torvalds /*
15451da177e4SLinus Torvalds  * While block_write_full_page is writing back the dirty buffers under
15461da177e4SLinus Torvalds  * the page lock, whoever dirtied the buffers may decide to clean them
15471da177e4SLinus Torvalds  * again at any time.  We handle that by only looking at the buffer
15481da177e4SLinus Torvalds  * state inside lock_buffer().
15491da177e4SLinus Torvalds  *
15501da177e4SLinus Torvalds  * If block_write_full_page() is called for regular writeback
15511da177e4SLinus Torvalds  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
15521da177e4SLinus Torvalds  * locked buffer.  This can only happen if someone has written the buffer
15531da177e4SLinus Torvalds  * directly, with submit_bh().  At the address_space level PageWriteback
15541da177e4SLinus Torvalds  * prevents this contention from occurring.
15551da177e4SLinus Torvalds  */
15561da177e4SLinus Torvalds static int __block_write_full_page(struct inode *inode, struct page *page,
15571da177e4SLinus Torvalds 			get_block_t *get_block, struct writeback_control *wbc)
15581da177e4SLinus Torvalds {
15591da177e4SLinus Torvalds 	int err;
15601da177e4SLinus Torvalds 	sector_t block;
15611da177e4SLinus Torvalds 	sector_t last_block;
1562f0fbd5fcSAndrew Morton 	struct buffer_head *bh, *head;
1563b0cf2321SBadari Pulavarty 	const unsigned blocksize = 1 << inode->i_blkbits;
15641da177e4SLinus Torvalds 	int nr_underway = 0;
15651da177e4SLinus Torvalds 
15661da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
15671da177e4SLinus Torvalds 
15681da177e4SLinus Torvalds 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
15691da177e4SLinus Torvalds 
15701da177e4SLinus Torvalds 	if (!page_has_buffers(page)) {
1571b0cf2321SBadari Pulavarty 		create_empty_buffers(page, blocksize,
15721da177e4SLinus Torvalds 					(1 << BH_Dirty)|(1 << BH_Uptodate));
15731da177e4SLinus Torvalds 	}
15741da177e4SLinus Torvalds 
15751da177e4SLinus Torvalds 	/*
15761da177e4SLinus Torvalds 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
15771da177e4SLinus Torvalds 	 * here, and the (potentially unmapped) buffers may become dirty at
15781da177e4SLinus Torvalds 	 * any time.  If a buffer becomes dirty here after we've inspected it
15791da177e4SLinus Torvalds 	 * then we just miss that fact, and the page stays dirty.
15801da177e4SLinus Torvalds 	 *
15811da177e4SLinus Torvalds 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
15821da177e4SLinus Torvalds 	 * handle that here by just cleaning them.
15831da177e4SLinus Torvalds 	 */
15841da177e4SLinus Torvalds 
158554b21a79SAndrew Morton 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
15861da177e4SLinus Torvalds 	head = page_buffers(page);
15871da177e4SLinus Torvalds 	bh = head;
15881da177e4SLinus Torvalds 
15891da177e4SLinus Torvalds 	/*
15901da177e4SLinus Torvalds 	 * Get all the dirty buffers mapped to disk addresses and
15911da177e4SLinus Torvalds 	 * handle any aliases from the underlying blockdev's mapping.
15921da177e4SLinus Torvalds 	 */
15931da177e4SLinus Torvalds 	do {
15941da177e4SLinus Torvalds 		if (block > last_block) {
15951da177e4SLinus Torvalds 			/*
15961da177e4SLinus Torvalds 			 * mapped buffers outside i_size will occur, because
15971da177e4SLinus Torvalds 			 * this page can be outside i_size when there is a
15981da177e4SLinus Torvalds 			 * truncate in progress.
15991da177e4SLinus Torvalds 			 */
16001da177e4SLinus Torvalds 			/*
16011da177e4SLinus Torvalds 			 * The buffer was zeroed by block_write_full_page()
16021da177e4SLinus Torvalds 			 */
16031da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
16041da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
160529a814d2SAlex Tomas 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
160629a814d2SAlex Tomas 			   buffer_dirty(bh)) {
1607b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
16081da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
16091da177e4SLinus Torvalds 			if (err)
16101da177e4SLinus Torvalds 				goto recover;
161129a814d2SAlex Tomas 			clear_buffer_delay(bh);
16121da177e4SLinus Torvalds 			if (buffer_new(bh)) {
16131da177e4SLinus Torvalds 				/* blockdev mappings never come here */
16141da177e4SLinus Torvalds 				clear_buffer_new(bh);
16151da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
16161da177e4SLinus Torvalds 							bh->b_blocknr);
16171da177e4SLinus Torvalds 			}
16181da177e4SLinus Torvalds 		}
16191da177e4SLinus Torvalds 		bh = bh->b_this_page;
16201da177e4SLinus Torvalds 		block++;
16211da177e4SLinus Torvalds 	} while (bh != head);
16221da177e4SLinus Torvalds 
16231da177e4SLinus Torvalds 	do {
16241da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
16251da177e4SLinus Torvalds 			continue;
16261da177e4SLinus Torvalds 		/*
16271da177e4SLinus Torvalds 		 * If it's a fully non-blocking write attempt and we cannot
16281da177e4SLinus Torvalds 		 * lock the buffer then redirty the page.  Note that this can
16291da177e4SLinus Torvalds 		 * potentially cause a busy-wait loop from pdflush and kswapd
16301da177e4SLinus Torvalds 		 * activity, but those code paths have their own higher-level
16311da177e4SLinus Torvalds 		 * throttling.
16321da177e4SLinus Torvalds 		 */
16331da177e4SLinus Torvalds 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
16341da177e4SLinus Torvalds 			lock_buffer(bh);
1635ca5de404SNick Piggin 		} else if (!trylock_buffer(bh)) {
16361da177e4SLinus Torvalds 			redirty_page_for_writepage(wbc, page);
16371da177e4SLinus Torvalds 			continue;
16381da177e4SLinus Torvalds 		}
16391da177e4SLinus Torvalds 		if (test_clear_buffer_dirty(bh)) {
16401da177e4SLinus Torvalds 			mark_buffer_async_write(bh);
16411da177e4SLinus Torvalds 		} else {
16421da177e4SLinus Torvalds 			unlock_buffer(bh);
16431da177e4SLinus Torvalds 		}
16441da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
16451da177e4SLinus Torvalds 
16461da177e4SLinus Torvalds 	/*
16471da177e4SLinus Torvalds 	 * The page and its buffers are protected by PageWriteback(), so we can
16481da177e4SLinus Torvalds 	 * drop the bh refcounts early.
16491da177e4SLinus Torvalds 	 */
16501da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
16511da177e4SLinus Torvalds 	set_page_writeback(page);
16521da177e4SLinus Torvalds 
16531da177e4SLinus Torvalds 	do {
16541da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
16551da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
16561da177e4SLinus Torvalds 			submit_bh(WRITE, bh);
16571da177e4SLinus Torvalds 			nr_underway++;
1658ad576e63SNick Piggin 		}
16591da177e4SLinus Torvalds 		bh = next;
16601da177e4SLinus Torvalds 	} while (bh != head);
166105937baaSAndrew Morton 	unlock_page(page);
16621da177e4SLinus Torvalds 
16631da177e4SLinus Torvalds 	err = 0;
16641da177e4SLinus Torvalds done:
16651da177e4SLinus Torvalds 	if (nr_underway == 0) {
16661da177e4SLinus Torvalds 		/*
16671da177e4SLinus Torvalds 		 * The page was marked dirty, but the buffers were
16681da177e4SLinus Torvalds 		 * clean.  Someone wrote them back by hand with
16691da177e4SLinus Torvalds 		 * ll_rw_block/submit_bh.  A rare case.
16701da177e4SLinus Torvalds 		 */
16711da177e4SLinus Torvalds 		end_page_writeback(page);
16723d67f2d7SNick Piggin 
16731da177e4SLinus Torvalds 		/*
16741da177e4SLinus Torvalds 		 * The page and buffer_heads can be released at any time from
16751da177e4SLinus Torvalds 		 * here on.
16761da177e4SLinus Torvalds 		 */
16771da177e4SLinus Torvalds 	}
16781da177e4SLinus Torvalds 	return err;
16791da177e4SLinus Torvalds 
16801da177e4SLinus Torvalds recover:
16811da177e4SLinus Torvalds 	/*
16821da177e4SLinus Torvalds 	 * ENOSPC, or some other error.  We may already have added some
16831da177e4SLinus Torvalds 	 * blocks to the file, so we need to write these out to avoid
16841da177e4SLinus Torvalds 	 * exposing stale data.
16851da177e4SLinus Torvalds 	 * The page is currently locked and not marked for writeback
16861da177e4SLinus Torvalds 	 */
16871da177e4SLinus Torvalds 	bh = head;
16881da177e4SLinus Torvalds 	/* Recovery: lock and submit the mapped buffers */
16891da177e4SLinus Torvalds 	do {
169029a814d2SAlex Tomas 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
169129a814d2SAlex Tomas 		    !buffer_delay(bh)) {
16921da177e4SLinus Torvalds 			lock_buffer(bh);
16931da177e4SLinus Torvalds 			mark_buffer_async_write(bh);
16941da177e4SLinus Torvalds 		} else {
16951da177e4SLinus Torvalds 			/*
16961da177e4SLinus Torvalds 			 * The buffer may have been set dirty during
16971da177e4SLinus Torvalds 			 * attachment to a dirty page.
16981da177e4SLinus Torvalds 			 */
16991da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
17001da177e4SLinus Torvalds 		}
17011da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
17021da177e4SLinus Torvalds 	SetPageError(page);
17031da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
17047e4c3690SAndrew Morton 	mapping_set_error(page->mapping, err);
17051da177e4SLinus Torvalds 	set_page_writeback(page);
17061da177e4SLinus Torvalds 	do {
17071da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
17081da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
17091da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
17101da177e4SLinus Torvalds 			submit_bh(WRITE, bh);
17111da177e4SLinus Torvalds 			nr_underway++;
1712ad576e63SNick Piggin 		}
17131da177e4SLinus Torvalds 		bh = next;
17141da177e4SLinus Torvalds 	} while (bh != head);
1715ffda9d30SNick Piggin 	unlock_page(page);
17161da177e4SLinus Torvalds 	goto done;
17171da177e4SLinus Torvalds }
17181da177e4SLinus Torvalds 
1719afddba49SNick Piggin /*
1720afddba49SNick Piggin  * If a page has any new buffers, zero them out here, and mark them uptodate
1721afddba49SNick Piggin  * and dirty so they'll be written out (in order to prevent uninitialised
1722afddba49SNick Piggin  * block data from leaking), and clear the new bit.
1723afddba49SNick Piggin  */
1724afddba49SNick Piggin void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1725afddba49SNick Piggin {
1726afddba49SNick Piggin 	unsigned int block_start, block_end;
1727afddba49SNick Piggin 	struct buffer_head *head, *bh;
1728afddba49SNick Piggin 
1729afddba49SNick Piggin 	BUG_ON(!PageLocked(page));
1730afddba49SNick Piggin 	if (!page_has_buffers(page))
1731afddba49SNick Piggin 		return;
1732afddba49SNick Piggin 
1733afddba49SNick Piggin 	bh = head = page_buffers(page);
1734afddba49SNick Piggin 	block_start = 0;
1735afddba49SNick Piggin 	do {
1736afddba49SNick Piggin 		block_end = block_start + bh->b_size;
1737afddba49SNick Piggin 
1738afddba49SNick Piggin 		if (buffer_new(bh)) {
1739afddba49SNick Piggin 			if (block_end > from && block_start < to) {
1740afddba49SNick Piggin 				if (!PageUptodate(page)) {
1741afddba49SNick Piggin 					unsigned start, size;
1742afddba49SNick Piggin 
1743afddba49SNick Piggin 					start = max(from, block_start);
1744afddba49SNick Piggin 					size = min(to, block_end) - start;
1745afddba49SNick Piggin 
1746eebd2aa3SChristoph Lameter 					zero_user(page, start, size);
1747afddba49SNick Piggin 					set_buffer_uptodate(bh);
1748afddba49SNick Piggin 				}
1749afddba49SNick Piggin 
1750afddba49SNick Piggin 				clear_buffer_new(bh);
1751afddba49SNick Piggin 				mark_buffer_dirty(bh);
1752afddba49SNick Piggin 			}
1753afddba49SNick Piggin 		}
1754afddba49SNick Piggin 
1755afddba49SNick Piggin 		block_start = block_end;
1756afddba49SNick Piggin 		bh = bh->b_this_page;
1757afddba49SNick Piggin 	} while (bh != head);
1758afddba49SNick Piggin }
1759afddba49SNick Piggin EXPORT_SYMBOL(page_zero_new_buffers);
1760afddba49SNick Piggin 
17611da177e4SLinus Torvalds static int __block_prepare_write(struct inode *inode, struct page *page,
17621da177e4SLinus Torvalds 		unsigned from, unsigned to, get_block_t *get_block)
17631da177e4SLinus Torvalds {
17641da177e4SLinus Torvalds 	unsigned block_start, block_end;
17651da177e4SLinus Torvalds 	sector_t block;
17661da177e4SLinus Torvalds 	int err = 0;
17671da177e4SLinus Torvalds 	unsigned blocksize, bbits;
17681da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
17691da177e4SLinus Torvalds 
17701da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
17711da177e4SLinus Torvalds 	BUG_ON(from > PAGE_CACHE_SIZE);
17721da177e4SLinus Torvalds 	BUG_ON(to > PAGE_CACHE_SIZE);
17731da177e4SLinus Torvalds 	BUG_ON(from > to);
17741da177e4SLinus Torvalds 
17751da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
17761da177e4SLinus Torvalds 	if (!page_has_buffers(page))
17771da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
17781da177e4SLinus Torvalds 	head = page_buffers(page);
17791da177e4SLinus Torvalds 
17801da177e4SLinus Torvalds 	bbits = inode->i_blkbits;
17811da177e4SLinus Torvalds 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
17821da177e4SLinus Torvalds 
17831da177e4SLinus Torvalds 	for(bh = head, block_start = 0; bh != head || !block_start;
17841da177e4SLinus Torvalds 	    block++, block_start=block_end, bh = bh->b_this_page) {
17851da177e4SLinus Torvalds 		block_end = block_start + blocksize;
17861da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
17871da177e4SLinus Torvalds 			if (PageUptodate(page)) {
17881da177e4SLinus Torvalds 				if (!buffer_uptodate(bh))
17891da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
17901da177e4SLinus Torvalds 			}
17911da177e4SLinus Torvalds 			continue;
17921da177e4SLinus Torvalds 		}
17931da177e4SLinus Torvalds 		if (buffer_new(bh))
17941da177e4SLinus Torvalds 			clear_buffer_new(bh);
17951da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
1796b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
17971da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
17981da177e4SLinus Torvalds 			if (err)
1799f3ddbdc6SNick Piggin 				break;
18001da177e4SLinus Torvalds 			if (buffer_new(bh)) {
18011da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
18021da177e4SLinus Torvalds 							bh->b_blocknr);
18031da177e4SLinus Torvalds 				if (PageUptodate(page)) {
1804637aff46SNick Piggin 					clear_buffer_new(bh);
18051da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
1806637aff46SNick Piggin 					mark_buffer_dirty(bh);
18071da177e4SLinus Torvalds 					continue;
18081da177e4SLinus Torvalds 				}
1809eebd2aa3SChristoph Lameter 				if (block_end > to || block_start < from)
1810eebd2aa3SChristoph Lameter 					zero_user_segments(page,
1811eebd2aa3SChristoph Lameter 						to, block_end,
1812eebd2aa3SChristoph Lameter 						block_start, from);
18131da177e4SLinus Torvalds 				continue;
18141da177e4SLinus Torvalds 			}
18151da177e4SLinus Torvalds 		}
18161da177e4SLinus Torvalds 		if (PageUptodate(page)) {
18171da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
18181da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
18191da177e4SLinus Torvalds 			continue;
18201da177e4SLinus Torvalds 		}
18211da177e4SLinus Torvalds 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
182233a266ddSDavid Chinner 		    !buffer_unwritten(bh) &&
18231da177e4SLinus Torvalds 		     (block_start < from || block_end > to)) {
18241da177e4SLinus Torvalds 			ll_rw_block(READ, 1, &bh);
18251da177e4SLinus Torvalds 			*wait_bh++=bh;
18261da177e4SLinus Torvalds 		}
18271da177e4SLinus Torvalds 	}
18281da177e4SLinus Torvalds 	/*
18291da177e4SLinus Torvalds 	 * If we issued read requests - let them complete.
18301da177e4SLinus Torvalds 	 */
18311da177e4SLinus Torvalds 	while(wait_bh > wait) {
18321da177e4SLinus Torvalds 		wait_on_buffer(*--wait_bh);
18331da177e4SLinus Torvalds 		if (!buffer_uptodate(*wait_bh))
1834f3ddbdc6SNick Piggin 			err = -EIO;
18351da177e4SLinus Torvalds 	}
1836afddba49SNick Piggin 	if (unlikely(err))
1837afddba49SNick Piggin 		page_zero_new_buffers(page, from, to);
18381da177e4SLinus Torvalds 	return err;
18391da177e4SLinus Torvalds }
18401da177e4SLinus Torvalds 
18411da177e4SLinus Torvalds static int __block_commit_write(struct inode *inode, struct page *page,
18421da177e4SLinus Torvalds 		unsigned from, unsigned to)
18431da177e4SLinus Torvalds {
18441da177e4SLinus Torvalds 	unsigned block_start, block_end;
18451da177e4SLinus Torvalds 	int partial = 0;
18461da177e4SLinus Torvalds 	unsigned blocksize;
18471da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
18481da177e4SLinus Torvalds 
18491da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
18501da177e4SLinus Torvalds 
18511da177e4SLinus Torvalds 	for(bh = head = page_buffers(page), block_start = 0;
18521da177e4SLinus Torvalds 	    bh != head || !block_start;
18531da177e4SLinus Torvalds 	    block_start=block_end, bh = bh->b_this_page) {
18541da177e4SLinus Torvalds 		block_end = block_start + blocksize;
18551da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
18561da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
18571da177e4SLinus Torvalds 				partial = 1;
18581da177e4SLinus Torvalds 		} else {
18591da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
18601da177e4SLinus Torvalds 			mark_buffer_dirty(bh);
18611da177e4SLinus Torvalds 		}
1862afddba49SNick Piggin 		clear_buffer_new(bh);
18631da177e4SLinus Torvalds 	}
18641da177e4SLinus Torvalds 
18651da177e4SLinus Torvalds 	/*
18661da177e4SLinus Torvalds 	 * If this is a partial write which happened to make all buffers
18671da177e4SLinus Torvalds 	 * uptodate then we can optimize away a bogus readpage() for
18681da177e4SLinus Torvalds 	 * the next read(). Here we 'discover' whether the page went
18691da177e4SLinus Torvalds 	 * uptodate as a result of this (potentially partial) write.
18701da177e4SLinus Torvalds 	 */
18711da177e4SLinus Torvalds 	if (!partial)
18721da177e4SLinus Torvalds 		SetPageUptodate(page);
18731da177e4SLinus Torvalds 	return 0;
18741da177e4SLinus Torvalds }
18751da177e4SLinus Torvalds 
18761da177e4SLinus Torvalds /*
1877afddba49SNick Piggin  * block_write_begin takes care of the basic task of block allocation and
1878afddba49SNick Piggin  * bringing partial write blocks uptodate first.
1879afddba49SNick Piggin  *
1880afddba49SNick Piggin  * If *pagep is not NULL, then block_write_begin uses the locked page
1881afddba49SNick Piggin  * at *pagep rather than allocating its own. In this case, the page will
1882afddba49SNick Piggin  * not be unlocked or deallocated on failure.
1883afddba49SNick Piggin  */
1884afddba49SNick Piggin int block_write_begin(struct file *file, struct address_space *mapping,
1885afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
1886afddba49SNick Piggin 			struct page **pagep, void **fsdata,
1887afddba49SNick Piggin 			get_block_t *get_block)
1888afddba49SNick Piggin {
1889afddba49SNick Piggin 	struct inode *inode = mapping->host;
1890afddba49SNick Piggin 	int status = 0;
1891afddba49SNick Piggin 	struct page *page;
1892afddba49SNick Piggin 	pgoff_t index;
1893afddba49SNick Piggin 	unsigned start, end;
1894afddba49SNick Piggin 	int ownpage = 0;
1895afddba49SNick Piggin 
1896afddba49SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
1897afddba49SNick Piggin 	start = pos & (PAGE_CACHE_SIZE - 1);
1898afddba49SNick Piggin 	end = start + len;
1899afddba49SNick Piggin 
1900afddba49SNick Piggin 	page = *pagep;
1901afddba49SNick Piggin 	if (page == NULL) {
1902afddba49SNick Piggin 		ownpage = 1;
190354566b2cSNick Piggin 		page = grab_cache_page_write_begin(mapping, index, flags);
1904afddba49SNick Piggin 		if (!page) {
1905afddba49SNick Piggin 			status = -ENOMEM;
1906afddba49SNick Piggin 			goto out;
1907afddba49SNick Piggin 		}
1908afddba49SNick Piggin 		*pagep = page;
1909afddba49SNick Piggin 	} else
1910afddba49SNick Piggin 		BUG_ON(!PageLocked(page));
1911afddba49SNick Piggin 
1912afddba49SNick Piggin 	status = __block_prepare_write(inode, page, start, end, get_block);
1913afddba49SNick Piggin 	if (unlikely(status)) {
1914afddba49SNick Piggin 		ClearPageUptodate(page);
1915afddba49SNick Piggin 
1916afddba49SNick Piggin 		if (ownpage) {
1917afddba49SNick Piggin 			unlock_page(page);
1918afddba49SNick Piggin 			page_cache_release(page);
1919afddba49SNick Piggin 			*pagep = NULL;
1920afddba49SNick Piggin 
1921afddba49SNick Piggin 			/*
1922afddba49SNick Piggin 			 * prepare_write() may have instantiated a few blocks
1923afddba49SNick Piggin 			 * outside i_size.  Trim these off again.  We don't need
1924afddba49SNick Piggin 			 * i_size_read() because we hold i_mutex.
1925afddba49SNick Piggin 			 */
1926afddba49SNick Piggin 			if (pos + len > inode->i_size)
1927afddba49SNick Piggin 				vmtruncate(inode, inode->i_size);
1928afddba49SNick Piggin 		}
1929afddba49SNick Piggin 	}
1930afddba49SNick Piggin 
1931afddba49SNick Piggin out:
1932afddba49SNick Piggin 	return status;
1933afddba49SNick Piggin }
1934afddba49SNick Piggin EXPORT_SYMBOL(block_write_begin);
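
/*
 * A filesystem wires this up from its own ->write_begin(), passing its
 * get_block callback.  Sketched after ext2 (see fs/ext2/inode.c):
 *
 *	static int ext2_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		*pagep = NULL;
 *		return block_write_begin(file, mapping, pos, len, flags,
 *					pagep, fsdata, ext2_get_block);
 *	}
 */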
1935afddba49SNick Piggin 
1936afddba49SNick Piggin int block_write_end(struct file *file, struct address_space *mapping,
1937afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
1938afddba49SNick Piggin 			struct page *page, void *fsdata)
1939afddba49SNick Piggin {
1940afddba49SNick Piggin 	struct inode *inode = mapping->host;
1941afddba49SNick Piggin 	unsigned start;
1942afddba49SNick Piggin 
1943afddba49SNick Piggin 	start = pos & (PAGE_CACHE_SIZE - 1);
1944afddba49SNick Piggin 
1945afddba49SNick Piggin 	if (unlikely(copied < len)) {
1946afddba49SNick Piggin 		/*
1947afddba49SNick Piggin 		 * The buffers that were written will now be uptodate, so we
1948afddba49SNick Piggin 		 * don't have to worry about a readpage reading them and
1949afddba49SNick Piggin 		 * overwriting a partial write. However if we have encountered
1950afddba49SNick Piggin 		 * a short write and only partially written into a buffer, it
1951afddba49SNick Piggin 		 * will not be marked uptodate, so a readpage might come in and
1952afddba49SNick Piggin 		 * destroy our partial write.
1953afddba49SNick Piggin 		 *
1954afddba49SNick Piggin 		 * Do the simplest thing, and just treat any short write to a
1955afddba49SNick Piggin 		 * non uptodate page as a zero-length write, and force the
1956afddba49SNick Piggin 		 * caller to redo the whole thing.
1957afddba49SNick Piggin 		 */
1958afddba49SNick Piggin 		if (!PageUptodate(page))
1959afddba49SNick Piggin 			copied = 0;
1960afddba49SNick Piggin 
1961afddba49SNick Piggin 		page_zero_new_buffers(page, start+copied, start+len);
1962afddba49SNick Piggin 	}
1963afddba49SNick Piggin 	flush_dcache_page(page);
1964afddba49SNick Piggin 
1965afddba49SNick Piggin 	/* This could be a short (even 0-length) commit */
1966afddba49SNick Piggin 	__block_commit_write(inode, page, start, start+copied);
1967afddba49SNick Piggin 
1968afddba49SNick Piggin 	return copied;
1969afddba49SNick Piggin }
1970afddba49SNick Piggin EXPORT_SYMBOL(block_write_end);
1971afddba49SNick Piggin 
1972afddba49SNick Piggin int generic_write_end(struct file *file, struct address_space *mapping,
1973afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
1974afddba49SNick Piggin 			struct page *page, void *fsdata)
1975afddba49SNick Piggin {
1976afddba49SNick Piggin 	struct inode *inode = mapping->host;
1977c7d206b3SJan Kara 	int i_size_changed = 0;
1978afddba49SNick Piggin 
1979afddba49SNick Piggin 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1980afddba49SNick Piggin 
1981afddba49SNick Piggin 	/*
1982afddba49SNick Piggin 	 * No need to use i_size_read() here, the i_size
1983afddba49SNick Piggin 	 * cannot change under us because we hold i_mutex.
1984afddba49SNick Piggin 	 *
1985afddba49SNick Piggin 	 * But it's important to update i_size while still holding page lock:
1986afddba49SNick Piggin 	 * page writeout could otherwise come in and zero beyond i_size.
1987afddba49SNick Piggin 	 */
1988afddba49SNick Piggin 	if (pos+copied > inode->i_size) {
1989afddba49SNick Piggin 		i_size_write(inode, pos+copied);
1990c7d206b3SJan Kara 		i_size_changed = 1;
1991afddba49SNick Piggin 	}
1992afddba49SNick Piggin 
1993afddba49SNick Piggin 	unlock_page(page);
1994afddba49SNick Piggin 	page_cache_release(page);
1995afddba49SNick Piggin 
1996c7d206b3SJan Kara 	/*
1997c7d206b3SJan Kara 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
1998c7d206b3SJan Kara 	 * makes the holding time of page lock longer. Second, it forces lock
1999c7d206b3SJan Kara 	 * ordering of page lock and transaction start for journaling
2000c7d206b3SJan Kara 	 * filesystems.
2001c7d206b3SJan Kara 	 */
2002c7d206b3SJan Kara 	if (i_size_changed)
2003c7d206b3SJan Kara 		mark_inode_dirty(inode);
2004c7d206b3SJan Kara 
2005afddba49SNick Piggin 	return copied;
2006afddba49SNick Piggin }
2007afddba49SNick Piggin EXPORT_SYMBOL(generic_write_end);
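
/*
 * Rough shape of the generic buffered write loop that drives the two
 * halves above (a sketch of mm/filemap.c, not a literal quote):
 *
 *	a_ops->write_begin()	... lock the page, prepare its buffers
 *	copy the user data into the page
 *	a_ops->write_end()	... commit buffers, update i_size, unlock
 */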
2008afddba49SNick Piggin 
2009afddba49SNick Piggin /*
20108ab22b9aSHisashi Hifumi  * block_is_partially_uptodate checks whether buffers within a page are
20118ab22b9aSHisashi Hifumi  * uptodate or not.
20128ab22b9aSHisashi Hifumi  *
20138ab22b9aSHisashi Hifumi  * Returns true if all the buffers which correspond to the file portion
20148ab22b9aSHisashi Hifumi  * we want to read are uptodate.
20158ab22b9aSHisashi Hifumi  */
20168ab22b9aSHisashi Hifumi int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
20178ab22b9aSHisashi Hifumi 					unsigned long from)
20188ab22b9aSHisashi Hifumi {
20198ab22b9aSHisashi Hifumi 	struct inode *inode = page->mapping->host;
20208ab22b9aSHisashi Hifumi 	unsigned block_start, block_end, blocksize;
20218ab22b9aSHisashi Hifumi 	unsigned to;
20228ab22b9aSHisashi Hifumi 	struct buffer_head *bh, *head;
20238ab22b9aSHisashi Hifumi 	int ret = 1;
20248ab22b9aSHisashi Hifumi 
20258ab22b9aSHisashi Hifumi 	if (!page_has_buffers(page))
20268ab22b9aSHisashi Hifumi 		return 0;
20278ab22b9aSHisashi Hifumi 
20288ab22b9aSHisashi Hifumi 	blocksize = 1 << inode->i_blkbits;
20298ab22b9aSHisashi Hifumi 	to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
20308ab22b9aSHisashi Hifumi 	to = from + to;
20318ab22b9aSHisashi Hifumi 	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
20328ab22b9aSHisashi Hifumi 		return 0;
20338ab22b9aSHisashi Hifumi 
20348ab22b9aSHisashi Hifumi 	head = page_buffers(page);
20358ab22b9aSHisashi Hifumi 	bh = head;
20368ab22b9aSHisashi Hifumi 	block_start = 0;
20378ab22b9aSHisashi Hifumi 	do {
20388ab22b9aSHisashi Hifumi 		block_end = block_start + blocksize;
20398ab22b9aSHisashi Hifumi 		if (block_end > from && block_start < to) {
20408ab22b9aSHisashi Hifumi 			if (!buffer_uptodate(bh)) {
20418ab22b9aSHisashi Hifumi 				ret = 0;
20428ab22b9aSHisashi Hifumi 				break;
20438ab22b9aSHisashi Hifumi 			}
20448ab22b9aSHisashi Hifumi 			if (block_end >= to)
20458ab22b9aSHisashi Hifumi 				break;
20468ab22b9aSHisashi Hifumi 		}
20478ab22b9aSHisashi Hifumi 		block_start = block_end;
20488ab22b9aSHisashi Hifumi 		bh = bh->b_this_page;
20498ab22b9aSHisashi Hifumi 	} while (bh != head);
20508ab22b9aSHisashi Hifumi 
20518ab22b9aSHisashi Hifumi 	return ret;
20528ab22b9aSHisashi Hifumi }
20538ab22b9aSHisashi Hifumi EXPORT_SYMBOL(block_is_partially_uptodate);
20548ab22b9aSHisashi Hifumi 
20558ab22b9aSHisashi Hifumi /*
20561da177e4SLinus Torvalds  * Generic "read page" function for block devices that have the normal
20571da177e4SLinus Torvalds  * get_block functionality. This is most of the block device filesystems.
20581da177e4SLinus Torvalds  * Reads the page asynchronously --- the unlock_buffer() and
20591da177e4SLinus Torvalds  * set/clear_buffer_uptodate() functions propagate buffer state into the
20601da177e4SLinus Torvalds  * page struct once IO has completed.
20611da177e4SLinus Torvalds  */
20621da177e4SLinus Torvalds int block_read_full_page(struct page *page, get_block_t *get_block)
20631da177e4SLinus Torvalds {
20641da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
20651da177e4SLinus Torvalds 	sector_t iblock, lblock;
20661da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
20671da177e4SLinus Torvalds 	unsigned int blocksize;
20681da177e4SLinus Torvalds 	int nr, i;
20691da177e4SLinus Torvalds 	int fully_mapped = 1;
20701da177e4SLinus Torvalds 
2071cd7619d6SMatt Mackall 	BUG_ON(!PageLocked(page));
20721da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
20731da177e4SLinus Torvalds 	if (!page_has_buffers(page))
20741da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
20751da177e4SLinus Torvalds 	head = page_buffers(page);
20761da177e4SLinus Torvalds 
20771da177e4SLinus Torvalds 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
20781da177e4SLinus Torvalds 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
20791da177e4SLinus Torvalds 	bh = head;
20801da177e4SLinus Torvalds 	nr = 0;
20811da177e4SLinus Torvalds 	i = 0;
20821da177e4SLinus Torvalds 
20831da177e4SLinus Torvalds 	do {
20841da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
20851da177e4SLinus Torvalds 			continue;
20861da177e4SLinus Torvalds 
20871da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
2088c64610baSAndrew Morton 			int err = 0;
2089c64610baSAndrew Morton 
20901da177e4SLinus Torvalds 			fully_mapped = 0;
20911da177e4SLinus Torvalds 			if (iblock < lblock) {
2092b0cf2321SBadari Pulavarty 				WARN_ON(bh->b_size != blocksize);
2093c64610baSAndrew Morton 				err = get_block(inode, iblock, bh, 0);
2094c64610baSAndrew Morton 				if (err)
20951da177e4SLinus Torvalds 					SetPageError(page);
20961da177e4SLinus Torvalds 			}
20971da177e4SLinus Torvalds 			if (!buffer_mapped(bh)) {
2098eebd2aa3SChristoph Lameter 				zero_user(page, i * blocksize, blocksize);
2099c64610baSAndrew Morton 				if (!err)
21001da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
21011da177e4SLinus Torvalds 				continue;
21021da177e4SLinus Torvalds 			}
21031da177e4SLinus Torvalds 			/*
21041da177e4SLinus Torvalds 			 * get_block() might have updated the buffer
21051da177e4SLinus Torvalds 			 * synchronously
21061da177e4SLinus Torvalds 			 */
21071da177e4SLinus Torvalds 			if (buffer_uptodate(bh))
21081da177e4SLinus Torvalds 				continue;
21091da177e4SLinus Torvalds 		}
21101da177e4SLinus Torvalds 		arr[nr++] = bh;
21111da177e4SLinus Torvalds 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
21121da177e4SLinus Torvalds 
21131da177e4SLinus Torvalds 	if (fully_mapped)
21141da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
21151da177e4SLinus Torvalds 
21161da177e4SLinus Torvalds 	if (!nr) {
21171da177e4SLinus Torvalds 		/*
21181da177e4SLinus Torvalds 		 * All buffers are uptodate - we can set the page uptodate
21191da177e4SLinus Torvalds 		 * as well. But not if get_block() returned an error.
21201da177e4SLinus Torvalds 		 */
21211da177e4SLinus Torvalds 		if (!PageError(page))
21221da177e4SLinus Torvalds 			SetPageUptodate(page);
21231da177e4SLinus Torvalds 		unlock_page(page);
21241da177e4SLinus Torvalds 		return 0;
21251da177e4SLinus Torvalds 	}
21261da177e4SLinus Torvalds 
21271da177e4SLinus Torvalds 	/* Stage two: lock the buffers */
21281da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
21291da177e4SLinus Torvalds 		bh = arr[i];
21301da177e4SLinus Torvalds 		lock_buffer(bh);
21311da177e4SLinus Torvalds 		mark_buffer_async_read(bh);
21321da177e4SLinus Torvalds 	}
21331da177e4SLinus Torvalds 
21341da177e4SLinus Torvalds 	/*
21351da177e4SLinus Torvalds 	 * Stage three: start the IO.  Check for uptodateness
21361da177e4SLinus Torvalds 	 * inside the buffer lock in case another process reading
21371da177e4SLinus Torvalds 	 * the underlying blockdev brought it uptodate (the sct fix).
21381da177e4SLinus Torvalds 	 */
21391da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
21401da177e4SLinus Torvalds 		bh = arr[i];
21411da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
21421da177e4SLinus Torvalds 			end_buffer_async_read(bh, 1);
21431da177e4SLinus Torvalds 		else
21441da177e4SLinus Torvalds 			submit_bh(READ, bh);
21451da177e4SLinus Torvalds 	}
21461da177e4SLinus Torvalds 	return 0;
21471da177e4SLinus Torvalds }
21481da177e4SLinus Torvalds 
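/*
 * Example (editor's sketch, not part of the original source): filesystems
 * normally expose block_read_full_page() through a thin ->readpage
 * wrapper in their address_space operations; myfs_readpage and
 * myfs_get_block are hypothetical names.
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, myfs_get_block);
 *	}
 */
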
21491da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding
215089e10787SNick Piggin  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
21511da177e4SLinus Torvalds  * deal with the hole.
21521da177e4SLinus Torvalds  */
215389e10787SNick Piggin int generic_cont_expand_simple(struct inode *inode, loff_t size)
21541da177e4SLinus Torvalds {
21551da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
21561da177e4SLinus Torvalds 	struct page *page;
215789e10787SNick Piggin 	void *fsdata;
215805eb0b51SOGAWA Hirofumi 	unsigned long limit;
21591da177e4SLinus Torvalds 	int err;
21601da177e4SLinus Torvalds 
21611da177e4SLinus Torvalds 	err = -EFBIG;
21621da177e4SLinus Torvalds 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
21631da177e4SLinus Torvalds 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
21641da177e4SLinus Torvalds 		send_sig(SIGXFSZ, current, 0);
21651da177e4SLinus Torvalds 		goto out;
21661da177e4SLinus Torvalds 	}
21671da177e4SLinus Torvalds 	if (size > inode->i_sb->s_maxbytes)
21681da177e4SLinus Torvalds 		goto out;
21691da177e4SLinus Torvalds 
217089e10787SNick Piggin 	err = pagecache_write_begin(NULL, mapping, size, 0,
217189e10787SNick Piggin 				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
217289e10787SNick Piggin 				&page, &fsdata);
217389e10787SNick Piggin 	if (err)
217405eb0b51SOGAWA Hirofumi 		goto out;
217505eb0b51SOGAWA Hirofumi 
217689e10787SNick Piggin 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
217789e10787SNick Piggin 	BUG_ON(err > 0);
217805eb0b51SOGAWA Hirofumi 
217905eb0b51SOGAWA Hirofumi out:
218005eb0b51SOGAWA Hirofumi 	return err;
218105eb0b51SOGAWA Hirofumi }
218205eb0b51SOGAWA Hirofumi 
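/*
 * Example (editor's illustration; myfs_setattr and the iattr handling
 * around it are assumed, not from this file): a filesystem that cannot
 * represent holes would call generic_cont_expand_simple() from its
 * ->setattr path when the size grows, so the new tail is zero-filled:
 *
 *	if ((attr->ia_valid & ATTR_SIZE) &&
 *	    attr->ia_size > i_size_read(inode)) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}
 */
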
2183f1e3af72SAdrian Bunk static int cont_expand_zero(struct file *file, struct address_space *mapping,
218489e10787SNick Piggin 			    loff_t pos, loff_t *bytes)
218505eb0b51SOGAWA Hirofumi {
218689e10787SNick Piggin 	struct inode *inode = mapping->host;
218789e10787SNick Piggin 	unsigned blocksize = 1 << inode->i_blkbits;
218889e10787SNick Piggin 	struct page *page;
218989e10787SNick Piggin 	void *fsdata;
219089e10787SNick Piggin 	pgoff_t index, curidx;
219189e10787SNick Piggin 	loff_t curpos;
219289e10787SNick Piggin 	unsigned zerofrom, offset, len;
219389e10787SNick Piggin 	int err = 0;
219405eb0b51SOGAWA Hirofumi 
219589e10787SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
219689e10787SNick Piggin 	offset = pos & ~PAGE_CACHE_MASK;
219789e10787SNick Piggin 
219889e10787SNick Piggin 	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
219989e10787SNick Piggin 		zerofrom = curpos & ~PAGE_CACHE_MASK;
220089e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
220189e10787SNick Piggin 			*bytes |= (blocksize-1);
220289e10787SNick Piggin 			(*bytes)++;
220389e10787SNick Piggin 		}
220489e10787SNick Piggin 		len = PAGE_CACHE_SIZE - zerofrom;
220589e10787SNick Piggin 
220689e10787SNick Piggin 		err = pagecache_write_begin(file, mapping, curpos, len,
220789e10787SNick Piggin 						AOP_FLAG_UNINTERRUPTIBLE,
220889e10787SNick Piggin 						&page, &fsdata);
220989e10787SNick Piggin 		if (err)
221089e10787SNick Piggin 			goto out;
2211eebd2aa3SChristoph Lameter 		zero_user(page, zerofrom, len);
221289e10787SNick Piggin 		err = pagecache_write_end(file, mapping, curpos, len, len,
221389e10787SNick Piggin 						page, fsdata);
221489e10787SNick Piggin 		if (err < 0)
221589e10787SNick Piggin 			goto out;
221689e10787SNick Piggin 		BUG_ON(err != len);
221789e10787SNick Piggin 		err = 0;
2218061e9746SOGAWA Hirofumi 
2219061e9746SOGAWA Hirofumi 		balance_dirty_pages_ratelimited(mapping);
222089e10787SNick Piggin 	}
222189e10787SNick Piggin 
222289e10787SNick Piggin 	/* page covers the boundary, find the boundary offset */
222389e10787SNick Piggin 	if (index == curidx) {
222489e10787SNick Piggin 		zerofrom = curpos & ~PAGE_CACHE_MASK;
222589e10787SNick Piggin 		/* if we will expand the file, the last block will be filled */
222689e10787SNick Piggin 		if (offset <= zerofrom) {
222789e10787SNick Piggin 			goto out;
222889e10787SNick Piggin 		}
222989e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
223089e10787SNick Piggin 			*bytes |= (blocksize-1);
223189e10787SNick Piggin 			(*bytes)++;
223289e10787SNick Piggin 		}
223389e10787SNick Piggin 		len = offset - zerofrom;
223489e10787SNick Piggin 
223589e10787SNick Piggin 		err = pagecache_write_begin(file, mapping, curpos, len,
223689e10787SNick Piggin 						AOP_FLAG_UNINTERRUPTIBLE,
223789e10787SNick Piggin 						&page, &fsdata);
223889e10787SNick Piggin 		if (err)
223989e10787SNick Piggin 			goto out;
2240eebd2aa3SChristoph Lameter 		zero_user(page, zerofrom, len);
224189e10787SNick Piggin 		err = pagecache_write_end(file, mapping, curpos, len, len,
224289e10787SNick Piggin 						page, fsdata);
224389e10787SNick Piggin 		if (err < 0)
224489e10787SNick Piggin 			goto out;
224589e10787SNick Piggin 		BUG_ON(err != len);
224689e10787SNick Piggin 		err = 0;
224789e10787SNick Piggin 	}
224889e10787SNick Piggin out:
224989e10787SNick Piggin 	return err;
22501da177e4SLinus Torvalds }
22511da177e4SLinus Torvalds 
22521da177e4SLinus Torvalds /*
22531da177e4SLinus Torvalds  * For moronic filesystems that do not allow holes in files.
22541da177e4SLinus Torvalds  * We may have to extend the file.
22551da177e4SLinus Torvalds  */
225689e10787SNick Piggin int cont_write_begin(struct file *file, struct address_space *mapping,
225789e10787SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
225889e10787SNick Piggin 			struct page **pagep, void **fsdata,
225989e10787SNick Piggin 			get_block_t *get_block, loff_t *bytes)
22601da177e4SLinus Torvalds {
22611da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
22621da177e4SLinus Torvalds 	unsigned blocksize = 1 << inode->i_blkbits;
226389e10787SNick Piggin 	unsigned zerofrom;
226489e10787SNick Piggin 	int err;
22651da177e4SLinus Torvalds 
226689e10787SNick Piggin 	err = cont_expand_zero(file, mapping, pos, bytes);
226789e10787SNick Piggin 	if (err)
22681da177e4SLinus Torvalds 		goto out;
22691da177e4SLinus Torvalds 
22701da177e4SLinus Torvalds 	zerofrom = *bytes & ~PAGE_CACHE_MASK;
227189e10787SNick Piggin 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
22721da177e4SLinus Torvalds 		*bytes |= (blocksize-1);
22731da177e4SLinus Torvalds 		(*bytes)++;
22741da177e4SLinus Torvalds 	}
22751da177e4SLinus Torvalds 
227689e10787SNick Piggin 	*pagep = NULL;
227789e10787SNick Piggin 	err = block_write_begin(file, mapping, pos, len,
227889e10787SNick Piggin 				flags, pagep, fsdata, get_block);
22791da177e4SLinus Torvalds out:
228089e10787SNick Piggin 	return err;
22811da177e4SLinus Torvalds }
22821da177e4SLinus Torvalds 
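/*
 * Example (editor's sketch; MYFS_I(), mmu_private and myfs_get_block are
 * hypothetical): a caller of cont_write_begin() supplies a per-inode byte
 * count recording how far the file has been allocated and zeroed, in the
 * style of fat's mmu_private:
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping,
 *			loff_t pos, unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, flags,
 *					pagep, fsdata, myfs_get_block,
 *					&MYFS_I(mapping->host)->mmu_private);
 *	}
 */
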
22831da177e4SLinus Torvalds int block_prepare_write(struct page *page, unsigned from, unsigned to,
22841da177e4SLinus Torvalds 			get_block_t *get_block)
22851da177e4SLinus Torvalds {
22861da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
22871da177e4SLinus Torvalds 	int err = __block_prepare_write(inode, page, from, to, get_block);
22881da177e4SLinus Torvalds 	if (err)
22891da177e4SLinus Torvalds 		ClearPageUptodate(page);
22901da177e4SLinus Torvalds 	return err;
22911da177e4SLinus Torvalds }
22921da177e4SLinus Torvalds 
22931da177e4SLinus Torvalds int block_commit_write(struct page *page, unsigned from, unsigned to)
22941da177e4SLinus Torvalds {
22951da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
22961da177e4SLinus Torvalds 	__block_commit_write(inode,page,from,to);
22971da177e4SLinus Torvalds 	return 0;
22981da177e4SLinus Torvalds }
22991da177e4SLinus Torvalds 
230054171690SDavid Chinner /*
230154171690SDavid Chinner  * block_page_mkwrite() is not allowed to change the file size as it gets
230254171690SDavid Chinner  * called from a page fault handler when a page is first dirtied. Hence we must
230354171690SDavid Chinner  * be careful to check for EOF conditions here. We set the page up correctly
230454171690SDavid Chinner  * for a written page which means we get ENOSPC checking when writing into
230554171690SDavid Chinner  * holes and correct delalloc and unwritten extent mapping on filesystems that
230654171690SDavid Chinner  * support these features.
230754171690SDavid Chinner  *
230854171690SDavid Chinner  * We are not allowed to take the i_mutex here so we have to play games to
230954171690SDavid Chinner  * protect against truncate races as the page could now be beyond EOF.  Because
231054171690SDavid Chinner  * vmtruncate() writes the inode size before removing pages, once we have the
231154171690SDavid Chinner  * page lock we can determine safely if the page is beyond EOF. If it is not
231254171690SDavid Chinner  * beyond EOF, then the page is guaranteed safe against truncation until we
231354171690SDavid Chinner  * unlock the page.
231454171690SDavid Chinner  */
231554171690SDavid Chinner int
2316c2ec175cSNick Piggin block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
231754171690SDavid Chinner 		   get_block_t get_block)
231854171690SDavid Chinner {
2319c2ec175cSNick Piggin 	struct page *page = vmf->page;
232054171690SDavid Chinner 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
232154171690SDavid Chinner 	unsigned long end;
232254171690SDavid Chinner 	loff_t size;
232356a76f82SNick Piggin 	int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
232454171690SDavid Chinner 
232554171690SDavid Chinner 	lock_page(page);
232654171690SDavid Chinner 	size = i_size_read(inode);
232754171690SDavid Chinner 	if ((page->mapping != inode->i_mapping) ||
232818336338SNick Piggin 	    (page_offset(page) > size)) {
232954171690SDavid Chinner 		/* page got truncated out from underneath us */
233054171690SDavid Chinner 		goto out_unlock;
233154171690SDavid Chinner 	}
233254171690SDavid Chinner 
233354171690SDavid Chinner 	/* page is wholly or partially inside EOF */
233454171690SDavid Chinner 	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
233554171690SDavid Chinner 		end = size & ~PAGE_CACHE_MASK;
233654171690SDavid Chinner 	else
233754171690SDavid Chinner 		end = PAGE_CACHE_SIZE;
233854171690SDavid Chinner 
233954171690SDavid Chinner 	ret = block_prepare_write(page, 0, end, get_block);
234054171690SDavid Chinner 	if (!ret)
234154171690SDavid Chinner 		ret = block_commit_write(page, 0, end);
234254171690SDavid Chinner 
234356a76f82SNick Piggin 	if (unlikely(ret)) {
234456a76f82SNick Piggin 		if (ret == -ENOMEM)
234556a76f82SNick Piggin 			ret = VM_FAULT_OOM;
234656a76f82SNick Piggin 		else /* -ENOSPC, -EIO, etc */
2347c2ec175cSNick Piggin 			ret = VM_FAULT_SIGBUS;
234856a76f82SNick Piggin 	}
2349c2ec175cSNick Piggin 
235056a76f82SNick Piggin out_unlock:
235154171690SDavid Chinner 	unlock_page(page);
235254171690SDavid Chinner 	return ret;
235354171690SDavid Chinner }
23541da177e4SLinus Torvalds 
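/*
 * Example (editor's sketch with hypothetical myfs_* names): wiring
 * block_page_mkwrite() into a file's vm_operations_struct so that
 * writable mmaps get block allocation and ENOSPC checking at fault time:
 *
 *	static int myfs_page_mkwrite(struct vm_area_struct *vma,
 *				     struct vm_fault *vmf)
 *	{
 *		return block_page_mkwrite(vma, vmf, myfs_get_block);
 *	}
 *
 *	static struct vm_operations_struct myfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 */
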
23551da177e4SLinus Torvalds /*
235603158cd7SNick Piggin  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
23571da177e4SLinus Torvalds  * immediately, while under the page lock.  So it needs a special end_io
23581da177e4SLinus Torvalds  * handler which does not touch the bh after unlocking it.
23591da177e4SLinus Torvalds  */
23601da177e4SLinus Torvalds static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
23611da177e4SLinus Torvalds {
236268671f35SDmitry Monakhov 	__end_buffer_read_notouch(bh, uptodate);
23631da177e4SLinus Torvalds }
23641da177e4SLinus Torvalds 
23651da177e4SLinus Torvalds /*
236603158cd7SNick Piggin  * Attach the singly-linked list of buffers created by nobh_write_begin, to
236703158cd7SNick Piggin  * the page (converting it to circular linked list and taking care of page
236803158cd7SNick Piggin  * dirty races).
236903158cd7SNick Piggin  */
237003158cd7SNick Piggin static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
237103158cd7SNick Piggin {
237203158cd7SNick Piggin 	struct buffer_head *bh;
237303158cd7SNick Piggin 
237403158cd7SNick Piggin 	BUG_ON(!PageLocked(page));
237503158cd7SNick Piggin 
237603158cd7SNick Piggin 	spin_lock(&page->mapping->private_lock);
237703158cd7SNick Piggin 	bh = head;
237803158cd7SNick Piggin 	do {
237903158cd7SNick Piggin 		if (PageDirty(page))
238003158cd7SNick Piggin 			set_buffer_dirty(bh);
238103158cd7SNick Piggin 		if (!bh->b_this_page)
238203158cd7SNick Piggin 			bh->b_this_page = head;
238303158cd7SNick Piggin 		bh = bh->b_this_page;
238403158cd7SNick Piggin 	} while (bh != head);
238503158cd7SNick Piggin 	attach_page_buffers(page, head);
238603158cd7SNick Piggin 	spin_unlock(&page->mapping->private_lock);
238703158cd7SNick Piggin }
238803158cd7SNick Piggin 
238903158cd7SNick Piggin /*
23901da177e4SLinus Torvalds  * On entry, the page is not uptodate at all.
23911da177e4SLinus Torvalds  * On exit, the page is fully uptodate in the areas outside (from,to).
23921da177e4SLinus Torvalds  */
239303158cd7SNick Piggin int nobh_write_begin(struct file *file, struct address_space *mapping,
239403158cd7SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
239503158cd7SNick Piggin 			struct page **pagep, void **fsdata,
23961da177e4SLinus Torvalds 			get_block_t *get_block)
23971da177e4SLinus Torvalds {
239803158cd7SNick Piggin 	struct inode *inode = mapping->host;
23991da177e4SLinus Torvalds 	const unsigned blkbits = inode->i_blkbits;
24001da177e4SLinus Torvalds 	const unsigned blocksize = 1 << blkbits;
2401a4b0672dSNick Piggin 	struct buffer_head *head, *bh;
240203158cd7SNick Piggin 	struct page *page;
240303158cd7SNick Piggin 	pgoff_t index;
240403158cd7SNick Piggin 	unsigned from, to;
24051da177e4SLinus Torvalds 	unsigned block_in_page;
2406a4b0672dSNick Piggin 	unsigned block_start, block_end;
24071da177e4SLinus Torvalds 	sector_t block_in_file;
24081da177e4SLinus Torvalds 	int nr_reads = 0;
24091da177e4SLinus Torvalds 	int ret = 0;
24101da177e4SLinus Torvalds 	int is_mapped_to_disk = 1;
24111da177e4SLinus Torvalds 
241203158cd7SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
241303158cd7SNick Piggin 	from = pos & (PAGE_CACHE_SIZE - 1);
241403158cd7SNick Piggin 	to = from + len;
241503158cd7SNick Piggin 
241654566b2cSNick Piggin 	page = grab_cache_page_write_begin(mapping, index, flags);
241703158cd7SNick Piggin 	if (!page)
241803158cd7SNick Piggin 		return -ENOMEM;
241903158cd7SNick Piggin 	*pagep = page;
242003158cd7SNick Piggin 	*fsdata = NULL;
242103158cd7SNick Piggin 
242203158cd7SNick Piggin 	if (page_has_buffers(page)) {
242303158cd7SNick Piggin 		unlock_page(page);
242403158cd7SNick Piggin 		page_cache_release(page);
242503158cd7SNick Piggin 		*pagep = NULL;
242603158cd7SNick Piggin 		return block_write_begin(file, mapping, pos, len, flags, pagep,
242703158cd7SNick Piggin 					fsdata, get_block);
242803158cd7SNick Piggin 	}
2429a4b0672dSNick Piggin 
24301da177e4SLinus Torvalds 	if (PageMappedToDisk(page))
24311da177e4SLinus Torvalds 		return 0;
24321da177e4SLinus Torvalds 
2433a4b0672dSNick Piggin 	/*
2434a4b0672dSNick Piggin 	 * Allocate buffers so that we can keep track of state, and potentially
2435a4b0672dSNick Piggin 	 * attach them to the page if an error occurs. In the common case of
2436a4b0672dSNick Piggin 	 * no error, they will just be freed again without ever being attached
2437a4b0672dSNick Piggin 	 * to the page (which is all OK, because we're under the page lock).
2438a4b0672dSNick Piggin 	 *
2439a4b0672dSNick Piggin 	 * Be careful: the buffer linked list is a NULL terminated one, rather
2440a4b0672dSNick Piggin 	 * than the circular one we're used to.
2441a4b0672dSNick Piggin 	 */
2442a4b0672dSNick Piggin 	head = alloc_page_buffers(page, blocksize, 0);
244303158cd7SNick Piggin 	if (!head) {
244403158cd7SNick Piggin 		ret = -ENOMEM;
244503158cd7SNick Piggin 		goto out_release;
244603158cd7SNick Piggin 	}
2447a4b0672dSNick Piggin 
24481da177e4SLinus Torvalds 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
24491da177e4SLinus Torvalds 
24501da177e4SLinus Torvalds 	/*
24511da177e4SLinus Torvalds 	 * We loop across all blocks in the page, whether or not they are
24521da177e4SLinus Torvalds 	 * part of the affected region.  This is so we can discover if the
24531da177e4SLinus Torvalds 	 * page is fully mapped-to-disk.
24541da177e4SLinus Torvalds 	 */
2455a4b0672dSNick Piggin 	for (block_start = 0, block_in_page = 0, bh = head;
24561da177e4SLinus Torvalds 		  block_start < PAGE_CACHE_SIZE;
2457a4b0672dSNick Piggin 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
24581da177e4SLinus Torvalds 		int create;
24591da177e4SLinus Torvalds 
2460a4b0672dSNick Piggin 		block_end = block_start + blocksize;
2461a4b0672dSNick Piggin 		bh->b_state = 0;
24621da177e4SLinus Torvalds 		create = 1;
24631da177e4SLinus Torvalds 		if (block_start >= to)
24641da177e4SLinus Torvalds 			create = 0;
24651da177e4SLinus Torvalds 		ret = get_block(inode, block_in_file + block_in_page,
2466a4b0672dSNick Piggin 					bh, create);
24671da177e4SLinus Torvalds 		if (ret)
24681da177e4SLinus Torvalds 			goto failed;
2469a4b0672dSNick Piggin 		if (!buffer_mapped(bh))
24701da177e4SLinus Torvalds 			is_mapped_to_disk = 0;
2471a4b0672dSNick Piggin 		if (buffer_new(bh))
2472a4b0672dSNick Piggin 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2473a4b0672dSNick Piggin 		if (PageUptodate(page)) {
2474a4b0672dSNick Piggin 			set_buffer_uptodate(bh);
24751da177e4SLinus Torvalds 			continue;
2476a4b0672dSNick Piggin 		}
2477a4b0672dSNick Piggin 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2478eebd2aa3SChristoph Lameter 			zero_user_segments(page, block_start, from,
2479eebd2aa3SChristoph Lameter 							to, block_end);
24801da177e4SLinus Torvalds 			continue;
24811da177e4SLinus Torvalds 		}
2482a4b0672dSNick Piggin 		if (buffer_uptodate(bh))
24831da177e4SLinus Torvalds 			continue;	/* reiserfs does this */
24841da177e4SLinus Torvalds 		if (block_start < from || block_end > to) {
2485a4b0672dSNick Piggin 			lock_buffer(bh);
2486a4b0672dSNick Piggin 			bh->b_end_io = end_buffer_read_nobh;
2487a4b0672dSNick Piggin 			submit_bh(READ, bh);
2488a4b0672dSNick Piggin 			nr_reads++;
24891da177e4SLinus Torvalds 		}
24901da177e4SLinus Torvalds 	}
24911da177e4SLinus Torvalds 
24921da177e4SLinus Torvalds 	if (nr_reads) {
24931da177e4SLinus Torvalds 		/*
24941da177e4SLinus Torvalds 		 * The page is locked, so these buffers are protected from
24951da177e4SLinus Torvalds 		 * any VM or truncate activity.  Hence we don't need to care
24961da177e4SLinus Torvalds 		 * for the buffer_head refcounts.
24971da177e4SLinus Torvalds 		 */
2498a4b0672dSNick Piggin 		for (bh = head; bh; bh = bh->b_this_page) {
24991da177e4SLinus Torvalds 			wait_on_buffer(bh);
25001da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
25011da177e4SLinus Torvalds 				ret = -EIO;
25021da177e4SLinus Torvalds 		}
25031da177e4SLinus Torvalds 		if (ret)
25041da177e4SLinus Torvalds 			goto failed;
25051da177e4SLinus Torvalds 	}
25061da177e4SLinus Torvalds 
25071da177e4SLinus Torvalds 	if (is_mapped_to_disk)
25081da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
25091da177e4SLinus Torvalds 
251003158cd7SNick Piggin 	*fsdata = head; /* to be released by nobh_write_end */
2511a4b0672dSNick Piggin 
25121da177e4SLinus Torvalds 	return 0;
25131da177e4SLinus Torvalds 
25141da177e4SLinus Torvalds failed:
251503158cd7SNick Piggin 	BUG_ON(!ret);
25161da177e4SLinus Torvalds 	/*
2517a4b0672dSNick Piggin 	 * Error recovery is a bit difficult. We need to zero out blocks that
2518a4b0672dSNick Piggin 	 * were newly allocated, and dirty them to ensure they get written out.
2519a4b0672dSNick Piggin 	 * Buffers need to be attached to the page at this point, otherwise
2520a4b0672dSNick Piggin 	 * the handling of potential IO errors during writeout would be hard
2521a4b0672dSNick Piggin 	 * (could try doing synchronous writeout, but what if that fails too?)
25221da177e4SLinus Torvalds 	 */
252303158cd7SNick Piggin 	attach_nobh_buffers(page, head);
252403158cd7SNick Piggin 	page_zero_new_buffers(page, from, to);
2525a4b0672dSNick Piggin 
252603158cd7SNick Piggin out_release:
252703158cd7SNick Piggin 	unlock_page(page);
252803158cd7SNick Piggin 	page_cache_release(page);
252903158cd7SNick Piggin 	*pagep = NULL;
2530a4b0672dSNick Piggin 
253103158cd7SNick Piggin 	if (pos + len > inode->i_size)
253203158cd7SNick Piggin 		vmtruncate(inode, inode->i_size);
2533a4b0672dSNick Piggin 
25341da177e4SLinus Torvalds 	return ret;
25351da177e4SLinus Torvalds }
253603158cd7SNick Piggin EXPORT_SYMBOL(nobh_write_begin);
25371da177e4SLinus Torvalds 
253803158cd7SNick Piggin int nobh_write_end(struct file *file, struct address_space *mapping,
253903158cd7SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
254003158cd7SNick Piggin 			struct page *page, void *fsdata)
25411da177e4SLinus Torvalds {
25421da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
2543efdc3131SNick Piggin 	struct buffer_head *head = fsdata;
254403158cd7SNick Piggin 	struct buffer_head *bh;
25455b41e74aSDmitri Monakhov 	BUG_ON(fsdata != NULL && page_has_buffers(page));
25461da177e4SLinus Torvalds 
2547d4cf109fSDave Kleikamp 	if (unlikely(copied < len) && head)
254803158cd7SNick Piggin 		attach_nobh_buffers(page, head);
2549a4b0672dSNick Piggin 	if (page_has_buffers(page))
255003158cd7SNick Piggin 		return generic_write_end(file, mapping, pos, len,
255103158cd7SNick Piggin 					copied, page, fsdata);
2552a4b0672dSNick Piggin 
255322c8ca78SNick Piggin 	SetPageUptodate(page);
25541da177e4SLinus Torvalds 	set_page_dirty(page);
255503158cd7SNick Piggin 	if (pos+copied > inode->i_size) {
255603158cd7SNick Piggin 		i_size_write(inode, pos+copied);
25571da177e4SLinus Torvalds 		mark_inode_dirty(inode);
25581da177e4SLinus Torvalds 	}
255903158cd7SNick Piggin 
256003158cd7SNick Piggin 	unlock_page(page);
256103158cd7SNick Piggin 	page_cache_release(page);
256203158cd7SNick Piggin 
256303158cd7SNick Piggin 	while (head) {
256403158cd7SNick Piggin 		bh = head;
256503158cd7SNick Piggin 		head = head->b_this_page;
256603158cd7SNick Piggin 		free_buffer_head(bh);
25671da177e4SLinus Torvalds 	}
256803158cd7SNick Piggin 
256903158cd7SNick Piggin 	return copied;
257003158cd7SNick Piggin }
257103158cd7SNick Piggin EXPORT_SYMBOL(nobh_write_end);
25721da177e4SLinus Torvalds 
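/*
 * Example (editor's sketch; the myfs_* names are hypothetical): because
 * nobh_write_begin() takes a get_block_t, filesystems plug it into their
 * address_space operations through a wrapper, while nobh_write_end() can
 * be used directly:
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return nobh_write_begin(file, mapping, pos, len, flags,
 *					pagep, fsdata, myfs_get_block);
 *	}
 *
 *	.write_begin	= myfs_write_begin,
 *	.write_end	= nobh_write_end,
 */
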
25731da177e4SLinus Torvalds /*
25741da177e4SLinus Torvalds  * nobh_writepage() - based on block_write_full_page() except
25751da177e4SLinus Torvalds  * that it tries to operate without attaching bufferheads to
25761da177e4SLinus Torvalds  * the page.
25771da177e4SLinus Torvalds  */
25781da177e4SLinus Torvalds int nobh_writepage(struct page *page, get_block_t *get_block,
25791da177e4SLinus Torvalds 			struct writeback_control *wbc)
25801da177e4SLinus Torvalds {
25811da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
25821da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
25831da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
25841da177e4SLinus Torvalds 	unsigned offset;
25851da177e4SLinus Torvalds 	int ret;
25861da177e4SLinus Torvalds 
25871da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
25881da177e4SLinus Torvalds 	if (page->index < end_index)
25891da177e4SLinus Torvalds 		goto out;
25901da177e4SLinus Torvalds 
25911da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
25921da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
25931da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
25941da177e4SLinus Torvalds 		/*
25951da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
25961da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
25971da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
25981da177e4SLinus Torvalds 		 */
25991da177e4SLinus Torvalds #if 0
26001da177e4SLinus Torvalds 		/* Not really sure about this - do we need this? */
26011da177e4SLinus Torvalds 		if (page->mapping->a_ops->invalidatepage)
26021da177e4SLinus Torvalds 			page->mapping->a_ops->invalidatepage(page, offset);
26031da177e4SLinus Torvalds #endif
26041da177e4SLinus Torvalds 		unlock_page(page);
26051da177e4SLinus Torvalds 		return 0; /* don't care */
26061da177e4SLinus Torvalds 	}
26071da177e4SLinus Torvalds 
26081da177e4SLinus Torvalds 	/*
26091da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
26101da177e4SLinus Torvalds 	 * writepage invocation because it may be mmapped.  "A file is mapped
26111da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
26121da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
26131da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
26141da177e4SLinus Torvalds 	 */
2615eebd2aa3SChristoph Lameter 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
26161da177e4SLinus Torvalds out:
26171da177e4SLinus Torvalds 	ret = mpage_writepage(page, get_block, wbc);
26181da177e4SLinus Torvalds 	if (ret == -EAGAIN)
26191da177e4SLinus Torvalds 		ret = __block_write_full_page(inode, page, get_block, wbc);
26201da177e4SLinus Torvalds 	return ret;
26211da177e4SLinus Torvalds }
26221da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_writepage);
26231da177e4SLinus Torvalds 
262403158cd7SNick Piggin int nobh_truncate_page(struct address_space *mapping,
262503158cd7SNick Piggin 			loff_t from, get_block_t *get_block)
26261da177e4SLinus Torvalds {
26271da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
26281da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
262903158cd7SNick Piggin 	unsigned blocksize;
263003158cd7SNick Piggin 	sector_t iblock;
263103158cd7SNick Piggin 	unsigned length, pos;
263203158cd7SNick Piggin 	struct inode *inode = mapping->host;
26331da177e4SLinus Torvalds 	struct page *page;
263403158cd7SNick Piggin 	struct buffer_head map_bh;
263503158cd7SNick Piggin 	int err;
26361da177e4SLinus Torvalds 
263703158cd7SNick Piggin 	blocksize = 1 << inode->i_blkbits;
263803158cd7SNick Piggin 	length = offset & (blocksize - 1);
26391da177e4SLinus Torvalds 
264003158cd7SNick Piggin 	/* Block boundary? Nothing to do */
264103158cd7SNick Piggin 	if (!length)
264203158cd7SNick Piggin 		return 0;
264303158cd7SNick Piggin 
264403158cd7SNick Piggin 	length = blocksize - length;
264503158cd7SNick Piggin 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
264603158cd7SNick Piggin 
26471da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
264803158cd7SNick Piggin 	err = -ENOMEM;
26491da177e4SLinus Torvalds 	if (!page)
26501da177e4SLinus Torvalds 		goto out;
26511da177e4SLinus Torvalds 
265203158cd7SNick Piggin 	if (page_has_buffers(page)) {
265303158cd7SNick Piggin has_buffers:
265403158cd7SNick Piggin 		unlock_page(page);
265503158cd7SNick Piggin 		page_cache_release(page);
265603158cd7SNick Piggin 		return block_truncate_page(mapping, from, get_block);
26571da177e4SLinus Torvalds 	}
265803158cd7SNick Piggin 
265903158cd7SNick Piggin 	/* Find the buffer that contains "offset" */
266003158cd7SNick Piggin 	pos = blocksize;
266103158cd7SNick Piggin 	while (offset >= pos) {
266203158cd7SNick Piggin 		iblock++;
266303158cd7SNick Piggin 		pos += blocksize;
266403158cd7SNick Piggin 	}
266503158cd7SNick Piggin 
266603158cd7SNick Piggin 	err = get_block(inode, iblock, &map_bh, 0);
266703158cd7SNick Piggin 	if (err)
266803158cd7SNick Piggin 		goto unlock;
266903158cd7SNick Piggin 	/* unmapped? It's a hole - nothing to do */
267003158cd7SNick Piggin 	if (!buffer_mapped(&map_bh))
267103158cd7SNick Piggin 		goto unlock;
267203158cd7SNick Piggin 
267303158cd7SNick Piggin 	/* Ok, it's mapped. Make sure it's up-to-date */
267403158cd7SNick Piggin 	if (!PageUptodate(page)) {
267503158cd7SNick Piggin 		err = mapping->a_ops->readpage(NULL, page);
267603158cd7SNick Piggin 		if (err) {
267703158cd7SNick Piggin 			page_cache_release(page);
267803158cd7SNick Piggin 			goto out;
267903158cd7SNick Piggin 		}
268003158cd7SNick Piggin 		lock_page(page);
268103158cd7SNick Piggin 		if (!PageUptodate(page)) {
268203158cd7SNick Piggin 			err = -EIO;
268303158cd7SNick Piggin 			goto unlock;
268403158cd7SNick Piggin 		}
268503158cd7SNick Piggin 		if (page_has_buffers(page))
268603158cd7SNick Piggin 			goto has_buffers;
268703158cd7SNick Piggin 	}
2688eebd2aa3SChristoph Lameter 	zero_user(page, offset, length);
268903158cd7SNick Piggin 	set_page_dirty(page);
269003158cd7SNick Piggin 	err = 0;
269103158cd7SNick Piggin 
269203158cd7SNick Piggin unlock:
26931da177e4SLinus Torvalds 	unlock_page(page);
26941da177e4SLinus Torvalds 	page_cache_release(page);
26951da177e4SLinus Torvalds out:
269603158cd7SNick Piggin 	return err;
26971da177e4SLinus Torvalds }
26981da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_truncate_page);
26991da177e4SLinus Torvalds 
27001da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping,
27011da177e4SLinus Torvalds 			loff_t from, get_block_t *get_block)
27021da177e4SLinus Torvalds {
27031da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
27041da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
27051da177e4SLinus Torvalds 	unsigned blocksize;
270654b21a79SAndrew Morton 	sector_t iblock;
27071da177e4SLinus Torvalds 	unsigned length, pos;
27081da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
27091da177e4SLinus Torvalds 	struct page *page;
27101da177e4SLinus Torvalds 	struct buffer_head *bh;
27111da177e4SLinus Torvalds 	int err;
27121da177e4SLinus Torvalds 
27131da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
27141da177e4SLinus Torvalds 	length = offset & (blocksize - 1);
27151da177e4SLinus Torvalds 
27161da177e4SLinus Torvalds 	/* Block boundary? Nothing to do */
27171da177e4SLinus Torvalds 	if (!length)
27181da177e4SLinus Torvalds 		return 0;
27191da177e4SLinus Torvalds 
27201da177e4SLinus Torvalds 	length = blocksize - length;
272154b21a79SAndrew Morton 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
27221da177e4SLinus Torvalds 
27231da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
27241da177e4SLinus Torvalds 	err = -ENOMEM;
27251da177e4SLinus Torvalds 	if (!page)
27261da177e4SLinus Torvalds 		goto out;
27271da177e4SLinus Torvalds 
27281da177e4SLinus Torvalds 	if (!page_has_buffers(page))
27291da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
27301da177e4SLinus Torvalds 
27311da177e4SLinus Torvalds 	/* Find the buffer that contains "offset" */
27321da177e4SLinus Torvalds 	bh = page_buffers(page);
27331da177e4SLinus Torvalds 	pos = blocksize;
27341da177e4SLinus Torvalds 	while (offset >= pos) {
27351da177e4SLinus Torvalds 		bh = bh->b_this_page;
27361da177e4SLinus Torvalds 		iblock++;
27371da177e4SLinus Torvalds 		pos += blocksize;
27381da177e4SLinus Torvalds 	}
27391da177e4SLinus Torvalds 
27401da177e4SLinus Torvalds 	err = 0;
27411da177e4SLinus Torvalds 	if (!buffer_mapped(bh)) {
2742b0cf2321SBadari Pulavarty 		WARN_ON(bh->b_size != blocksize);
27431da177e4SLinus Torvalds 		err = get_block(inode, iblock, bh, 0);
27441da177e4SLinus Torvalds 		if (err)
27451da177e4SLinus Torvalds 			goto unlock;
27461da177e4SLinus Torvalds 		/* unmapped? It's a hole - nothing to do */
27471da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
27481da177e4SLinus Torvalds 			goto unlock;
27491da177e4SLinus Torvalds 	}
27501da177e4SLinus Torvalds 
27511da177e4SLinus Torvalds 	/* Ok, it's mapped. Make sure it's up-to-date */
27521da177e4SLinus Torvalds 	if (PageUptodate(page))
27531da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
27541da177e4SLinus Torvalds 
275533a266ddSDavid Chinner 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
27561da177e4SLinus Torvalds 		err = -EIO;
27571da177e4SLinus Torvalds 		ll_rw_block(READ, 1, &bh);
27581da177e4SLinus Torvalds 		wait_on_buffer(bh);
27591da177e4SLinus Torvalds 		/* Uhhuh. Read error. Complain and punt. */
27601da177e4SLinus Torvalds 		if (!buffer_uptodate(bh))
27611da177e4SLinus Torvalds 			goto unlock;
27621da177e4SLinus Torvalds 	}
27631da177e4SLinus Torvalds 
2764eebd2aa3SChristoph Lameter 	zero_user(page, offset, length);
27651da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
27661da177e4SLinus Torvalds 	err = 0;
27671da177e4SLinus Torvalds 
27681da177e4SLinus Torvalds unlock:
27691da177e4SLinus Torvalds 	unlock_page(page);
27701da177e4SLinus Torvalds 	page_cache_release(page);
27711da177e4SLinus Torvalds out:
27721da177e4SLinus Torvalds 	return err;
27731da177e4SLinus Torvalds }
27741da177e4SLinus Torvalds 
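/*
 * Example (editor's illustration; myfs_truncate and myfs_get_block are
 * hypothetical): a typical ->truncate implementation zeroes the tail of
 * the last partial block with block_truncate_page() before releasing the
 * now-unused block mappings:
 *
 *	static void myfs_truncate(struct inode *inode)
 *	{
 *		block_truncate_page(inode->i_mapping, inode->i_size,
 *					myfs_get_block);
 *		... then free the blocks beyond the new i_size ...
 *	}
 */
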
27751da177e4SLinus Torvalds /*
27761da177e4SLinus Torvalds  * The generic ->writepage function for buffer-backed address_spaces
27771da177e4SLinus Torvalds  */
27781da177e4SLinus Torvalds int block_write_full_page(struct page *page, get_block_t *get_block,
27791da177e4SLinus Torvalds 			struct writeback_control *wbc)
27801da177e4SLinus Torvalds {
27811da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
27821da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
27831da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
27841da177e4SLinus Torvalds 	unsigned offset;
27851da177e4SLinus Torvalds 
27861da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
27871da177e4SLinus Torvalds 	if (page->index < end_index)
27881da177e4SLinus Torvalds 		return __block_write_full_page(inode, page, get_block, wbc);
27891da177e4SLinus Torvalds 
27901da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
27911da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
27921da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
27931da177e4SLinus Torvalds 		/*
27941da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
27951da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
27961da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
27971da177e4SLinus Torvalds 		 */
2798aaa4059bSJan Kara 		do_invalidatepage(page, 0);
27991da177e4SLinus Torvalds 		unlock_page(page);
28001da177e4SLinus Torvalds 		return 0; /* don't care */
28011da177e4SLinus Torvalds 	}
28021da177e4SLinus Torvalds 
28031da177e4SLinus Torvalds 	/*
28041da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
28051da177e4SLinus Torvalds 	 * writepage invocation because it may be mmapped.  "A file is mapped
28061da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
28071da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
28081da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
28091da177e4SLinus Torvalds 	 */
2810eebd2aa3SChristoph Lameter 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
28111da177e4SLinus Torvalds 	return __block_write_full_page(inode, page, get_block, wbc);
28121da177e4SLinus Torvalds }
28131da177e4SLinus Torvalds 
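/*
 * Example (editor's sketch, hypothetical myfs_get_block): the usual
 * ->writepage wrapper for a buffer-backed address_space:
 *
 *	static int myfs_writepage(struct page *page,
 *				  struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, myfs_get_block, wbc);
 *	}
 */
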
28141da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
28151da177e4SLinus Torvalds 			    get_block_t *get_block)
28161da177e4SLinus Torvalds {
28171da177e4SLinus Torvalds 	struct buffer_head tmp;
28181da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
28191da177e4SLinus Torvalds 	tmp.b_state = 0;
28201da177e4SLinus Torvalds 	tmp.b_blocknr = 0;
2821b0cf2321SBadari Pulavarty 	tmp.b_size = 1 << inode->i_blkbits;
28221da177e4SLinus Torvalds 	get_block(inode, block, &tmp, 0);
28231da177e4SLinus Torvalds 	return tmp.b_blocknr;
28241da177e4SLinus Torvalds }
28251da177e4SLinus Torvalds 
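/*
 * Example (editor's sketch, hypothetical myfs_get_block): ->bmap support
 * reduces to a one-line wrapper around generic_block_bmap():
 *
 *	static sector_t myfs_bmap(struct address_space *mapping,
 *				  sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */
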
28266712ecf8SNeilBrown static void end_bio_bh_io_sync(struct bio *bio, int err)
28271da177e4SLinus Torvalds {
28281da177e4SLinus Torvalds 	struct buffer_head *bh = bio->bi_private;
28291da177e4SLinus Torvalds 
28301da177e4SLinus Torvalds 	if (err == -EOPNOTSUPP) {
28311da177e4SLinus Torvalds 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
28321da177e4SLinus Torvalds 		set_bit(BH_Eopnotsupp, &bh->b_state);
28331da177e4SLinus Torvalds 	}
28341da177e4SLinus Torvalds 
283508bafc03SKeith Mannthey 	if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
283608bafc03SKeith Mannthey 		set_bit(BH_Quiet, &bh->b_state);
283708bafc03SKeith Mannthey 
28381da177e4SLinus Torvalds 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
28391da177e4SLinus Torvalds 	bio_put(bio);
28401da177e4SLinus Torvalds }
28411da177e4SLinus Torvalds 
28421da177e4SLinus Torvalds int submit_bh(int rw, struct buffer_head * bh)
28431da177e4SLinus Torvalds {
28441da177e4SLinus Torvalds 	struct bio *bio;
28451da177e4SLinus Torvalds 	int ret = 0;
28461da177e4SLinus Torvalds 
28471da177e4SLinus Torvalds 	BUG_ON(!buffer_locked(bh));
28481da177e4SLinus Torvalds 	BUG_ON(!buffer_mapped(bh));
28491da177e4SLinus Torvalds 	BUG_ON(!bh->b_end_io);
28501da177e4SLinus Torvalds 
285148fd4f93SJens Axboe 	/*
285248fd4f93SJens Axboe 	 * Mask in barrier bit for a write (could be either a WRITE or a
285348fd4f93SJens Axboe 	 * WRITE_SYNC
285448fd4f93SJens Axboe 	 */
285548fd4f93SJens Axboe 	if (buffer_ordered(bh) && (rw & WRITE))
285648fd4f93SJens Axboe 		rw |= WRITE_BARRIER;
28571da177e4SLinus Torvalds 
28581da177e4SLinus Torvalds 	/*
285948fd4f93SJens Axboe 	 * Only clear out a write error when rewriting
28601da177e4SLinus Torvalds 	 */
286148fd4f93SJens Axboe 	if (test_set_buffer_req(bh) && (rw & WRITE))
28621da177e4SLinus Torvalds 		clear_buffer_write_io_error(bh);
28631da177e4SLinus Torvalds 
28641da177e4SLinus Torvalds 	/*
28651da177e4SLinus Torvalds 	 * from here on down, it's all bio -- do the initial mapping,
28661da177e4SLinus Torvalds 	 * submit_bio -> generic_make_request may further map this bio around
28671da177e4SLinus Torvalds 	 */
28681da177e4SLinus Torvalds 	bio = bio_alloc(GFP_NOIO, 1);
28691da177e4SLinus Torvalds 
28701da177e4SLinus Torvalds 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
28711da177e4SLinus Torvalds 	bio->bi_bdev = bh->b_bdev;
28721da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_page = bh->b_page;
28731da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_len = bh->b_size;
28741da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
28751da177e4SLinus Torvalds 
28761da177e4SLinus Torvalds 	bio->bi_vcnt = 1;
28771da177e4SLinus Torvalds 	bio->bi_idx = 0;
28781da177e4SLinus Torvalds 	bio->bi_size = bh->b_size;
28791da177e4SLinus Torvalds 
28801da177e4SLinus Torvalds 	bio->bi_end_io = end_bio_bh_io_sync;
28811da177e4SLinus Torvalds 	bio->bi_private = bh;
28821da177e4SLinus Torvalds 
28831da177e4SLinus Torvalds 	bio_get(bio);
28841da177e4SLinus Torvalds 	submit_bio(rw, bio);
28851da177e4SLinus Torvalds 
28861da177e4SLinus Torvalds 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
28871da177e4SLinus Torvalds 		ret = -EOPNOTSUPP;
28881da177e4SLinus Torvalds 
28891da177e4SLinus Torvalds 	bio_put(bio);
28901da177e4SLinus Torvalds 	return ret;
28911da177e4SLinus Torvalds }
28921da177e4SLinus Torvalds 
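/*
 * Example (editor's sketch): the canonical synchronous read built on
 * submit_bh(); this is essentially what ll_rw_block() and
 * bh_submit_read() below do internally:
 *
 *	lock_buffer(bh);
 *	get_bh(bh);
 *	bh->b_end_io = end_buffer_read_sync;
 *	submit_bh(READ, bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		return -EIO;
 */
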
28931da177e4SLinus Torvalds /**
28941da177e4SLinus Torvalds  * ll_rw_block: low-level access to block devices (DEPRECATED)
2895a7662236SJan Kara  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
28961da177e4SLinus Torvalds  * @nr: number of &struct buffer_heads in the array
28971da177e4SLinus Torvalds  * @bhs: array of pointers to &struct buffer_head
28981da177e4SLinus Torvalds  *
2899a7662236SJan Kara  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2900a7662236SJan Kara  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2901a7662236SJan Kara  * option, %SWRITE, is like %WRITE except that we make sure the *current* data
2902a7662236SJan Kara  * in the buffers is sent to disk. The fourth option, %READA, is described in
2903a7662236SJan Kara  * the documentation for generic_make_request(), which ll_rw_block() calls.
29041da177e4SLinus Torvalds  *
29051da177e4SLinus Torvalds  * This function drops any buffer that it cannot get a lock on (with the
2906a7662236SJan Kara  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2907a7662236SJan Kara  * clean when doing a write request, and any buffer that appears to be
2908a7662236SJan Kara  * up-to-date when doing a read request.  Further, it marks as clean the
2909a7662236SJan Kara  * buffers that are processed for writing (the buffer cache won't assume
2910a7662236SJan Kara  * that they are actually clean until the buffer gets unlocked).
29111da177e4SLinus Torvalds  *
29121da177e4SLinus Torvalds  * ll_rw_block sets b_end_io to a simple completion handler that marks
29131da177e4SLinus Torvalds  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
29141da177e4SLinus Torvalds  * any waiters.
29151da177e4SLinus Torvalds  *
29161da177e4SLinus Torvalds  * All of the buffers must be for the same device, and must also be a
29171da177e4SLinus Torvalds  * multiple of the current approved size for the device.
29181da177e4SLinus Torvalds  */
29191da177e4SLinus Torvalds void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
29201da177e4SLinus Torvalds {
29211da177e4SLinus Torvalds 	int i;
29221da177e4SLinus Torvalds 
29231da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
29241da177e4SLinus Torvalds 		struct buffer_head *bh = bhs[i];
29251da177e4SLinus Torvalds 
292618ce3751SJens Axboe 		if (rw == SWRITE || rw == SWRITE_SYNC)
2927a7662236SJan Kara 			lock_buffer(bh);
2928ca5de404SNick Piggin 		else if (!trylock_buffer(bh))
29291da177e4SLinus Torvalds 			continue;
29301da177e4SLinus Torvalds 
293118ce3751SJens Axboe 		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
29321da177e4SLinus Torvalds 			if (test_clear_buffer_dirty(bh)) {
293376c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_write_sync;
2934e60e5c50SOGAWA Hirofumi 				get_bh(bh);
293518ce3751SJens Axboe 				if (rw == SWRITE_SYNC)
293618ce3751SJens Axboe 					submit_bh(WRITE_SYNC, bh);
293718ce3751SJens Axboe 				else
29381da177e4SLinus Torvalds 					submit_bh(WRITE, bh);
29391da177e4SLinus Torvalds 				continue;
29401da177e4SLinus Torvalds 			}
29411da177e4SLinus Torvalds 		} else {
29421da177e4SLinus Torvalds 			if (!buffer_uptodate(bh)) {
294376c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_read_sync;
2944e60e5c50SOGAWA Hirofumi 				get_bh(bh);
29451da177e4SLinus Torvalds 				submit_bh(rw, bh);
29461da177e4SLinus Torvalds 				continue;
29471da177e4SLinus Torvalds 			}
29481da177e4SLinus Torvalds 		}
29491da177e4SLinus Torvalds 		unlock_buffer(bh);
29501da177e4SLinus Torvalds 	}
29511da177e4SLinus Torvalds }
29521da177e4SLinus Torvalds 
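/*
 * Example (editor's sketch; bhs, nr, i and err are assumed locals):
 * batching reads with ll_rw_block() and then waiting.  Buffers that were
 * already locked or uptodate are simply skipped by ll_rw_block(), so the
 * wait loop below still handles them correctly:
 *
 *	ll_rw_block(READ, nr, bhs);
 *	for (i = 0; i < nr; i++) {
 *		wait_on_buffer(bhs[i]);
 *		if (!buffer_uptodate(bhs[i]))
 *			err = -EIO;
 *	}
 */
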
29531da177e4SLinus Torvalds /*
29541da177e4SLinus Torvalds  * For a data-integrity writeout, we need to wait upon any in-progress I/O
29551da177e4SLinus Torvalds  * and then start new I/O and then wait upon it.  The caller must have a ref on
29561da177e4SLinus Torvalds  * the buffer_head.
29571da177e4SLinus Torvalds  */
29581da177e4SLinus Torvalds int sync_dirty_buffer(struct buffer_head *bh)
29591da177e4SLinus Torvalds {
29601da177e4SLinus Torvalds 	int ret = 0;
29611da177e4SLinus Torvalds 
29621da177e4SLinus Torvalds 	WARN_ON(atomic_read(&bh->b_count) < 1);
29631da177e4SLinus Torvalds 	lock_buffer(bh);
29641da177e4SLinus Torvalds 	if (test_clear_buffer_dirty(bh)) {
29651da177e4SLinus Torvalds 		get_bh(bh);
29661da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_write_sync;
296778f707bfSJens Axboe 		ret = submit_bh(WRITE, bh);
29681da177e4SLinus Torvalds 		wait_on_buffer(bh);
29691da177e4SLinus Torvalds 		if (buffer_eopnotsupp(bh)) {
29701da177e4SLinus Torvalds 			clear_buffer_eopnotsupp(bh);
29711da177e4SLinus Torvalds 			ret = -EOPNOTSUPP;
29721da177e4SLinus Torvalds 		}
29731da177e4SLinus Torvalds 		if (!ret && !buffer_uptodate(bh))
29741da177e4SLinus Torvalds 			ret = -EIO;
29751da177e4SLinus Torvalds 	} else {
29761da177e4SLinus Torvalds 		unlock_buffer(bh);
29771da177e4SLinus Torvalds 	}
29781da177e4SLinus Torvalds 	return ret;
29791da177e4SLinus Torvalds }
29801da177e4SLinus Torvalds 
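/*
 * Example (editor's sketch; offset, src and len are assumed, and any
 * locking around the modification is elided): a data-integrity update of
 * a metadata block by a caller holding a reference on bh:
 *
 *	memcpy(bh->b_data + offset, src, len);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 */
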
29811da177e4SLinus Torvalds /*
29821da177e4SLinus Torvalds  * try_to_free_buffers() checks if all the buffers on this particular page
29831da177e4SLinus Torvalds  * are unused, and releases them if so.
29841da177e4SLinus Torvalds  *
29851da177e4SLinus Torvalds  * Exclusion against try_to_free_buffers may be obtained by either
29861da177e4SLinus Torvalds  * locking the page or by holding its mapping's private_lock.
29871da177e4SLinus Torvalds  *
29881da177e4SLinus Torvalds  * If the page is dirty but all the buffers are clean then we need to
29891da177e4SLinus Torvalds  * be sure to mark the page clean as well.  This is because the page
29901da177e4SLinus Torvalds  * may be against a block device, and a later reattachment of buffers
29911da177e4SLinus Torvalds  * to a dirty page will set *all* buffers dirty.  Which would corrupt
29921da177e4SLinus Torvalds  * filesystem data on the same device.
29931da177e4SLinus Torvalds  *
29941da177e4SLinus Torvalds  * The same applies to regular filesystem pages: if all the buffers are
29951da177e4SLinus Torvalds  * clean then we set the page clean and proceed.  To do that, we require
29961da177e4SLinus Torvalds  * total exclusion from __set_page_dirty_buffers().  That is obtained with
29971da177e4SLinus Torvalds  * private_lock.
29981da177e4SLinus Torvalds  *
29991da177e4SLinus Torvalds  * try_to_free_buffers() is non-blocking.
30001da177e4SLinus Torvalds  */
30011da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh)
30021da177e4SLinus Torvalds {
30031da177e4SLinus Torvalds 	return atomic_read(&bh->b_count) |
30041da177e4SLinus Torvalds 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
30051da177e4SLinus Torvalds }
30061da177e4SLinus Torvalds 
30071da177e4SLinus Torvalds static int
30081da177e4SLinus Torvalds drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
30091da177e4SLinus Torvalds {
30101da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);
30111da177e4SLinus Torvalds 	struct buffer_head *bh;
30121da177e4SLinus Torvalds 
30131da177e4SLinus Torvalds 	bh = head;
30141da177e4SLinus Torvalds 	do {
3015de7d5a3bSakpm@osdl.org 		if (buffer_write_io_error(bh) && page->mapping)
30161da177e4SLinus Torvalds 			set_bit(AS_EIO, &page->mapping->flags);
30171da177e4SLinus Torvalds 		if (buffer_busy(bh))
30181da177e4SLinus Torvalds 			goto failed;
30191da177e4SLinus Torvalds 		bh = bh->b_this_page;
30201da177e4SLinus Torvalds 	} while (bh != head);
30211da177e4SLinus Torvalds 
30221da177e4SLinus Torvalds 	do {
30231da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
30241da177e4SLinus Torvalds 
3025535ee2fbSJan Kara 		if (bh->b_assoc_map)
30261da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
30271da177e4SLinus Torvalds 		bh = next;
30281da177e4SLinus Torvalds 	} while (bh != head);
30291da177e4SLinus Torvalds 	*buffers_to_free = head;
30301da177e4SLinus Torvalds 	__clear_page_buffers(page);
30311da177e4SLinus Torvalds 	return 1;
30321da177e4SLinus Torvalds failed:
30331da177e4SLinus Torvalds 	return 0;
30341da177e4SLinus Torvalds }
30351da177e4SLinus Torvalds 
30361da177e4SLinus Torvalds int try_to_free_buffers(struct page *page)
30371da177e4SLinus Torvalds {
30381da177e4SLinus Torvalds 	struct address_space * const mapping = page->mapping;
30391da177e4SLinus Torvalds 	struct buffer_head *buffers_to_free = NULL;
30401da177e4SLinus Torvalds 	int ret = 0;
30411da177e4SLinus Torvalds 
30421da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
3043ecdfc978SLinus Torvalds 	if (PageWriteback(page))
30441da177e4SLinus Torvalds 		return 0;
30451da177e4SLinus Torvalds 
30461da177e4SLinus Torvalds 	if (mapping == NULL) {		/* can this still happen? */
30471da177e4SLinus Torvalds 		ret = drop_buffers(page, &buffers_to_free);
30481da177e4SLinus Torvalds 		goto out;
30491da177e4SLinus Torvalds 	}
30501da177e4SLinus Torvalds 
30511da177e4SLinus Torvalds 	spin_lock(&mapping->private_lock);
30521da177e4SLinus Torvalds 	ret = drop_buffers(page, &buffers_to_free);
3053ecdfc978SLinus Torvalds 
3054ecdfc978SLinus Torvalds 	/*
3055ecdfc978SLinus Torvalds 	 * If the filesystem writes its buffers by hand (eg ext3)
3056ecdfc978SLinus Torvalds 	 * then we can have clean buffers against a dirty page.  We
3057ecdfc978SLinus Torvalds 	 * clean the page here; otherwise the VM will never notice
3058ecdfc978SLinus Torvalds 	 * that the filesystem did any IO at all.
3059ecdfc978SLinus Torvalds 	 *
3060ecdfc978SLinus Torvalds 	 * Also, during truncate, discard_buffer will have marked all
3061ecdfc978SLinus Torvalds 	 * the page's buffers clean.  We discover that here and clean
3062ecdfc978SLinus Torvalds 	 * the page also.
306387df7241SNick Piggin 	 *
306487df7241SNick Piggin 	 * private_lock must be held over this entire operation in order
306587df7241SNick Piggin 	 * to synchronise against __set_page_dirty_buffers and prevent the
306687df7241SNick Piggin 	 * dirty bit from being lost.
3067ecdfc978SLinus Torvalds 	 */
3068ecdfc978SLinus Torvalds 	if (ret)
3069ecdfc978SLinus Torvalds 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
307087df7241SNick Piggin 	spin_unlock(&mapping->private_lock);
30711da177e4SLinus Torvalds out:
30721da177e4SLinus Torvalds 	if (buffers_to_free) {
30731da177e4SLinus Torvalds 		struct buffer_head *bh = buffers_to_free;
30741da177e4SLinus Torvalds 
30751da177e4SLinus Torvalds 		do {
30761da177e4SLinus Torvalds 			struct buffer_head *next = bh->b_this_page;
30771da177e4SLinus Torvalds 			free_buffer_head(bh);
30781da177e4SLinus Torvalds 			bh = next;
30791da177e4SLinus Torvalds 		} while (bh != buffers_to_free);
30801da177e4SLinus Torvalds 	}
30811da177e4SLinus Torvalds 	return ret;
30821da177e4SLinus Torvalds }
30831da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers);
30841da177e4SLinus Torvalds 
30853978d717SNeilBrown void block_sync_page(struct page *page)
30861da177e4SLinus Torvalds {
30871da177e4SLinus Torvalds 	struct address_space *mapping;
30881da177e4SLinus Torvalds 
30891da177e4SLinus Torvalds 	smp_mb();
30901da177e4SLinus Torvalds 	mapping = page_mapping(page);
30911da177e4SLinus Torvalds 	if (mapping)
30921da177e4SLinus Torvalds 		blk_run_backing_dev(mapping->backing_dev_info, page);
30931da177e4SLinus Torvalds }
30941da177e4SLinus Torvalds 
30951da177e4SLinus Torvalds /*
30961da177e4SLinus Torvalds  * There are no bdflush tunables left.  But distributions are
30971da177e4SLinus Torvalds  * still running obsolete flush daemons, so we terminate them here.
30981da177e4SLinus Torvalds  *
30991da177e4SLinus Torvalds  * Use of bdflush() is deprecated and will be removed in a future kernel.
31001da177e4SLinus Torvalds  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
31011da177e4SLinus Torvalds  */
3102bdc480e3SHeiko Carstens SYSCALL_DEFINE2(bdflush, int, func, long, data)
31031da177e4SLinus Torvalds {
31041da177e4SLinus Torvalds 	static int msg_count;
31051da177e4SLinus Torvalds 
31061da177e4SLinus Torvalds 	if (!capable(CAP_SYS_ADMIN))
31071da177e4SLinus Torvalds 		return -EPERM;
31081da177e4SLinus Torvalds 
31091da177e4SLinus Torvalds 	if (msg_count < 5) {
31101da177e4SLinus Torvalds 		msg_count++;
31111da177e4SLinus Torvalds 		printk(KERN_INFO
31121da177e4SLinus Torvalds 			"warning: process `%s' used the obsolete bdflush"
31131da177e4SLinus Torvalds 			" system call\n", current->comm);
31141da177e4SLinus Torvalds 		printk(KERN_INFO "Fix your initscripts?\n");
31151da177e4SLinus Torvalds 	}
31161da177e4SLinus Torvalds 
31171da177e4SLinus Torvalds 	if (func == 1)
31181da177e4SLinus Torvalds 		do_exit(0);
31191da177e4SLinus Torvalds 	return 0;
31201da177e4SLinus Torvalds }
31211da177e4SLinus Torvalds 
31221da177e4SLinus Torvalds /*
31231da177e4SLinus Torvalds  * Buffer-head allocation
31241da177e4SLinus Torvalds  */
3125e18b890bSChristoph Lameter static struct kmem_cache *bh_cachep;
31261da177e4SLinus Torvalds 
31271da177e4SLinus Torvalds /*
31281da177e4SLinus Torvalds  * Once the number of bh's in the machine exceeds this level, we start
31291da177e4SLinus Torvalds  * stripping them in writeback.
31301da177e4SLinus Torvalds  */
31311da177e4SLinus Torvalds static int max_buffer_heads;
31321da177e4SLinus Torvalds 
31331da177e4SLinus Torvalds int buffer_heads_over_limit;
31341da177e4SLinus Torvalds 
31351da177e4SLinus Torvalds struct bh_accounting {
31361da177e4SLinus Torvalds 	int nr;			/* Number of live bh's */
31371da177e4SLinus Torvalds 	int ratelimit;		/* Limit cacheline bouncing */
31381da177e4SLinus Torvalds };
31391da177e4SLinus Torvalds 
31401da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
31411da177e4SLinus Torvalds 
31421da177e4SLinus Torvalds static void recalc_bh_state(void)
31431da177e4SLinus Torvalds {
31441da177e4SLinus Torvalds 	int i;
31451da177e4SLinus Torvalds 	int tot = 0;
31461da177e4SLinus Torvalds 
31471da177e4SLinus Torvalds 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
31481da177e4SLinus Torvalds 		return;
31491da177e4SLinus Torvalds 	__get_cpu_var(bh_accounting).ratelimit = 0;
31508a143426SEric Dumazet 	for_each_online_cpu(i)
31511da177e4SLinus Torvalds 		tot += per_cpu(bh_accounting, i).nr;
31521da177e4SLinus Torvalds 	buffer_heads_over_limit = (tot > max_buffer_heads);
31531da177e4SLinus Torvalds }
31541da177e4SLinus Torvalds 
3155dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
31561da177e4SLinus Torvalds {
3157488514d1SChristoph Lameter 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
31581da177e4SLinus Torvalds 	if (ret) {
3159a35afb83SChristoph Lameter 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3160736c7b80SCoywolf Qi Hunt 		get_cpu_var(bh_accounting).nr++;
31611da177e4SLinus Torvalds 		recalc_bh_state();
3162736c7b80SCoywolf Qi Hunt 		put_cpu_var(bh_accounting);
31631da177e4SLinus Torvalds 	}
31641da177e4SLinus Torvalds 	return ret;
31651da177e4SLinus Torvalds }
31661da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head);
31671da177e4SLinus Torvalds 
31681da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh)
31691da177e4SLinus Torvalds {
31701da177e4SLinus Torvalds 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
31711da177e4SLinus Torvalds 	kmem_cache_free(bh_cachep, bh);
3172736c7b80SCoywolf Qi Hunt 	get_cpu_var(bh_accounting).nr--;
31731da177e4SLinus Torvalds 	recalc_bh_state();
3174736c7b80SCoywolf Qi Hunt 	put_cpu_var(bh_accounting);
31751da177e4SLinus Torvalds }
31761da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head);
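
/*
 * Minimal usage sketch (editor's addition; error handling elided):
 * a buffer head must leave b_assoc_buffers empty before it is freed,
 * or the BUG_ON() in free_buffer_head() fires:
 *
 *	struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
 *	if (!bh)
 *		return -ENOMEM;
 *	// ... attach to a page, perform I/O ...
 *	free_buffer_head(bh);
 */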
31771da177e4SLinus Torvalds 
31781da177e4SLinus Torvalds static void buffer_exit_cpu(int cpu)
31791da177e4SLinus Torvalds {
31801da177e4SLinus Torvalds 	int i;
31811da177e4SLinus Torvalds 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
31821da177e4SLinus Torvalds 
31831da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
31841da177e4SLinus Torvalds 		brelse(b->bhs[i]);
31851da177e4SLinus Torvalds 		b->bhs[i] = NULL;
31861da177e4SLinus Torvalds 	}
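	/*
	 * Fold the dead CPU's outstanding bh count into this CPU's
	 * counter so the global total stays accurate.
	 */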
31878a143426SEric Dumazet 	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
31888a143426SEric Dumazet 	per_cpu(bh_accounting, cpu).nr = 0;
31898a143426SEric Dumazet 	put_cpu_var(bh_accounting);
31901da177e4SLinus Torvalds }
31911da177e4SLinus Torvalds 
31921da177e4SLinus Torvalds static int buffer_cpu_notify(struct notifier_block *self,
31931da177e4SLinus Torvalds 			      unsigned long action, void *hcpu)
31941da177e4SLinus Torvalds {
31958bb78442SRafael J. Wysocki 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
31961da177e4SLinus Torvalds 		buffer_exit_cpu((unsigned long)hcpu);
31971da177e4SLinus Torvalds 	return NOTIFY_OK;
31981da177e4SLinus Torvalds }
31991da177e4SLinus Torvalds 
3200389d1b08SAneesh Kumar K.V /**
3201a6b91919SRandy Dunlap  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3202389d1b08SAneesh Kumar K.V  * @bh: the buffer to check
3203389d1b08SAneesh Kumar K.V  *
3204389d1b08SAneesh Kumar K.V  * Return 1 if the buffer is up-to-date; otherwise return 0 with the
3205389d1b08SAneesh Kumar K.V  * buffer locked, so the caller can bring it up to date and unlock it.
3206389d1b08SAneesh Kumar K.V  */
3207389d1b08SAneesh Kumar K.V int bh_uptodate_or_lock(struct buffer_head *bh)
3208389d1b08SAneesh Kumar K.V {
3209389d1b08SAneesh Kumar K.V 	if (!buffer_uptodate(bh)) {
3210389d1b08SAneesh Kumar K.V 		lock_buffer(bh);
3211389d1b08SAneesh Kumar K.V 		if (!buffer_uptodate(bh))
3212389d1b08SAneesh Kumar K.V 			return 0;
3213389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3214389d1b08SAneesh Kumar K.V 	}
3215389d1b08SAneesh Kumar K.V 	return 1;
3216389d1b08SAneesh Kumar K.V }
3217389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_uptodate_or_lock);
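
/*
 * Illustrative caller idiom (editor's addition): the "0 means locked"
 * contract lets a caller fall straight through to I/O submission:
 *
 *	if (bh_uptodate_or_lock(bh))
 *		return 0;	// already uptodate, we hold no lock
 *	// we now hold the buffer lock and must read it or unlock it
 */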
3218389d1b08SAneesh Kumar K.V 
3219389d1b08SAneesh Kumar K.V /**
3220a6b91919SRandy Dunlap  * bh_submit_read - Submit a locked buffer for reading
3221389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head
3222389d1b08SAneesh Kumar K.V  * @bh: the locked buffer to read
3223389d1b08SAneesh Kumar K.V  *
3224389d1b08SAneesh Kumar K.V  * Returns zero on success and -EIO on error; the buffer must be locked on entry.
3225389d1b08SAneesh Kumar K.V int bh_submit_read(struct buffer_head *bh)
3226389d1b08SAneesh Kumar K.V {
3227389d1b08SAneesh Kumar K.V 	BUG_ON(!buffer_locked(bh));
3228389d1b08SAneesh Kumar K.V 
3229389d1b08SAneesh Kumar K.V 	if (buffer_uptodate(bh)) {
3230389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3231389d1b08SAneesh Kumar K.V 		return 0;
3232389d1b08SAneesh Kumar K.V 	}
3233389d1b08SAneesh Kumar K.V 
3234389d1b08SAneesh Kumar K.V 	get_bh(bh);
3235389d1b08SAneesh Kumar K.V 	bh->b_end_io = end_buffer_read_sync;
3236389d1b08SAneesh Kumar K.V 	submit_bh(READ, bh);
3237389d1b08SAneesh Kumar K.V 	wait_on_buffer(bh);
3238389d1b08SAneesh Kumar K.V 	if (buffer_uptodate(bh))
3239389d1b08SAneesh Kumar K.V 		return 0;
3240389d1b08SAneesh Kumar K.V 	return -EIO;
3241389d1b08SAneesh Kumar K.V }
3242389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_submit_read);
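
/*
 * Hedged sketch (editor's addition) of the combined pattern these two
 * helpers support -- synchronously reading a metadata block:
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		// bh is locked; bh_submit_read() consumes the lock
 *		if (bh_submit_read(bh) < 0)
 *			return -EIO;
 *	}
 *	// bh is uptodate and unlocked here
 */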
3243389d1b08SAneesh Kumar K.V 
3244b98938c3SChristoph Lameter static void
324551cc5068SAlexey Dobriyan init_buffer_head(void *data)
3246b98938c3SChristoph Lameter {
3247b98938c3SChristoph Lameter 	struct buffer_head *bh = data;
3248b98938c3SChristoph Lameter 
3249b98938c3SChristoph Lameter 	memset(bh, 0, sizeof(*bh));
3250b98938c3SChristoph Lameter 	INIT_LIST_HEAD(&bh->b_assoc_buffers);
3251b98938c3SChristoph Lameter }
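
/*
 * Editor's note: a kmem_cache constructor such as init_buffer_head()
 * runs once per object when its slab page is populated, not on every
 * allocation, so objects must be returned to this constructed state
 * on free -- hence the BUG_ON(!list_empty()) in free_buffer_head().
 */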
3252b98938c3SChristoph Lameter 
32531da177e4SLinus Torvalds void __init buffer_init(void)
32541da177e4SLinus Torvalds {
32551da177e4SLinus Torvalds 	int nrpages;
32561da177e4SLinus Torvalds 
3257b98938c3SChristoph Lameter 	bh_cachep = kmem_cache_create("buffer_head",
3258b98938c3SChristoph Lameter 			sizeof(struct buffer_head), 0,
3259b98938c3SChristoph Lameter 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3260b98938c3SChristoph Lameter 				SLAB_MEM_SPREAD),
3261b98938c3SChristoph Lameter 				init_buffer_head);
32621da177e4SLinus Torvalds 
32631da177e4SLinus Torvalds 	/*
32641da177e4SLinus Torvalds 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
32651da177e4SLinus Torvalds 	 */
32661da177e4SLinus Torvalds 	nrpages = (nr_free_buffer_pages() * 10) / 100;
32671da177e4SLinus Torvalds 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
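	/*
	 * Worked example (editor's addition, illustrative figures only):
	 * with 4 KiB pages and a buffer_head of roughly 64 bytes, a
	 * machine with 100000 free buffer pages yields nrpages = 10000
	 * and max_buffer_heads = 10000 * 64, i.e. about 640000 buffer
	 * heads before writeback starts stripping them.
	 */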
32681da177e4SLinus Torvalds 	hotcpu_notifier(buffer_cpu_notify, 0);
32691da177e4SLinus Torvalds }
32701da177e4SLinus Torvalds 
32711da177e4SLinus Torvalds EXPORT_SYMBOL(__bforget);
32721da177e4SLinus Torvalds EXPORT_SYMBOL(__brelse);
32731da177e4SLinus Torvalds EXPORT_SYMBOL(__wait_on_buffer);
32741da177e4SLinus Torvalds EXPORT_SYMBOL(block_commit_write);
32751da177e4SLinus Torvalds EXPORT_SYMBOL(block_prepare_write);
327654171690SDavid Chinner EXPORT_SYMBOL(block_page_mkwrite);
32771da177e4SLinus Torvalds EXPORT_SYMBOL(block_read_full_page);
32781da177e4SLinus Torvalds EXPORT_SYMBOL(block_sync_page);
32791da177e4SLinus Torvalds EXPORT_SYMBOL(block_truncate_page);
32801da177e4SLinus Torvalds EXPORT_SYMBOL(block_write_full_page);
328189e10787SNick Piggin EXPORT_SYMBOL(cont_write_begin);
32821da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_read_sync);
32831da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_write_sync);
32841da177e4SLinus Torvalds EXPORT_SYMBOL(file_fsync);
32851da177e4SLinus Torvalds EXPORT_SYMBOL(fsync_bdev);
32861da177e4SLinus Torvalds EXPORT_SYMBOL(generic_block_bmap);
328705eb0b51SOGAWA Hirofumi EXPORT_SYMBOL(generic_cont_expand_simple);
32881da177e4SLinus Torvalds EXPORT_SYMBOL(init_buffer);
32891da177e4SLinus Torvalds EXPORT_SYMBOL(invalidate_bdev);
32901da177e4SLinus Torvalds EXPORT_SYMBOL(ll_rw_block);
32911da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_dirty);
32921da177e4SLinus Torvalds EXPORT_SYMBOL(submit_bh);
32931da177e4SLinus Torvalds EXPORT_SYMBOL(sync_dirty_buffer);
32941da177e4SLinus Torvalds EXPORT_SYMBOL(unlock_buffer);
3295