xref: /linux/fs/buffer.c (revision 721a9602e6607417c6bc15b18e97a2f35266c690)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  *  linux/fs/buffer.c
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
51da177e4SLinus Torvalds  */
61da177e4SLinus Torvalds 
71da177e4SLinus Torvalds /*
81da177e4SLinus Torvalds  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
91da177e4SLinus Torvalds  *
101da177e4SLinus Torvalds  * Removed a lot of unnecessary code and simplified things now that
111da177e4SLinus Torvalds  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
141da177e4SLinus Torvalds  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
151da177e4SLinus Torvalds  *
161da177e4SLinus Torvalds  * Added 32k buffer block sizes - these are required older ARM systems. - RMK
171da177e4SLinus Torvalds  *
181da177e4SLinus Torvalds  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
191da177e4SLinus Torvalds  */
201da177e4SLinus Torvalds 
211da177e4SLinus Torvalds #include <linux/kernel.h>
221da177e4SLinus Torvalds #include <linux/syscalls.h>
231da177e4SLinus Torvalds #include <linux/fs.h>
241da177e4SLinus Torvalds #include <linux/mm.h>
251da177e4SLinus Torvalds #include <linux/percpu.h>
261da177e4SLinus Torvalds #include <linux/slab.h>
2716f7e0feSRandy Dunlap #include <linux/capability.h>
281da177e4SLinus Torvalds #include <linux/blkdev.h>
291da177e4SLinus Torvalds #include <linux/file.h>
301da177e4SLinus Torvalds #include <linux/quotaops.h>
311da177e4SLinus Torvalds #include <linux/highmem.h>
321da177e4SLinus Torvalds #include <linux/module.h>
331da177e4SLinus Torvalds #include <linux/writeback.h>
341da177e4SLinus Torvalds #include <linux/hash.h>
351da177e4SLinus Torvalds #include <linux/suspend.h>
361da177e4SLinus Torvalds #include <linux/buffer_head.h>
3755e829afSAndrew Morton #include <linux/task_io_accounting_ops.h>
381da177e4SLinus Torvalds #include <linux/bio.h>
391da177e4SLinus Torvalds #include <linux/notifier.h>
401da177e4SLinus Torvalds #include <linux/cpu.h>
411da177e4SLinus Torvalds #include <linux/bitops.h>
421da177e4SLinus Torvalds #include <linux/mpage.h>
43fb1c8f93SIngo Molnar #include <linux/bit_spinlock.h>
441da177e4SLinus Torvalds 
451da177e4SLinus Torvalds static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
461da177e4SLinus Torvalds 
471da177e4SLinus Torvalds #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
481da177e4SLinus Torvalds 
491da177e4SLinus Torvalds inline void
501da177e4SLinus Torvalds init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
511da177e4SLinus Torvalds {
521da177e4SLinus Torvalds 	bh->b_end_io = handler;
531da177e4SLinus Torvalds 	bh->b_private = private;
541da177e4SLinus Torvalds }
551fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(init_buffer);
561da177e4SLinus Torvalds 
577eaceaccSJens Axboe static int sleep_on_buffer(void *word)
581da177e4SLinus Torvalds {
591da177e4SLinus Torvalds 	io_schedule();
601da177e4SLinus Torvalds 	return 0;
611da177e4SLinus Torvalds }
621da177e4SLinus Torvalds 
63fc9b52cdSHarvey Harrison void __lock_buffer(struct buffer_head *bh)
641da177e4SLinus Torvalds {
657eaceaccSJens Axboe 	wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
661da177e4SLinus Torvalds 							TASK_UNINTERRUPTIBLE);
671da177e4SLinus Torvalds }
681da177e4SLinus Torvalds EXPORT_SYMBOL(__lock_buffer);
691da177e4SLinus Torvalds 
70fc9b52cdSHarvey Harrison void unlock_buffer(struct buffer_head *bh)
711da177e4SLinus Torvalds {
7251b07fc3SNick Piggin 	clear_bit_unlock(BH_Lock, &bh->b_state);
731da177e4SLinus Torvalds 	smp_mb__after_clear_bit();
741da177e4SLinus Torvalds 	wake_up_bit(&bh->b_state, BH_Lock);
751da177e4SLinus Torvalds }
761fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(unlock_buffer);
771da177e4SLinus Torvalds 
781da177e4SLinus Torvalds /*
791da177e4SLinus Torvalds  * Block until a buffer comes unlocked.  This doesn't stop it
801da177e4SLinus Torvalds  * from becoming locked again - you have to lock it yourself
811da177e4SLinus Torvalds  * if you want to preserve its state.
821da177e4SLinus Torvalds  */
831da177e4SLinus Torvalds void __wait_on_buffer(struct buffer_head * bh)
841da177e4SLinus Torvalds {
857eaceaccSJens Axboe 	wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
861da177e4SLinus Torvalds }
871fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__wait_on_buffer);
881da177e4SLinus Torvalds 
891da177e4SLinus Torvalds static void
901da177e4SLinus Torvalds __clear_page_buffers(struct page *page)
911da177e4SLinus Torvalds {
921da177e4SLinus Torvalds 	ClearPagePrivate(page);
934c21e2f2SHugh Dickins 	set_page_private(page, 0);
941da177e4SLinus Torvalds 	page_cache_release(page);
951da177e4SLinus Torvalds }
961da177e4SLinus Torvalds 
9708bafc03SKeith Mannthey 
9808bafc03SKeith Mannthey static int quiet_error(struct buffer_head *bh)
9908bafc03SKeith Mannthey {
10008bafc03SKeith Mannthey 	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
10108bafc03SKeith Mannthey 		return 0;
10208bafc03SKeith Mannthey 	return 1;
10308bafc03SKeith Mannthey }
10408bafc03SKeith Mannthey 
10508bafc03SKeith Mannthey 
1061da177e4SLinus Torvalds static void buffer_io_error(struct buffer_head *bh)
1071da177e4SLinus Torvalds {
1081da177e4SLinus Torvalds 	char b[BDEVNAME_SIZE];
1091da177e4SLinus Torvalds 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
1101da177e4SLinus Torvalds 			bdevname(bh->b_bdev, b),
1111da177e4SLinus Torvalds 			(unsigned long long)bh->b_blocknr);
1121da177e4SLinus Torvalds }
1131da177e4SLinus Torvalds 
1141da177e4SLinus Torvalds /*
11568671f35SDmitry Monakhov  * End-of-IO handler helper function which does not touch the bh after
11668671f35SDmitry Monakhov  * unlocking it.
11768671f35SDmitry Monakhov  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
11868671f35SDmitry Monakhov  * a race there is benign: unlock_buffer() only use the bh's address for
11968671f35SDmitry Monakhov  * hashing after unlocking the buffer, so it doesn't actually touch the bh
12068671f35SDmitry Monakhov  * itself.
1211da177e4SLinus Torvalds  */
12268671f35SDmitry Monakhov static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
1231da177e4SLinus Torvalds {
1241da177e4SLinus Torvalds 	if (uptodate) {
1251da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
1261da177e4SLinus Torvalds 	} else {
1271da177e4SLinus Torvalds 		/* This happens, due to failed READA attempts. */
1281da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
1291da177e4SLinus Torvalds 	}
1301da177e4SLinus Torvalds 	unlock_buffer(bh);
13168671f35SDmitry Monakhov }
13268671f35SDmitry Monakhov 
13368671f35SDmitry Monakhov /*
13468671f35SDmitry Monakhov  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
13568671f35SDmitry Monakhov  * unlock the buffer. This is what ll_rw_block uses too.
13668671f35SDmitry Monakhov  */
13768671f35SDmitry Monakhov void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
13868671f35SDmitry Monakhov {
13968671f35SDmitry Monakhov 	__end_buffer_read_notouch(bh, uptodate);
1401da177e4SLinus Torvalds 	put_bh(bh);
1411da177e4SLinus Torvalds }
1421fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(end_buffer_read_sync);
1431da177e4SLinus Torvalds 
1441da177e4SLinus Torvalds void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
1451da177e4SLinus Torvalds {
1461da177e4SLinus Torvalds 	char b[BDEVNAME_SIZE];
1471da177e4SLinus Torvalds 
1481da177e4SLinus Torvalds 	if (uptodate) {
1491da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
1501da177e4SLinus Torvalds 	} else {
1510edd55faSChristoph Hellwig 		if (!quiet_error(bh)) {
1521da177e4SLinus Torvalds 			buffer_io_error(bh);
1531da177e4SLinus Torvalds 			printk(KERN_WARNING "lost page write due to "
1541da177e4SLinus Torvalds 					"I/O error on %s\n",
1551da177e4SLinus Torvalds 				       bdevname(bh->b_bdev, b));
1561da177e4SLinus Torvalds 		}
1571da177e4SLinus Torvalds 		set_buffer_write_io_error(bh);
1581da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
1591da177e4SLinus Torvalds 	}
1601da177e4SLinus Torvalds 	unlock_buffer(bh);
1611da177e4SLinus Torvalds 	put_bh(bh);
1621da177e4SLinus Torvalds }
1631fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(end_buffer_write_sync);
1641da177e4SLinus Torvalds 
1651da177e4SLinus Torvalds /*
1661da177e4SLinus Torvalds  * Various filesystems appear to want __find_get_block to be non-blocking.
1671da177e4SLinus Torvalds  * But it's the page lock which protects the buffers.  To get around this,
1681da177e4SLinus Torvalds  * we get exclusion from try_to_free_buffers with the blockdev mapping's
1691da177e4SLinus Torvalds  * private_lock.
1701da177e4SLinus Torvalds  *
1711da177e4SLinus Torvalds  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
1721da177e4SLinus Torvalds  * may be quite high.  This code could TryLock the page, and if that
1731da177e4SLinus Torvalds  * succeeds, there is no need to take private_lock. (But if
1741da177e4SLinus Torvalds  * private_lock is contended then so is mapping->tree_lock).
1751da177e4SLinus Torvalds  */
1761da177e4SLinus Torvalds static struct buffer_head *
177385fd4c5SCoywolf Qi Hunt __find_get_block_slow(struct block_device *bdev, sector_t block)
1781da177e4SLinus Torvalds {
1791da177e4SLinus Torvalds 	struct inode *bd_inode = bdev->bd_inode;
1801da177e4SLinus Torvalds 	struct address_space *bd_mapping = bd_inode->i_mapping;
1811da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
1821da177e4SLinus Torvalds 	pgoff_t index;
1831da177e4SLinus Torvalds 	struct buffer_head *bh;
1841da177e4SLinus Torvalds 	struct buffer_head *head;
1851da177e4SLinus Torvalds 	struct page *page;
1861da177e4SLinus Torvalds 	int all_mapped = 1;
1871da177e4SLinus Torvalds 
1881da177e4SLinus Torvalds 	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
1891da177e4SLinus Torvalds 	page = find_get_page(bd_mapping, index);
1901da177e4SLinus Torvalds 	if (!page)
1911da177e4SLinus Torvalds 		goto out;
1921da177e4SLinus Torvalds 
1931da177e4SLinus Torvalds 	spin_lock(&bd_mapping->private_lock);
1941da177e4SLinus Torvalds 	if (!page_has_buffers(page))
1951da177e4SLinus Torvalds 		goto out_unlock;
1961da177e4SLinus Torvalds 	head = page_buffers(page);
1971da177e4SLinus Torvalds 	bh = head;
1981da177e4SLinus Torvalds 	do {
19997f76d3dSNikanth Karthikesan 		if (!buffer_mapped(bh))
20097f76d3dSNikanth Karthikesan 			all_mapped = 0;
20197f76d3dSNikanth Karthikesan 		else if (bh->b_blocknr == block) {
2021da177e4SLinus Torvalds 			ret = bh;
2031da177e4SLinus Torvalds 			get_bh(bh);
2041da177e4SLinus Torvalds 			goto out_unlock;
2051da177e4SLinus Torvalds 		}
2061da177e4SLinus Torvalds 		bh = bh->b_this_page;
2071da177e4SLinus Torvalds 	} while (bh != head);
2081da177e4SLinus Torvalds 
2091da177e4SLinus Torvalds 	/* we might be here because some of the buffers on this page are
2101da177e4SLinus Torvalds 	 * not mapped.  This is due to various races between
2111da177e4SLinus Torvalds 	 * file io on the block device and getblk.  It gets dealt with
2121da177e4SLinus Torvalds 	 * elsewhere, don't buffer_error if we had some unmapped buffers
2131da177e4SLinus Torvalds 	 */
2141da177e4SLinus Torvalds 	if (all_mapped) {
2151da177e4SLinus Torvalds 		printk("__find_get_block_slow() failed. "
2161da177e4SLinus Torvalds 			"block=%llu, b_blocknr=%llu\n",
217205f87f6SBadari Pulavarty 			(unsigned long long)block,
218205f87f6SBadari Pulavarty 			(unsigned long long)bh->b_blocknr);
219205f87f6SBadari Pulavarty 		printk("b_state=0x%08lx, b_size=%zu\n",
220205f87f6SBadari Pulavarty 			bh->b_state, bh->b_size);
2211da177e4SLinus Torvalds 		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
2221da177e4SLinus Torvalds 	}
2231da177e4SLinus Torvalds out_unlock:
2241da177e4SLinus Torvalds 	spin_unlock(&bd_mapping->private_lock);
2251da177e4SLinus Torvalds 	page_cache_release(page);
2261da177e4SLinus Torvalds out:
2271da177e4SLinus Torvalds 	return ret;
2281da177e4SLinus Torvalds }
2291da177e4SLinus Torvalds 
2301da177e4SLinus Torvalds /* If invalidate_buffers() will trash dirty buffers, it means some kind
2311da177e4SLinus Torvalds    of fs corruption is going on. Trashing dirty data always imply losing
2321da177e4SLinus Torvalds    information that was supposed to be just stored on the physical layer
2331da177e4SLinus Torvalds    by the user.
2341da177e4SLinus Torvalds 
2351da177e4SLinus Torvalds    Thus invalidate_buffers in general usage is not allwowed to trash
2361da177e4SLinus Torvalds    dirty buffers. For example ioctl(FLSBLKBUF) expects dirty data to
2371da177e4SLinus Torvalds    be preserved.  These buffers are simply skipped.
2381da177e4SLinus Torvalds 
2391da177e4SLinus Torvalds    We also skip buffers which are still in use.  For example this can
2401da177e4SLinus Torvalds    happen if a userspace program is reading the block device.
2411da177e4SLinus Torvalds 
2421da177e4SLinus Torvalds    NOTE: In the case where the user removed a removable-media-disk even if
2431da177e4SLinus Torvalds    there's still dirty data not synced on disk (due a bug in the device driver
2441da177e4SLinus Torvalds    or due an error of the user), by not destroying the dirty buffers we could
2451da177e4SLinus Torvalds    generate corruption also on the next media inserted, thus a parameter is
2461da177e4SLinus Torvalds    necessary to handle this case in the most safe way possible (trying
2471da177e4SLinus Torvalds    to not corrupt also the new disk inserted with the data belonging to
2481da177e4SLinus Torvalds    the old now corrupted disk). Also for the ramdisk the natural thing
2491da177e4SLinus Torvalds    to do in order to release the ramdisk memory is to destroy dirty buffers.
2501da177e4SLinus Torvalds 
2511da177e4SLinus Torvalds    These are two special cases. Normal usage imply the device driver
2521da177e4SLinus Torvalds    to issue a sync on the device (without waiting I/O completion) and
2531da177e4SLinus Torvalds    then an invalidate_buffers call that doesn't trash dirty buffers.
2541da177e4SLinus Torvalds 
2551da177e4SLinus Torvalds    For handling cache coherency with the blkdev pagecache the 'update' case
2561da177e4SLinus Torvalds    is been introduced. It is needed to re-read from disk any pinned
2571da177e4SLinus Torvalds    buffer. NOTE: re-reading from disk is destructive so we can do it only
2581da177e4SLinus Torvalds    when we assume nobody is changing the buffercache under our I/O and when
2591da177e4SLinus Torvalds    we think the disk contains more recent information than the buffercache.
2601da177e4SLinus Torvalds    The update == 1 pass marks the buffers we need to update, the update == 2
2611da177e4SLinus Torvalds    pass does the actual I/O. */
262f98393a6SPeter Zijlstra void invalidate_bdev(struct block_device *bdev)
2631da177e4SLinus Torvalds {
2640e1dfc66SAndrew Morton 	struct address_space *mapping = bdev->bd_inode->i_mapping;
2650e1dfc66SAndrew Morton 
2660e1dfc66SAndrew Morton 	if (mapping->nrpages == 0)
2670e1dfc66SAndrew Morton 		return;
2680e1dfc66SAndrew Morton 
2691da177e4SLinus Torvalds 	invalidate_bh_lrus();
270fa4b9074STejun Heo 	lru_add_drain_all();	/* make sure all lru add caches are flushed */
271fc0ecff6SAndrew Morton 	invalidate_mapping_pages(mapping, 0, -1);
2721da177e4SLinus Torvalds }
2731fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(invalidate_bdev);
2741da177e4SLinus Torvalds 
2751da177e4SLinus Torvalds /*
2765b0830cbSJens Axboe  * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
2771da177e4SLinus Torvalds  */
2781da177e4SLinus Torvalds static void free_more_memory(void)
2791da177e4SLinus Torvalds {
28019770b32SMel Gorman 	struct zone *zone;
2810e88460dSMel Gorman 	int nid;
2821da177e4SLinus Torvalds 
28303ba3782SJens Axboe 	wakeup_flusher_threads(1024);
2841da177e4SLinus Torvalds 	yield();
2851da177e4SLinus Torvalds 
2860e88460dSMel Gorman 	for_each_online_node(nid) {
28719770b32SMel Gorman 		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
28819770b32SMel Gorman 						gfp_zone(GFP_NOFS), NULL,
28919770b32SMel Gorman 						&zone);
29019770b32SMel Gorman 		if (zone)
29154a6eb5cSMel Gorman 			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
292327c0e96SKAMEZAWA Hiroyuki 						GFP_NOFS, NULL);
2931da177e4SLinus Torvalds 	}
2941da177e4SLinus Torvalds }
2951da177e4SLinus Torvalds 
2961da177e4SLinus Torvalds /*
2971da177e4SLinus Torvalds  * I/O completion handler for block_read_full_page() - pages
2981da177e4SLinus Torvalds  * which come unlocked at the end of I/O.
2991da177e4SLinus Torvalds  */
3001da177e4SLinus Torvalds static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
3011da177e4SLinus Torvalds {
3021da177e4SLinus Torvalds 	unsigned long flags;
303a3972203SNick Piggin 	struct buffer_head *first;
3041da177e4SLinus Torvalds 	struct buffer_head *tmp;
3051da177e4SLinus Torvalds 	struct page *page;
3061da177e4SLinus Torvalds 	int page_uptodate = 1;
3071da177e4SLinus Torvalds 
3081da177e4SLinus Torvalds 	BUG_ON(!buffer_async_read(bh));
3091da177e4SLinus Torvalds 
3101da177e4SLinus Torvalds 	page = bh->b_page;
3111da177e4SLinus Torvalds 	if (uptodate) {
3121da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
3131da177e4SLinus Torvalds 	} else {
3141da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
31508bafc03SKeith Mannthey 		if (!quiet_error(bh))
3161da177e4SLinus Torvalds 			buffer_io_error(bh);
3171da177e4SLinus Torvalds 		SetPageError(page);
3181da177e4SLinus Torvalds 	}
3191da177e4SLinus Torvalds 
3201da177e4SLinus Torvalds 	/*
3211da177e4SLinus Torvalds 	 * Be _very_ careful from here on. Bad things can happen if
3221da177e4SLinus Torvalds 	 * two buffer heads end IO at almost the same time and both
3231da177e4SLinus Torvalds 	 * decide that the page is now completely done.
3241da177e4SLinus Torvalds 	 */
325a3972203SNick Piggin 	first = page_buffers(page);
326a3972203SNick Piggin 	local_irq_save(flags);
327a3972203SNick Piggin 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
3281da177e4SLinus Torvalds 	clear_buffer_async_read(bh);
3291da177e4SLinus Torvalds 	unlock_buffer(bh);
3301da177e4SLinus Torvalds 	tmp = bh;
3311da177e4SLinus Torvalds 	do {
3321da177e4SLinus Torvalds 		if (!buffer_uptodate(tmp))
3331da177e4SLinus Torvalds 			page_uptodate = 0;
3341da177e4SLinus Torvalds 		if (buffer_async_read(tmp)) {
3351da177e4SLinus Torvalds 			BUG_ON(!buffer_locked(tmp));
3361da177e4SLinus Torvalds 			goto still_busy;
3371da177e4SLinus Torvalds 		}
3381da177e4SLinus Torvalds 		tmp = tmp->b_this_page;
3391da177e4SLinus Torvalds 	} while (tmp != bh);
340a3972203SNick Piggin 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
341a3972203SNick Piggin 	local_irq_restore(flags);
3421da177e4SLinus Torvalds 
3431da177e4SLinus Torvalds 	/*
3441da177e4SLinus Torvalds 	 * If none of the buffers had errors and they are all
3451da177e4SLinus Torvalds 	 * uptodate then we can set the page uptodate.
3461da177e4SLinus Torvalds 	 */
3471da177e4SLinus Torvalds 	if (page_uptodate && !PageError(page))
3481da177e4SLinus Torvalds 		SetPageUptodate(page);
3491da177e4SLinus Torvalds 	unlock_page(page);
3501da177e4SLinus Torvalds 	return;
3511da177e4SLinus Torvalds 
3521da177e4SLinus Torvalds still_busy:
353a3972203SNick Piggin 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
354a3972203SNick Piggin 	local_irq_restore(flags);
3551da177e4SLinus Torvalds 	return;
3561da177e4SLinus Torvalds }
3571da177e4SLinus Torvalds 
3581da177e4SLinus Torvalds /*
3591da177e4SLinus Torvalds  * Completion handler for block_write_full_page() - pages which are unlocked
3601da177e4SLinus Torvalds  * during I/O, and which have PageWriteback cleared upon I/O completion.
3611da177e4SLinus Torvalds  */
36235c80d5fSChris Mason void end_buffer_async_write(struct buffer_head *bh, int uptodate)
3631da177e4SLinus Torvalds {
3641da177e4SLinus Torvalds 	char b[BDEVNAME_SIZE];
3651da177e4SLinus Torvalds 	unsigned long flags;
366a3972203SNick Piggin 	struct buffer_head *first;
3671da177e4SLinus Torvalds 	struct buffer_head *tmp;
3681da177e4SLinus Torvalds 	struct page *page;
3691da177e4SLinus Torvalds 
3701da177e4SLinus Torvalds 	BUG_ON(!buffer_async_write(bh));
3711da177e4SLinus Torvalds 
3721da177e4SLinus Torvalds 	page = bh->b_page;
3731da177e4SLinus Torvalds 	if (uptodate) {
3741da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
3751da177e4SLinus Torvalds 	} else {
37608bafc03SKeith Mannthey 		if (!quiet_error(bh)) {
3771da177e4SLinus Torvalds 			buffer_io_error(bh);
3781da177e4SLinus Torvalds 			printk(KERN_WARNING "lost page write due to "
3791da177e4SLinus Torvalds 					"I/O error on %s\n",
3801da177e4SLinus Torvalds 			       bdevname(bh->b_bdev, b));
3811da177e4SLinus Torvalds 		}
3821da177e4SLinus Torvalds 		set_bit(AS_EIO, &page->mapping->flags);
38358ff407bSJan Kara 		set_buffer_write_io_error(bh);
3841da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
3851da177e4SLinus Torvalds 		SetPageError(page);
3861da177e4SLinus Torvalds 	}
3871da177e4SLinus Torvalds 
388a3972203SNick Piggin 	first = page_buffers(page);
389a3972203SNick Piggin 	local_irq_save(flags);
390a3972203SNick Piggin 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
391a3972203SNick Piggin 
3921da177e4SLinus Torvalds 	clear_buffer_async_write(bh);
3931da177e4SLinus Torvalds 	unlock_buffer(bh);
3941da177e4SLinus Torvalds 	tmp = bh->b_this_page;
3951da177e4SLinus Torvalds 	while (tmp != bh) {
3961da177e4SLinus Torvalds 		if (buffer_async_write(tmp)) {
3971da177e4SLinus Torvalds 			BUG_ON(!buffer_locked(tmp));
3981da177e4SLinus Torvalds 			goto still_busy;
3991da177e4SLinus Torvalds 		}
4001da177e4SLinus Torvalds 		tmp = tmp->b_this_page;
4011da177e4SLinus Torvalds 	}
402a3972203SNick Piggin 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
403a3972203SNick Piggin 	local_irq_restore(flags);
4041da177e4SLinus Torvalds 	end_page_writeback(page);
4051da177e4SLinus Torvalds 	return;
4061da177e4SLinus Torvalds 
4071da177e4SLinus Torvalds still_busy:
408a3972203SNick Piggin 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
409a3972203SNick Piggin 	local_irq_restore(flags);
4101da177e4SLinus Torvalds 	return;
4111da177e4SLinus Torvalds }
4121fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(end_buffer_async_write);
4131da177e4SLinus Torvalds 
4141da177e4SLinus Torvalds /*
4151da177e4SLinus Torvalds  * If a page's buffers are under async readin (end_buffer_async_read
4161da177e4SLinus Torvalds  * completion) then there is a possibility that another thread of
4171da177e4SLinus Torvalds  * control could lock one of the buffers after it has completed
4181da177e4SLinus Torvalds  * but while some of the other buffers have not completed.  This
4191da177e4SLinus Torvalds  * locked buffer would confuse end_buffer_async_read() into not unlocking
4201da177e4SLinus Torvalds  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
4211da177e4SLinus Torvalds  * that this buffer is not under async I/O.
4221da177e4SLinus Torvalds  *
4231da177e4SLinus Torvalds  * The page comes unlocked when it has no locked buffer_async buffers
4241da177e4SLinus Torvalds  * left.
4251da177e4SLinus Torvalds  *
4261da177e4SLinus Torvalds  * PageLocked prevents anyone starting new async I/O reads any of
4271da177e4SLinus Torvalds  * the buffers.
4281da177e4SLinus Torvalds  *
4291da177e4SLinus Torvalds  * PageWriteback is used to prevent simultaneous writeout of the same
4301da177e4SLinus Torvalds  * page.
4311da177e4SLinus Torvalds  *
4321da177e4SLinus Torvalds  * PageLocked prevents anyone from starting writeback of a page which is
4331da177e4SLinus Torvalds  * under read I/O (PageWriteback is only ever set against a locked page).
4341da177e4SLinus Torvalds  */
4351da177e4SLinus Torvalds static void mark_buffer_async_read(struct buffer_head *bh)
4361da177e4SLinus Torvalds {
4371da177e4SLinus Torvalds 	bh->b_end_io = end_buffer_async_read;
4381da177e4SLinus Torvalds 	set_buffer_async_read(bh);
4391da177e4SLinus Torvalds }
4401da177e4SLinus Torvalds 
4411fe72eaaSH Hartley Sweeten static void mark_buffer_async_write_endio(struct buffer_head *bh,
44235c80d5fSChris Mason 					  bh_end_io_t *handler)
44335c80d5fSChris Mason {
44435c80d5fSChris Mason 	bh->b_end_io = handler;
44535c80d5fSChris Mason 	set_buffer_async_write(bh);
44635c80d5fSChris Mason }
44735c80d5fSChris Mason 
4481da177e4SLinus Torvalds void mark_buffer_async_write(struct buffer_head *bh)
4491da177e4SLinus Torvalds {
45035c80d5fSChris Mason 	mark_buffer_async_write_endio(bh, end_buffer_async_write);
4511da177e4SLinus Torvalds }
4521da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_async_write);
4531da177e4SLinus Torvalds 
4541da177e4SLinus Torvalds 
4551da177e4SLinus Torvalds /*
4561da177e4SLinus Torvalds  * fs/buffer.c contains helper functions for buffer-backed address space's
4571da177e4SLinus Torvalds  * fsync functions.  A common requirement for buffer-based filesystems is
4581da177e4SLinus Torvalds  * that certain data from the backing blockdev needs to be written out for
4591da177e4SLinus Torvalds  * a successful fsync().  For example, ext2 indirect blocks need to be
4601da177e4SLinus Torvalds  * written back and waited upon before fsync() returns.
4611da177e4SLinus Torvalds  *
4621da177e4SLinus Torvalds  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
4631da177e4SLinus Torvalds  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
4641da177e4SLinus Torvalds  * management of a list of dependent buffers at ->i_mapping->private_list.
4651da177e4SLinus Torvalds  *
4661da177e4SLinus Torvalds  * Locking is a little subtle: try_to_free_buffers() will remove buffers
4671da177e4SLinus Torvalds  * from their controlling inode's queue when they are being freed.  But
4681da177e4SLinus Torvalds  * try_to_free_buffers() will be operating against the *blockdev* mapping
4691da177e4SLinus Torvalds  * at the time, not against the S_ISREG file which depends on those buffers.
4701da177e4SLinus Torvalds  * So the locking for private_list is via the private_lock in the address_space
4711da177e4SLinus Torvalds  * which backs the buffers.  Which is different from the address_space
4721da177e4SLinus Torvalds  * against which the buffers are listed.  So for a particular address_space,
4731da177e4SLinus Torvalds  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
4741da177e4SLinus Torvalds  * mapping->private_list will always be protected by the backing blockdev's
4751da177e4SLinus Torvalds  * ->private_lock.
4761da177e4SLinus Torvalds  *
4771da177e4SLinus Torvalds  * Which introduces a requirement: all buffers on an address_space's
4781da177e4SLinus Torvalds  * ->private_list must be from the same address_space: the blockdev's.
4791da177e4SLinus Torvalds  *
4801da177e4SLinus Torvalds  * address_spaces which do not place buffers at ->private_list via these
4811da177e4SLinus Torvalds  * utility functions are free to use private_lock and private_list for
4821da177e4SLinus Torvalds  * whatever they want.  The only requirement is that list_empty(private_list)
4831da177e4SLinus Torvalds  * be true at clear_inode() time.
4841da177e4SLinus Torvalds  *
4851da177e4SLinus Torvalds  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
4861da177e4SLinus Torvalds  * filesystems should do that.  invalidate_inode_buffers() should just go
4871da177e4SLinus Torvalds  * BUG_ON(!list_empty).
4881da177e4SLinus Torvalds  *
4891da177e4SLinus Torvalds  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
4901da177e4SLinus Torvalds  * take an address_space, not an inode.  And it should be called
4911da177e4SLinus Torvalds  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
4921da177e4SLinus Torvalds  * queued up.
4931da177e4SLinus Torvalds  *
4941da177e4SLinus Torvalds  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
4951da177e4SLinus Torvalds  * list if it is already on a list.  Because if the buffer is on a list,
4961da177e4SLinus Torvalds  * it *must* already be on the right one.  If not, the filesystem is being
4971da177e4SLinus Torvalds  * silly.  This will save a ton of locking.  But first we have to ensure
4981da177e4SLinus Torvalds  * that buffers are taken *off* the old inode's list when they are freed
4991da177e4SLinus Torvalds  * (presumably in truncate).  That requires careful auditing of all
5001da177e4SLinus Torvalds  * filesystems (do it inside bforget()).  It could also be done by bringing
5011da177e4SLinus Torvalds  * b_inode back.
5021da177e4SLinus Torvalds  */
5031da177e4SLinus Torvalds 
5041da177e4SLinus Torvalds /*
5051da177e4SLinus Torvalds  * The buffer's backing address_space's private_lock must be held
5061da177e4SLinus Torvalds  */
507dbacefc9SThomas Petazzoni static void __remove_assoc_queue(struct buffer_head *bh)
5081da177e4SLinus Torvalds {
5091da177e4SLinus Torvalds 	list_del_init(&bh->b_assoc_buffers);
51058ff407bSJan Kara 	WARN_ON(!bh->b_assoc_map);
51158ff407bSJan Kara 	if (buffer_write_io_error(bh))
51258ff407bSJan Kara 		set_bit(AS_EIO, &bh->b_assoc_map->flags);
51358ff407bSJan Kara 	bh->b_assoc_map = NULL;
5141da177e4SLinus Torvalds }
5151da177e4SLinus Torvalds 
5161da177e4SLinus Torvalds int inode_has_buffers(struct inode *inode)
5171da177e4SLinus Torvalds {
5181da177e4SLinus Torvalds 	return !list_empty(&inode->i_data.private_list);
5191da177e4SLinus Torvalds }
5201da177e4SLinus Torvalds 
5211da177e4SLinus Torvalds /*
5221da177e4SLinus Torvalds  * osync is designed to support O_SYNC io.  It waits synchronously for
5231da177e4SLinus Torvalds  * all already-submitted IO to complete, but does not queue any new
5241da177e4SLinus Torvalds  * writes to the disk.
5251da177e4SLinus Torvalds  *
5261da177e4SLinus Torvalds  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
5271da177e4SLinus Torvalds  * you dirty the buffers, and then use osync_inode_buffers to wait for
5281da177e4SLinus Torvalds  * completion.  Any other dirty buffers which are not yet queued for
5291da177e4SLinus Torvalds  * write will not be flushed to disk by the osync.
5301da177e4SLinus Torvalds  */
5311da177e4SLinus Torvalds static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
5321da177e4SLinus Torvalds {
5331da177e4SLinus Torvalds 	struct buffer_head *bh;
5341da177e4SLinus Torvalds 	struct list_head *p;
5351da177e4SLinus Torvalds 	int err = 0;
5361da177e4SLinus Torvalds 
5371da177e4SLinus Torvalds 	spin_lock(lock);
5381da177e4SLinus Torvalds repeat:
5391da177e4SLinus Torvalds 	list_for_each_prev(p, list) {
5401da177e4SLinus Torvalds 		bh = BH_ENTRY(p);
5411da177e4SLinus Torvalds 		if (buffer_locked(bh)) {
5421da177e4SLinus Torvalds 			get_bh(bh);
5431da177e4SLinus Torvalds 			spin_unlock(lock);
5441da177e4SLinus Torvalds 			wait_on_buffer(bh);
5451da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
5461da177e4SLinus Torvalds 				err = -EIO;
5471da177e4SLinus Torvalds 			brelse(bh);
5481da177e4SLinus Torvalds 			spin_lock(lock);
5491da177e4SLinus Torvalds 			goto repeat;
5501da177e4SLinus Torvalds 		}
5511da177e4SLinus Torvalds 	}
5521da177e4SLinus Torvalds 	spin_unlock(lock);
5531da177e4SLinus Torvalds 	return err;
5541da177e4SLinus Torvalds }
5551da177e4SLinus Torvalds 
55601a05b33SAl Viro static void do_thaw_one(struct super_block *sb, void *unused)
557c2d75438SEric Sandeen {
558c2d75438SEric Sandeen 	char b[BDEVNAME_SIZE];
559c2d75438SEric Sandeen 	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
560c2d75438SEric Sandeen 		printk(KERN_WARNING "Emergency Thaw on %s\n",
561c2d75438SEric Sandeen 		       bdevname(sb->s_bdev, b));
562c2d75438SEric Sandeen }
56301a05b33SAl Viro 
56401a05b33SAl Viro static void do_thaw_all(struct work_struct *work)
56501a05b33SAl Viro {
56601a05b33SAl Viro 	iterate_supers(do_thaw_one, NULL);
567053c525fSJens Axboe 	kfree(work);
568c2d75438SEric Sandeen 	printk(KERN_WARNING "Emergency Thaw complete\n");
569c2d75438SEric Sandeen }
570c2d75438SEric Sandeen 
571c2d75438SEric Sandeen /**
572c2d75438SEric Sandeen  * emergency_thaw_all -- forcibly thaw every frozen filesystem
573c2d75438SEric Sandeen  *
574c2d75438SEric Sandeen  * Used for emergency unfreeze of all filesystems via SysRq
575c2d75438SEric Sandeen  */
576c2d75438SEric Sandeen void emergency_thaw_all(void)
577c2d75438SEric Sandeen {
578053c525fSJens Axboe 	struct work_struct *work;
579053c525fSJens Axboe 
580053c525fSJens Axboe 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
581053c525fSJens Axboe 	if (work) {
582053c525fSJens Axboe 		INIT_WORK(work, do_thaw_all);
583053c525fSJens Axboe 		schedule_work(work);
584053c525fSJens Axboe 	}
585c2d75438SEric Sandeen }
586c2d75438SEric Sandeen 
5871da177e4SLinus Torvalds /**
58878a4a50aSRandy Dunlap  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
58967be2dd1SMartin Waitz  * @mapping: the mapping which wants those buffers written
5901da177e4SLinus Torvalds  *
5911da177e4SLinus Torvalds  * Starts I/O against the buffers at mapping->private_list, and waits upon
5921da177e4SLinus Torvalds  * that I/O.
5931da177e4SLinus Torvalds  *
59467be2dd1SMartin Waitz  * Basically, this is a convenience function for fsync().
59567be2dd1SMartin Waitz  * @mapping is a file or directory which needs those buffers to be written for
59667be2dd1SMartin Waitz  * a successful fsync().
5971da177e4SLinus Torvalds  */
5981da177e4SLinus Torvalds int sync_mapping_buffers(struct address_space *mapping)
5991da177e4SLinus Torvalds {
6001da177e4SLinus Torvalds 	struct address_space *buffer_mapping = mapping->assoc_mapping;
6011da177e4SLinus Torvalds 
6021da177e4SLinus Torvalds 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
6031da177e4SLinus Torvalds 		return 0;
6041da177e4SLinus Torvalds 
6051da177e4SLinus Torvalds 	return fsync_buffers_list(&buffer_mapping->private_lock,
6061da177e4SLinus Torvalds 					&mapping->private_list);
6071da177e4SLinus Torvalds }
6081da177e4SLinus Torvalds EXPORT_SYMBOL(sync_mapping_buffers);
6091da177e4SLinus Torvalds 
6101da177e4SLinus Torvalds /*
6111da177e4SLinus Torvalds  * Called when we've recently written block `bblock', and it is known that
6121da177e4SLinus Torvalds  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
6131da177e4SLinus Torvalds  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
6141da177e4SLinus Torvalds  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
6151da177e4SLinus Torvalds  */
6161da177e4SLinus Torvalds void write_boundary_block(struct block_device *bdev,
6171da177e4SLinus Torvalds 			sector_t bblock, unsigned blocksize)
6181da177e4SLinus Torvalds {
6191da177e4SLinus Torvalds 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
6201da177e4SLinus Torvalds 	if (bh) {
6211da177e4SLinus Torvalds 		if (buffer_dirty(bh))
6221da177e4SLinus Torvalds 			ll_rw_block(WRITE, 1, &bh);
6231da177e4SLinus Torvalds 		put_bh(bh);
6241da177e4SLinus Torvalds 	}
6251da177e4SLinus Torvalds }
6261da177e4SLinus Torvalds 
6271da177e4SLinus Torvalds void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
6281da177e4SLinus Torvalds {
6291da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
6301da177e4SLinus Torvalds 	struct address_space *buffer_mapping = bh->b_page->mapping;
6311da177e4SLinus Torvalds 
6321da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
6331da177e4SLinus Torvalds 	if (!mapping->assoc_mapping) {
6341da177e4SLinus Torvalds 		mapping->assoc_mapping = buffer_mapping;
6351da177e4SLinus Torvalds 	} else {
636e827f923SEric Sesterhenn 		BUG_ON(mapping->assoc_mapping != buffer_mapping);
6371da177e4SLinus Torvalds 	}
638535ee2fbSJan Kara 	if (!bh->b_assoc_map) {
6391da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
6401da177e4SLinus Torvalds 		list_move_tail(&bh->b_assoc_buffers,
6411da177e4SLinus Torvalds 				&mapping->private_list);
64258ff407bSJan Kara 		bh->b_assoc_map = mapping;
6431da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
6441da177e4SLinus Torvalds 	}
6451da177e4SLinus Torvalds }
6461da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_dirty_inode);
6471da177e4SLinus Torvalds 
6481da177e4SLinus Torvalds /*
649787d2214SNick Piggin  * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
650787d2214SNick Piggin  * dirty.
651787d2214SNick Piggin  *
652787d2214SNick Piggin  * If warn is true, then emit a warning if the page is not uptodate and has
653787d2214SNick Piggin  * not been truncated.
654787d2214SNick Piggin  */
655a8e7d49aSLinus Torvalds static void __set_page_dirty(struct page *page,
656787d2214SNick Piggin 		struct address_space *mapping, int warn)
657787d2214SNick Piggin {
65819fd6231SNick Piggin 	spin_lock_irq(&mapping->tree_lock);
659787d2214SNick Piggin 	if (page->mapping) {	/* Race with truncate? */
660787d2214SNick Piggin 		WARN_ON_ONCE(warn && !PageUptodate(page));
661e3a7cca1SEdward Shishkin 		account_page_dirtied(page, mapping);
662787d2214SNick Piggin 		radix_tree_tag_set(&mapping->page_tree,
663787d2214SNick Piggin 				page_index(page), PAGECACHE_TAG_DIRTY);
664787d2214SNick Piggin 	}
66519fd6231SNick Piggin 	spin_unlock_irq(&mapping->tree_lock);
666787d2214SNick Piggin 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
667787d2214SNick Piggin }
668787d2214SNick Piggin 
669787d2214SNick Piggin /*
6701da177e4SLinus Torvalds  * Add a page to the dirty page list.
6711da177e4SLinus Torvalds  *
6721da177e4SLinus Torvalds  * It is a sad fact of life that this function is called from several places
6731da177e4SLinus Torvalds  * deeply under spinlocking.  It may not sleep.
6741da177e4SLinus Torvalds  *
6751da177e4SLinus Torvalds  * If the page has buffers, the uptodate buffers are set dirty, to preserve
6761da177e4SLinus Torvalds  * dirty-state coherency between the page and the buffers.  It the page does
6771da177e4SLinus Torvalds  * not have buffers then when they are later attached they will all be set
6781da177e4SLinus Torvalds  * dirty.
6791da177e4SLinus Torvalds  *
6801da177e4SLinus Torvalds  * The buffers are dirtied before the page is dirtied.  There's a small race
6811da177e4SLinus Torvalds  * window in which a writepage caller may see the page cleanness but not the
6821da177e4SLinus Torvalds  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
6831da177e4SLinus Torvalds  * before the buffers, a concurrent writepage caller could clear the page dirty
6841da177e4SLinus Torvalds  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
6851da177e4SLinus Torvalds  * page on the dirty page list.
6861da177e4SLinus Torvalds  *
6871da177e4SLinus Torvalds  * We use private_lock to lock against try_to_free_buffers while using the
6881da177e4SLinus Torvalds  * page's buffer list.  Also use this to protect against clean buffers being
6891da177e4SLinus Torvalds  * added to the page after it was set dirty.
6901da177e4SLinus Torvalds  *
6911da177e4SLinus Torvalds  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
6921da177e4SLinus Torvalds  * address_space though.
6931da177e4SLinus Torvalds  */
6941da177e4SLinus Torvalds int __set_page_dirty_buffers(struct page *page)
6951da177e4SLinus Torvalds {
696a8e7d49aSLinus Torvalds 	int newly_dirty;
697787d2214SNick Piggin 	struct address_space *mapping = page_mapping(page);
698ebf7a227SNick Piggin 
699ebf7a227SNick Piggin 	if (unlikely(!mapping))
700ebf7a227SNick Piggin 		return !TestSetPageDirty(page);
7011da177e4SLinus Torvalds 
7021da177e4SLinus Torvalds 	spin_lock(&mapping->private_lock);
7031da177e4SLinus Torvalds 	if (page_has_buffers(page)) {
7041da177e4SLinus Torvalds 		struct buffer_head *head = page_buffers(page);
7051da177e4SLinus Torvalds 		struct buffer_head *bh = head;
7061da177e4SLinus Torvalds 
7071da177e4SLinus Torvalds 		do {
7081da177e4SLinus Torvalds 			set_buffer_dirty(bh);
7091da177e4SLinus Torvalds 			bh = bh->b_this_page;
7101da177e4SLinus Torvalds 		} while (bh != head);
7111da177e4SLinus Torvalds 	}
712a8e7d49aSLinus Torvalds 	newly_dirty = !TestSetPageDirty(page);
7131da177e4SLinus Torvalds 	spin_unlock(&mapping->private_lock);
7141da177e4SLinus Torvalds 
715a8e7d49aSLinus Torvalds 	if (newly_dirty)
716a8e7d49aSLinus Torvalds 		__set_page_dirty(page, mapping, 1);
717a8e7d49aSLinus Torvalds 	return newly_dirty;
7181da177e4SLinus Torvalds }
7191da177e4SLinus Torvalds EXPORT_SYMBOL(__set_page_dirty_buffers);
7201da177e4SLinus Torvalds 
7211da177e4SLinus Torvalds /*
7221da177e4SLinus Torvalds  * Write out and wait upon a list of buffers.
7231da177e4SLinus Torvalds  *
7241da177e4SLinus Torvalds  * We have conflicting pressures: we want to make sure that all
7251da177e4SLinus Torvalds  * initially dirty buffers get waited on, but that any subsequently
7261da177e4SLinus Torvalds  * dirtied buffers don't.  After all, we don't want fsync to last
7271da177e4SLinus Torvalds  * forever if somebody is actively writing to the file.
7281da177e4SLinus Torvalds  *
7291da177e4SLinus Torvalds  * Do this in two main stages: first we copy dirty buffers to a
7301da177e4SLinus Torvalds  * temporary inode list, queueing the writes as we go.  Then we clean
7311da177e4SLinus Torvalds  * up, waiting for those writes to complete.
7321da177e4SLinus Torvalds  *
7331da177e4SLinus Torvalds  * During this second stage, any subsequent updates to the file may end
7341da177e4SLinus Torvalds  * up refiling the buffer on the original inode's dirty list again, so
7351da177e4SLinus Torvalds  * there is a chance we will end up with a buffer queued for write but
7361da177e4SLinus Torvalds  * not yet completed on that list.  So, as a final cleanup we go through
7371da177e4SLinus Torvalds  * the osync code to catch these locked, dirty buffers without requeuing
7381da177e4SLinus Torvalds  * any newly dirty buffers for write.
7391da177e4SLinus Torvalds  */
7401da177e4SLinus Torvalds static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
7411da177e4SLinus Torvalds {
7421da177e4SLinus Torvalds 	struct buffer_head *bh;
7431da177e4SLinus Torvalds 	struct list_head tmp;
7447eaceaccSJens Axboe 	struct address_space *mapping;
7451da177e4SLinus Torvalds 	int err = 0, err2;
7461da177e4SLinus Torvalds 
7471da177e4SLinus Torvalds 	INIT_LIST_HEAD(&tmp);
7481da177e4SLinus Torvalds 
7491da177e4SLinus Torvalds 	spin_lock(lock);
7501da177e4SLinus Torvalds 	while (!list_empty(list)) {
7511da177e4SLinus Torvalds 		bh = BH_ENTRY(list->next);
752535ee2fbSJan Kara 		mapping = bh->b_assoc_map;
75358ff407bSJan Kara 		__remove_assoc_queue(bh);
754535ee2fbSJan Kara 		/* Avoid race with mark_buffer_dirty_inode() which does
755535ee2fbSJan Kara 		 * a lockless check and we rely on seeing the dirty bit */
756535ee2fbSJan Kara 		smp_mb();
7571da177e4SLinus Torvalds 		if (buffer_dirty(bh) || buffer_locked(bh)) {
7581da177e4SLinus Torvalds 			list_add(&bh->b_assoc_buffers, &tmp);
759535ee2fbSJan Kara 			bh->b_assoc_map = mapping;
7601da177e4SLinus Torvalds 			if (buffer_dirty(bh)) {
7611da177e4SLinus Torvalds 				get_bh(bh);
7621da177e4SLinus Torvalds 				spin_unlock(lock);
7631da177e4SLinus Torvalds 				/*
7641da177e4SLinus Torvalds 				 * Ensure any pending I/O completes so that
7659cb569d6SChristoph Hellwig 				 * write_dirty_buffer() actually writes the
7669cb569d6SChristoph Hellwig 				 * current contents - it is a noop if I/O is
7679cb569d6SChristoph Hellwig 				 * still in flight on potentially older
7689cb569d6SChristoph Hellwig 				 * contents.
7691da177e4SLinus Torvalds 				 */
770*721a9602SJens Axboe 				write_dirty_buffer(bh, WRITE_SYNC);
7719cf6b720SJens Axboe 
7729cf6b720SJens Axboe 				/*
7739cf6b720SJens Axboe 				 * Kick off IO for the previous mapping. Note
7749cf6b720SJens Axboe 				 * that we will not run the very last mapping,
7759cf6b720SJens Axboe 				 * wait_on_buffer() will do that for us
7769cf6b720SJens Axboe 				 * through sync_buffer().
7779cf6b720SJens Axboe 				 */
7781da177e4SLinus Torvalds 				brelse(bh);
7791da177e4SLinus Torvalds 				spin_lock(lock);
7801da177e4SLinus Torvalds 			}
7811da177e4SLinus Torvalds 		}
7821da177e4SLinus Torvalds 	}
7831da177e4SLinus Torvalds 
7841da177e4SLinus Torvalds 	while (!list_empty(&tmp)) {
7851da177e4SLinus Torvalds 		bh = BH_ENTRY(tmp.prev);
7861da177e4SLinus Torvalds 		get_bh(bh);
787535ee2fbSJan Kara 		mapping = bh->b_assoc_map;
788535ee2fbSJan Kara 		__remove_assoc_queue(bh);
789535ee2fbSJan Kara 		/* Avoid race with mark_buffer_dirty_inode() which does
790535ee2fbSJan Kara 		 * a lockless check and we rely on seeing the dirty bit */
791535ee2fbSJan Kara 		smp_mb();
792535ee2fbSJan Kara 		if (buffer_dirty(bh)) {
793535ee2fbSJan Kara 			list_add(&bh->b_assoc_buffers,
794e3892296SJan Kara 				 &mapping->private_list);
795535ee2fbSJan Kara 			bh->b_assoc_map = mapping;
796535ee2fbSJan Kara 		}
7971da177e4SLinus Torvalds 		spin_unlock(lock);
7981da177e4SLinus Torvalds 		wait_on_buffer(bh);
7991da177e4SLinus Torvalds 		if (!buffer_uptodate(bh))
8001da177e4SLinus Torvalds 			err = -EIO;
8011da177e4SLinus Torvalds 		brelse(bh);
8021da177e4SLinus Torvalds 		spin_lock(lock);
8031da177e4SLinus Torvalds 	}
8041da177e4SLinus Torvalds 
8051da177e4SLinus Torvalds 	spin_unlock(lock);
8061da177e4SLinus Torvalds 	err2 = osync_buffers_list(lock, list);
8071da177e4SLinus Torvalds 	if (err)
8081da177e4SLinus Torvalds 		return err;
8091da177e4SLinus Torvalds 	else
8101da177e4SLinus Torvalds 		return err2;
8111da177e4SLinus Torvalds }
8121da177e4SLinus Torvalds 
8131da177e4SLinus Torvalds /*
8141da177e4SLinus Torvalds  * Invalidate any and all dirty buffers on a given inode.  We are
8151da177e4SLinus Torvalds  * probably unmounting the fs, but that doesn't mean we have already
8161da177e4SLinus Torvalds  * done a sync().  Just drop the buffers from the inode list.
8171da177e4SLinus Torvalds  *
8181da177e4SLinus Torvalds  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
8191da177e4SLinus Torvalds  * assumes that all the buffers are against the blockdev.  Not true
8201da177e4SLinus Torvalds  * for reiserfs.
8211da177e4SLinus Torvalds  */
8221da177e4SLinus Torvalds void invalidate_inode_buffers(struct inode *inode)
8231da177e4SLinus Torvalds {
8241da177e4SLinus Torvalds 	if (inode_has_buffers(inode)) {
8251da177e4SLinus Torvalds 		struct address_space *mapping = &inode->i_data;
8261da177e4SLinus Torvalds 		struct list_head *list = &mapping->private_list;
8271da177e4SLinus Torvalds 		struct address_space *buffer_mapping = mapping->assoc_mapping;
8281da177e4SLinus Torvalds 
8291da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
8301da177e4SLinus Torvalds 		while (!list_empty(list))
8311da177e4SLinus Torvalds 			__remove_assoc_queue(BH_ENTRY(list->next));
8321da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
8331da177e4SLinus Torvalds 	}
8341da177e4SLinus Torvalds }
83552b19ac9SJan Kara EXPORT_SYMBOL(invalidate_inode_buffers);
8361da177e4SLinus Torvalds 
8371da177e4SLinus Torvalds /*
8381da177e4SLinus Torvalds  * Remove any clean buffers from the inode's buffer list.  This is called
8391da177e4SLinus Torvalds  * when we're trying to free the inode itself.  Those buffers can pin it.
8401da177e4SLinus Torvalds  *
8411da177e4SLinus Torvalds  * Returns true if all buffers were removed.
8421da177e4SLinus Torvalds  */
8431da177e4SLinus Torvalds int remove_inode_buffers(struct inode *inode)
8441da177e4SLinus Torvalds {
8451da177e4SLinus Torvalds 	int ret = 1;
8461da177e4SLinus Torvalds 
8471da177e4SLinus Torvalds 	if (inode_has_buffers(inode)) {
8481da177e4SLinus Torvalds 		struct address_space *mapping = &inode->i_data;
8491da177e4SLinus Torvalds 		struct list_head *list = &mapping->private_list;
8501da177e4SLinus Torvalds 		struct address_space *buffer_mapping = mapping->assoc_mapping;
8511da177e4SLinus Torvalds 
8521da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
8531da177e4SLinus Torvalds 		while (!list_empty(list)) {
8541da177e4SLinus Torvalds 			struct buffer_head *bh = BH_ENTRY(list->next);
8551da177e4SLinus Torvalds 			if (buffer_dirty(bh)) {
8561da177e4SLinus Torvalds 				ret = 0;
8571da177e4SLinus Torvalds 				break;
8581da177e4SLinus Torvalds 			}
8591da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
8601da177e4SLinus Torvalds 		}
8611da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
8621da177e4SLinus Torvalds 	}
8631da177e4SLinus Torvalds 	return ret;
8641da177e4SLinus Torvalds }
8651da177e4SLinus Torvalds 
8661da177e4SLinus Torvalds /*
8671da177e4SLinus Torvalds  * Create the appropriate buffers when given a page for data area and
8681da177e4SLinus Torvalds  * the size of each buffer.. Use the bh->b_this_page linked list to
8691da177e4SLinus Torvalds  * follow the buffers created.  Return NULL if unable to create more
8701da177e4SLinus Torvalds  * buffers.
8711da177e4SLinus Torvalds  *
8721da177e4SLinus Torvalds  * The retry flag is used to differentiate async IO (paging, swapping)
8731da177e4SLinus Torvalds  * which may not fail from ordinary buffer allocations.
8741da177e4SLinus Torvalds  */
8751da177e4SLinus Torvalds struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
8761da177e4SLinus Torvalds 		int retry)
8771da177e4SLinus Torvalds {
8781da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
8791da177e4SLinus Torvalds 	long offset;
8801da177e4SLinus Torvalds 
8811da177e4SLinus Torvalds try_again:
8821da177e4SLinus Torvalds 	head = NULL;
8831da177e4SLinus Torvalds 	offset = PAGE_SIZE;
8841da177e4SLinus Torvalds 	while ((offset -= size) >= 0) {
8851da177e4SLinus Torvalds 		bh = alloc_buffer_head(GFP_NOFS);
8861da177e4SLinus Torvalds 		if (!bh)
8871da177e4SLinus Torvalds 			goto no_grow;
8881da177e4SLinus Torvalds 
8891da177e4SLinus Torvalds 		bh->b_bdev = NULL;
8901da177e4SLinus Torvalds 		bh->b_this_page = head;
8911da177e4SLinus Torvalds 		bh->b_blocknr = -1;
8921da177e4SLinus Torvalds 		head = bh;
8931da177e4SLinus Torvalds 
8941da177e4SLinus Torvalds 		bh->b_state = 0;
8951da177e4SLinus Torvalds 		atomic_set(&bh->b_count, 0);
8961da177e4SLinus Torvalds 		bh->b_size = size;
8971da177e4SLinus Torvalds 
8981da177e4SLinus Torvalds 		/* Link the buffer to its page */
8991da177e4SLinus Torvalds 		set_bh_page(bh, page, offset);
9001da177e4SLinus Torvalds 
90101ffe339SNathan Scott 		init_buffer(bh, NULL, NULL);
9021da177e4SLinus Torvalds 	}
9031da177e4SLinus Torvalds 	return head;
9041da177e4SLinus Torvalds /*
9051da177e4SLinus Torvalds  * In case anything failed, we just free everything we got.
9061da177e4SLinus Torvalds  */
9071da177e4SLinus Torvalds no_grow:
9081da177e4SLinus Torvalds 	if (head) {
9091da177e4SLinus Torvalds 		do {
9101da177e4SLinus Torvalds 			bh = head;
9111da177e4SLinus Torvalds 			head = head->b_this_page;
9121da177e4SLinus Torvalds 			free_buffer_head(bh);
9131da177e4SLinus Torvalds 		} while (head);
9141da177e4SLinus Torvalds 	}
9151da177e4SLinus Torvalds 
9161da177e4SLinus Torvalds 	/*
9171da177e4SLinus Torvalds 	 * Return failure for non-async IO requests.  Async IO requests
9181da177e4SLinus Torvalds 	 * are not allowed to fail, so we have to wait until buffer heads
9191da177e4SLinus Torvalds 	 * become available.  But we don't want tasks sleeping with
9201da177e4SLinus Torvalds 	 * partially complete buffers, so all were released above.
9211da177e4SLinus Torvalds 	 */
9221da177e4SLinus Torvalds 	if (!retry)
9231da177e4SLinus Torvalds 		return NULL;
9241da177e4SLinus Torvalds 
9251da177e4SLinus Torvalds 	/* We're _really_ low on memory. Now we just
9261da177e4SLinus Torvalds 	 * wait for old buffer heads to become free due to
9271da177e4SLinus Torvalds 	 * finishing IO.  Since this is an async request and
9281da177e4SLinus Torvalds 	 * the reserve list is empty, we're sure there are
9291da177e4SLinus Torvalds 	 * async buffer heads in use.
9301da177e4SLinus Torvalds 	 */
9311da177e4SLinus Torvalds 	free_more_memory();
9321da177e4SLinus Torvalds 	goto try_again;
9331da177e4SLinus Torvalds }
9341da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(alloc_page_buffers);
9351da177e4SLinus Torvalds 
9361da177e4SLinus Torvalds static inline void
9371da177e4SLinus Torvalds link_dev_buffers(struct page *page, struct buffer_head *head)
9381da177e4SLinus Torvalds {
9391da177e4SLinus Torvalds 	struct buffer_head *bh, *tail;
9401da177e4SLinus Torvalds 
9411da177e4SLinus Torvalds 	bh = head;
9421da177e4SLinus Torvalds 	do {
9431da177e4SLinus Torvalds 		tail = bh;
9441da177e4SLinus Torvalds 		bh = bh->b_this_page;
9451da177e4SLinus Torvalds 	} while (bh);
9461da177e4SLinus Torvalds 	tail->b_this_page = head;
9471da177e4SLinus Torvalds 	attach_page_buffers(page, head);
9481da177e4SLinus Torvalds }
9491da177e4SLinus Torvalds 
9501da177e4SLinus Torvalds /*
9511da177e4SLinus Torvalds  * Initialise the state of a blockdev page's buffers.
9521da177e4SLinus Torvalds  */
9531da177e4SLinus Torvalds static void
9541da177e4SLinus Torvalds init_page_buffers(struct page *page, struct block_device *bdev,
9551da177e4SLinus Torvalds 			sector_t block, int size)
9561da177e4SLinus Torvalds {
9571da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);
9581da177e4SLinus Torvalds 	struct buffer_head *bh = head;
9591da177e4SLinus Torvalds 	int uptodate = PageUptodate(page);
9601da177e4SLinus Torvalds 
9611da177e4SLinus Torvalds 	do {
9621da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
9631da177e4SLinus Torvalds 			init_buffer(bh, NULL, NULL);
9641da177e4SLinus Torvalds 			bh->b_bdev = bdev;
9651da177e4SLinus Torvalds 			bh->b_blocknr = block;
9661da177e4SLinus Torvalds 			if (uptodate)
9671da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
9681da177e4SLinus Torvalds 			set_buffer_mapped(bh);
9691da177e4SLinus Torvalds 		}
9701da177e4SLinus Torvalds 		block++;
9711da177e4SLinus Torvalds 		bh = bh->b_this_page;
9721da177e4SLinus Torvalds 	} while (bh != head);
9731da177e4SLinus Torvalds }
9741da177e4SLinus Torvalds 
9751da177e4SLinus Torvalds /*
9761da177e4SLinus Torvalds  * Create the page-cache page that contains the requested block.
9771da177e4SLinus Torvalds  *
9781da177e4SLinus Torvalds  * This is user purely for blockdev mappings.
9791da177e4SLinus Torvalds  */
9801da177e4SLinus Torvalds static struct page *
9811da177e4SLinus Torvalds grow_dev_page(struct block_device *bdev, sector_t block,
9821da177e4SLinus Torvalds 		pgoff_t index, int size)
9831da177e4SLinus Torvalds {
9841da177e4SLinus Torvalds 	struct inode *inode = bdev->bd_inode;
9851da177e4SLinus Torvalds 	struct page *page;
9861da177e4SLinus Torvalds 	struct buffer_head *bh;
9871da177e4SLinus Torvalds 
988ea125892SChristoph Lameter 	page = find_or_create_page(inode->i_mapping, index,
989769848c0SMel Gorman 		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
9901da177e4SLinus Torvalds 	if (!page)
9911da177e4SLinus Torvalds 		return NULL;
9921da177e4SLinus Torvalds 
993e827f923SEric Sesterhenn 	BUG_ON(!PageLocked(page));
9941da177e4SLinus Torvalds 
9951da177e4SLinus Torvalds 	if (page_has_buffers(page)) {
9961da177e4SLinus Torvalds 		bh = page_buffers(page);
9971da177e4SLinus Torvalds 		if (bh->b_size == size) {
9981da177e4SLinus Torvalds 			init_page_buffers(page, bdev, block, size);
9991da177e4SLinus Torvalds 			return page;
10001da177e4SLinus Torvalds 		}
10011da177e4SLinus Torvalds 		if (!try_to_free_buffers(page))
10021da177e4SLinus Torvalds 			goto failed;
10031da177e4SLinus Torvalds 	}
10041da177e4SLinus Torvalds 
10051da177e4SLinus Torvalds 	/*
10061da177e4SLinus Torvalds 	 * Allocate some buffers for this page
10071da177e4SLinus Torvalds 	 */
10081da177e4SLinus Torvalds 	bh = alloc_page_buffers(page, size, 0);
10091da177e4SLinus Torvalds 	if (!bh)
10101da177e4SLinus Torvalds 		goto failed;
10111da177e4SLinus Torvalds 
10121da177e4SLinus Torvalds 	/*
10131da177e4SLinus Torvalds 	 * Link the page to the buffers and initialise them.  Take the
10141da177e4SLinus Torvalds 	 * lock to be atomic wrt __find_get_block(), which does not
10151da177e4SLinus Torvalds 	 * run under the page lock.
10161da177e4SLinus Torvalds 	 */
10171da177e4SLinus Torvalds 	spin_lock(&inode->i_mapping->private_lock);
10181da177e4SLinus Torvalds 	link_dev_buffers(page, bh);
10191da177e4SLinus Torvalds 	init_page_buffers(page, bdev, block, size);
10201da177e4SLinus Torvalds 	spin_unlock(&inode->i_mapping->private_lock);
10211da177e4SLinus Torvalds 	return page;
10221da177e4SLinus Torvalds 
10231da177e4SLinus Torvalds failed:
10241da177e4SLinus Torvalds 	BUG();
10251da177e4SLinus Torvalds 	unlock_page(page);
10261da177e4SLinus Torvalds 	page_cache_release(page);
10271da177e4SLinus Torvalds 	return NULL;
10281da177e4SLinus Torvalds }
10291da177e4SLinus Torvalds 
10301da177e4SLinus Torvalds /*
10311da177e4SLinus Torvalds  * Create buffers for the specified block device block's page.  If
10321da177e4SLinus Torvalds  * that page was dirty, the buffers are set dirty also.
10331da177e4SLinus Torvalds  */
1034858119e1SArjan van de Ven static int
10351da177e4SLinus Torvalds grow_buffers(struct block_device *bdev, sector_t block, int size)
10361da177e4SLinus Torvalds {
10371da177e4SLinus Torvalds 	struct page *page;
10381da177e4SLinus Torvalds 	pgoff_t index;
10391da177e4SLinus Torvalds 	int sizebits;
10401da177e4SLinus Torvalds 
10411da177e4SLinus Torvalds 	sizebits = -1;
10421da177e4SLinus Torvalds 	do {
10431da177e4SLinus Torvalds 		sizebits++;
10441da177e4SLinus Torvalds 	} while ((size << sizebits) < PAGE_SIZE);
10451da177e4SLinus Torvalds 
10461da177e4SLinus Torvalds 	index = block >> sizebits;
10471da177e4SLinus Torvalds 
1048e5657933SAndrew Morton 	/*
1049e5657933SAndrew Morton 	 * Check for a block which wants to lie outside our maximum possible
1050e5657933SAndrew Morton 	 * pagecache index.  (this comparison is done using sector_t types).
1051e5657933SAndrew Morton 	 */
1052e5657933SAndrew Morton 	if (unlikely(index != block >> sizebits)) {
1053e5657933SAndrew Morton 		char b[BDEVNAME_SIZE];
1054e5657933SAndrew Morton 
1055e5657933SAndrew Morton 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1056e5657933SAndrew Morton 			"device %s\n",
10578e24eea7SHarvey Harrison 			__func__, (unsigned long long)block,
1058e5657933SAndrew Morton 			bdevname(bdev, b));
1059e5657933SAndrew Morton 		return -EIO;
1060e5657933SAndrew Morton 	}
1061e5657933SAndrew Morton 	block = index << sizebits;
10621da177e4SLinus Torvalds 	/* Create a page with the proper size buffers.. */
10631da177e4SLinus Torvalds 	page = grow_dev_page(bdev, block, index, size);
10641da177e4SLinus Torvalds 	if (!page)
10651da177e4SLinus Torvalds 		return 0;
10661da177e4SLinus Torvalds 	unlock_page(page);
10671da177e4SLinus Torvalds 	page_cache_release(page);
10681da177e4SLinus Torvalds 	return 1;
10691da177e4SLinus Torvalds }
10701da177e4SLinus Torvalds 
107175c96f85SAdrian Bunk static struct buffer_head *
10721da177e4SLinus Torvalds __getblk_slow(struct block_device *bdev, sector_t block, int size)
10731da177e4SLinus Torvalds {
10741da177e4SLinus Torvalds 	/* Size must be multiple of hard sectorsize */
1075e1defc4fSMartin K. Petersen 	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
10761da177e4SLinus Torvalds 			(size < 512 || size > PAGE_SIZE))) {
10771da177e4SLinus Torvalds 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
10781da177e4SLinus Torvalds 					size);
1079e1defc4fSMartin K. Petersen 		printk(KERN_ERR "logical block size: %d\n",
1080e1defc4fSMartin K. Petersen 					bdev_logical_block_size(bdev));
10811da177e4SLinus Torvalds 
10821da177e4SLinus Torvalds 		dump_stack();
10831da177e4SLinus Torvalds 		return NULL;
10841da177e4SLinus Torvalds 	}
10851da177e4SLinus Torvalds 
10861da177e4SLinus Torvalds 	for (;;) {
10871da177e4SLinus Torvalds 		struct buffer_head * bh;
1088e5657933SAndrew Morton 		int ret;
10891da177e4SLinus Torvalds 
10901da177e4SLinus Torvalds 		bh = __find_get_block(bdev, block, size);
10911da177e4SLinus Torvalds 		if (bh)
10921da177e4SLinus Torvalds 			return bh;
10931da177e4SLinus Torvalds 
1094e5657933SAndrew Morton 		ret = grow_buffers(bdev, block, size);
1095e5657933SAndrew Morton 		if (ret < 0)
1096e5657933SAndrew Morton 			return NULL;
1097e5657933SAndrew Morton 		if (ret == 0)
10981da177e4SLinus Torvalds 			free_more_memory();
10991da177e4SLinus Torvalds 	}
11001da177e4SLinus Torvalds }
11011da177e4SLinus Torvalds 
11021da177e4SLinus Torvalds /*
11031da177e4SLinus Torvalds  * The relationship between dirty buffers and dirty pages:
11041da177e4SLinus Torvalds  *
11051da177e4SLinus Torvalds  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
11061da177e4SLinus Torvalds  * the page is tagged dirty in its radix tree.
11071da177e4SLinus Torvalds  *
11081da177e4SLinus Torvalds  * At all times, the dirtiness of the buffers represents the dirtiness of
11091da177e4SLinus Torvalds  * subsections of the page.  If the page has buffers, the page dirty bit is
11101da177e4SLinus Torvalds  * merely a hint about the true dirty state.
11111da177e4SLinus Torvalds  *
11121da177e4SLinus Torvalds  * When a page is set dirty in its entirety, all its buffers are marked dirty
11131da177e4SLinus Torvalds  * (if the page has buffers).
11141da177e4SLinus Torvalds  *
11151da177e4SLinus Torvalds  * When a buffer is marked dirty, its page is dirtied, but the page's other
11161da177e4SLinus Torvalds  * buffers are not.
11171da177e4SLinus Torvalds  *
11181da177e4SLinus Torvalds  * Also.  When blockdev buffers are explicitly read with bread(), they
11191da177e4SLinus Torvalds  * individually become uptodate.  But their backing page remains not
11201da177e4SLinus Torvalds  * uptodate - even if all of its buffers are uptodate.  A subsequent
11211da177e4SLinus Torvalds  * block_read_full_page() against that page will discover all the uptodate
11221da177e4SLinus Torvalds  * buffers, will set the page uptodate and will perform no I/O.
11231da177e4SLinus Torvalds  */
11241da177e4SLinus Torvalds 
11251da177e4SLinus Torvalds /**
11261da177e4SLinus Torvalds  * mark_buffer_dirty - mark a buffer_head as needing writeout
112767be2dd1SMartin Waitz  * @bh: the buffer_head to mark dirty
11281da177e4SLinus Torvalds  *
11291da177e4SLinus Torvalds  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
11301da177e4SLinus Torvalds  * backing page dirty, then tag the page as dirty in its address_space's radix
11311da177e4SLinus Torvalds  * tree and then attach the address_space's inode to its superblock's dirty
11321da177e4SLinus Torvalds  * inode list.
11331da177e4SLinus Torvalds  *
11341da177e4SLinus Torvalds  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
11351da177e4SLinus Torvalds  * mapping->tree_lock and the global inode_lock.
11361da177e4SLinus Torvalds  */
1137fc9b52cdSHarvey Harrison void mark_buffer_dirty(struct buffer_head *bh)
11381da177e4SLinus Torvalds {
1139787d2214SNick Piggin 	WARN_ON_ONCE(!buffer_uptodate(bh));
11401be62dc1SLinus Torvalds 
11411be62dc1SLinus Torvalds 	/*
11421be62dc1SLinus Torvalds 	 * Very *carefully* optimize the it-is-already-dirty case.
11431be62dc1SLinus Torvalds 	 *
11441be62dc1SLinus Torvalds 	 * Don't let the final "is it dirty" escape to before we
11451be62dc1SLinus Torvalds 	 * perhaps modified the buffer.
11461be62dc1SLinus Torvalds 	 */
11471be62dc1SLinus Torvalds 	if (buffer_dirty(bh)) {
11481be62dc1SLinus Torvalds 		smp_mb();
11491be62dc1SLinus Torvalds 		if (buffer_dirty(bh))
11501be62dc1SLinus Torvalds 			return;
11511be62dc1SLinus Torvalds 	}
11521be62dc1SLinus Torvalds 
1153a8e7d49aSLinus Torvalds 	if (!test_set_buffer_dirty(bh)) {
1154a8e7d49aSLinus Torvalds 		struct page *page = bh->b_page;
11558e9d78edSLinus Torvalds 		if (!TestSetPageDirty(page)) {
11568e9d78edSLinus Torvalds 			struct address_space *mapping = page_mapping(page);
11578e9d78edSLinus Torvalds 			if (mapping)
11588e9d78edSLinus Torvalds 				__set_page_dirty(page, mapping, 0);
11598e9d78edSLinus Torvalds 		}
1160a8e7d49aSLinus Torvalds 	}
11611da177e4SLinus Torvalds }
11621fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(mark_buffer_dirty);
11631da177e4SLinus Torvalds 
11641da177e4SLinus Torvalds /*
11651da177e4SLinus Torvalds  * Decrement a buffer_head's reference count.  If all buffers against a page
11661da177e4SLinus Torvalds  * have zero reference count, are clean and unlocked, and if the page is clean
11671da177e4SLinus Torvalds  * and unlocked then try_to_free_buffers() may strip the buffers from the page
11681da177e4SLinus Torvalds  * in preparation for freeing it (sometimes, rarely, buffers are removed from
11691da177e4SLinus Torvalds  * a page but it ends up not being freed, and buffers may later be reattached).
11701da177e4SLinus Torvalds  */
11711da177e4SLinus Torvalds void __brelse(struct buffer_head * buf)
11721da177e4SLinus Torvalds {
11731da177e4SLinus Torvalds 	if (atomic_read(&buf->b_count)) {
11741da177e4SLinus Torvalds 		put_bh(buf);
11751da177e4SLinus Torvalds 		return;
11761da177e4SLinus Torvalds 	}
11775c752ad9SArjan van de Ven 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
11781da177e4SLinus Torvalds }
11791fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__brelse);
11801da177e4SLinus Torvalds 
11811da177e4SLinus Torvalds /*
11821da177e4SLinus Torvalds  * bforget() is like brelse(), except it discards any
11831da177e4SLinus Torvalds  * potentially dirty data.
11841da177e4SLinus Torvalds  */
11851da177e4SLinus Torvalds void __bforget(struct buffer_head *bh)
11861da177e4SLinus Torvalds {
11871da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
1188535ee2fbSJan Kara 	if (bh->b_assoc_map) {
11891da177e4SLinus Torvalds 		struct address_space *buffer_mapping = bh->b_page->mapping;
11901da177e4SLinus Torvalds 
11911da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
11921da177e4SLinus Torvalds 		list_del_init(&bh->b_assoc_buffers);
119358ff407bSJan Kara 		bh->b_assoc_map = NULL;
11941da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
11951da177e4SLinus Torvalds 	}
11961da177e4SLinus Torvalds 	__brelse(bh);
11971da177e4SLinus Torvalds }
11981fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__bforget);
11991da177e4SLinus Torvalds 
12001da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh)
12011da177e4SLinus Torvalds {
12021da177e4SLinus Torvalds 	lock_buffer(bh);
12031da177e4SLinus Torvalds 	if (buffer_uptodate(bh)) {
12041da177e4SLinus Torvalds 		unlock_buffer(bh);
12051da177e4SLinus Torvalds 		return bh;
12061da177e4SLinus Torvalds 	} else {
12071da177e4SLinus Torvalds 		get_bh(bh);
12081da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_read_sync;
12091da177e4SLinus Torvalds 		submit_bh(READ, bh);
12101da177e4SLinus Torvalds 		wait_on_buffer(bh);
12111da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
12121da177e4SLinus Torvalds 			return bh;
12131da177e4SLinus Torvalds 	}
12141da177e4SLinus Torvalds 	brelse(bh);
12151da177e4SLinus Torvalds 	return NULL;
12161da177e4SLinus Torvalds }
12171da177e4SLinus Torvalds 
12181da177e4SLinus Torvalds /*
12191da177e4SLinus Torvalds  * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
12201da177e4SLinus Torvalds  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
12211da177e4SLinus Torvalds  * refcount elevated by one when they're in an LRU.  A buffer can only appear
12221da177e4SLinus Torvalds  * once in a particular CPU's LRU.  A single buffer can be present in multiple
12231da177e4SLinus Torvalds  * CPU's LRUs at the same time.
12241da177e4SLinus Torvalds  *
12251da177e4SLinus Torvalds  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
12261da177e4SLinus Torvalds  * sb_find_get_block().
12271da177e4SLinus Torvalds  *
12281da177e4SLinus Torvalds  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
12291da177e4SLinus Torvalds  * a local interrupt disable for that.
12301da177e4SLinus Torvalds  */
12311da177e4SLinus Torvalds 
12321da177e4SLinus Torvalds #define BH_LRU_SIZE	8
12331da177e4SLinus Torvalds 
12341da177e4SLinus Torvalds struct bh_lru {
12351da177e4SLinus Torvalds 	struct buffer_head *bhs[BH_LRU_SIZE];
12361da177e4SLinus Torvalds };
12371da177e4SLinus Torvalds 
12381da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
12391da177e4SLinus Torvalds 
12401da177e4SLinus Torvalds #ifdef CONFIG_SMP
12411da177e4SLinus Torvalds #define bh_lru_lock()	local_irq_disable()
12421da177e4SLinus Torvalds #define bh_lru_unlock()	local_irq_enable()
12431da177e4SLinus Torvalds #else
12441da177e4SLinus Torvalds #define bh_lru_lock()	preempt_disable()
12451da177e4SLinus Torvalds #define bh_lru_unlock()	preempt_enable()
12461da177e4SLinus Torvalds #endif
12471da177e4SLinus Torvalds 
12481da177e4SLinus Torvalds static inline void check_irqs_on(void)
12491da177e4SLinus Torvalds {
12501da177e4SLinus Torvalds #ifdef irqs_disabled
12511da177e4SLinus Torvalds 	BUG_ON(irqs_disabled());
12521da177e4SLinus Torvalds #endif
12531da177e4SLinus Torvalds }
12541da177e4SLinus Torvalds 
12551da177e4SLinus Torvalds /*
12561da177e4SLinus Torvalds  * The LRU management algorithm is dopey-but-simple.  Sorry.
12571da177e4SLinus Torvalds  */
12581da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh)
12591da177e4SLinus Torvalds {
12601da177e4SLinus Torvalds 	struct buffer_head *evictee = NULL;
12611da177e4SLinus Torvalds 
12621da177e4SLinus Torvalds 	check_irqs_on();
12631da177e4SLinus Torvalds 	bh_lru_lock();
1264c7b92516SChristoph Lameter 	if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
12651da177e4SLinus Torvalds 		struct buffer_head *bhs[BH_LRU_SIZE];
12661da177e4SLinus Torvalds 		int in;
12671da177e4SLinus Torvalds 		int out = 0;
12681da177e4SLinus Torvalds 
12691da177e4SLinus Torvalds 		get_bh(bh);
12701da177e4SLinus Torvalds 		bhs[out++] = bh;
12711da177e4SLinus Torvalds 		for (in = 0; in < BH_LRU_SIZE; in++) {
1272c7b92516SChristoph Lameter 			struct buffer_head *bh2 =
1273c7b92516SChristoph Lameter 				__this_cpu_read(bh_lrus.bhs[in]);
12741da177e4SLinus Torvalds 
12751da177e4SLinus Torvalds 			if (bh2 == bh) {
12761da177e4SLinus Torvalds 				__brelse(bh2);
12771da177e4SLinus Torvalds 			} else {
12781da177e4SLinus Torvalds 				if (out >= BH_LRU_SIZE) {
12791da177e4SLinus Torvalds 					BUG_ON(evictee != NULL);
12801da177e4SLinus Torvalds 					evictee = bh2;
12811da177e4SLinus Torvalds 				} else {
12821da177e4SLinus Torvalds 					bhs[out++] = bh2;
12831da177e4SLinus Torvalds 				}
12841da177e4SLinus Torvalds 			}
12851da177e4SLinus Torvalds 		}
12861da177e4SLinus Torvalds 		while (out < BH_LRU_SIZE)
12871da177e4SLinus Torvalds 			bhs[out++] = NULL;
1288c7b92516SChristoph Lameter 		memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
12891da177e4SLinus Torvalds 	}
12901da177e4SLinus Torvalds 	bh_lru_unlock();
12911da177e4SLinus Torvalds 
12921da177e4SLinus Torvalds 	if (evictee)
12931da177e4SLinus Torvalds 		__brelse(evictee);
12941da177e4SLinus Torvalds }
12951da177e4SLinus Torvalds 
12961da177e4SLinus Torvalds /*
12971da177e4SLinus Torvalds  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
12981da177e4SLinus Torvalds  */
1299858119e1SArjan van de Ven static struct buffer_head *
13003991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
13011da177e4SLinus Torvalds {
13021da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
13033991d3bdSTomasz Kvarsin 	unsigned int i;
13041da177e4SLinus Torvalds 
13051da177e4SLinus Torvalds 	check_irqs_on();
13061da177e4SLinus Torvalds 	bh_lru_lock();
13071da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
1308c7b92516SChristoph Lameter 		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
13091da177e4SLinus Torvalds 
13101da177e4SLinus Torvalds 		if (bh && bh->b_bdev == bdev &&
13111da177e4SLinus Torvalds 				bh->b_blocknr == block && bh->b_size == size) {
13121da177e4SLinus Torvalds 			if (i) {
13131da177e4SLinus Torvalds 				while (i) {
1314c7b92516SChristoph Lameter 					__this_cpu_write(bh_lrus.bhs[i],
1315c7b92516SChristoph Lameter 						__this_cpu_read(bh_lrus.bhs[i - 1]));
13161da177e4SLinus Torvalds 					i--;
13171da177e4SLinus Torvalds 				}
1318c7b92516SChristoph Lameter 				__this_cpu_write(bh_lrus.bhs[0], bh);
13191da177e4SLinus Torvalds 			}
13201da177e4SLinus Torvalds 			get_bh(bh);
13211da177e4SLinus Torvalds 			ret = bh;
13221da177e4SLinus Torvalds 			break;
13231da177e4SLinus Torvalds 		}
13241da177e4SLinus Torvalds 	}
13251da177e4SLinus Torvalds 	bh_lru_unlock();
13261da177e4SLinus Torvalds 	return ret;
13271da177e4SLinus Torvalds }
13281da177e4SLinus Torvalds 
13291da177e4SLinus Torvalds /*
13301da177e4SLinus Torvalds  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
13311da177e4SLinus Torvalds  * it in the LRU and mark it as accessed.  If it is not present then return
13321da177e4SLinus Torvalds  * NULL
13331da177e4SLinus Torvalds  */
13341da177e4SLinus Torvalds struct buffer_head *
13353991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
13361da177e4SLinus Torvalds {
13371da177e4SLinus Torvalds 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
13381da177e4SLinus Torvalds 
13391da177e4SLinus Torvalds 	if (bh == NULL) {
1340385fd4c5SCoywolf Qi Hunt 		bh = __find_get_block_slow(bdev, block);
13411da177e4SLinus Torvalds 		if (bh)
13421da177e4SLinus Torvalds 			bh_lru_install(bh);
13431da177e4SLinus Torvalds 	}
13441da177e4SLinus Torvalds 	if (bh)
13451da177e4SLinus Torvalds 		touch_buffer(bh);
13461da177e4SLinus Torvalds 	return bh;
13471da177e4SLinus Torvalds }
13481da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block);
13491da177e4SLinus Torvalds 
13501da177e4SLinus Torvalds /*
13511da177e4SLinus Torvalds  * __getblk will locate (and, if necessary, create) the buffer_head
13521da177e4SLinus Torvalds  * which corresponds to the passed block_device, block and size. The
13531da177e4SLinus Torvalds  * returned buffer has its reference count incremented.
13541da177e4SLinus Torvalds  *
13551da177e4SLinus Torvalds  * __getblk() cannot fail - it just keeps trying.  If you pass it an
13561da177e4SLinus Torvalds  * illegal block number, __getblk() will happily return a buffer_head
13571da177e4SLinus Torvalds  * which represents the non-existent block.  Very weird.
13581da177e4SLinus Torvalds  *
13591da177e4SLinus Torvalds  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
13601da177e4SLinus Torvalds  * attempt is failing.  FIXME, perhaps?
13611da177e4SLinus Torvalds  */
13621da177e4SLinus Torvalds struct buffer_head *
13633991d3bdSTomasz Kvarsin __getblk(struct block_device *bdev, sector_t block, unsigned size)
13641da177e4SLinus Torvalds {
13651da177e4SLinus Torvalds 	struct buffer_head *bh = __find_get_block(bdev, block, size);
13661da177e4SLinus Torvalds 
13671da177e4SLinus Torvalds 	might_sleep();
13681da177e4SLinus Torvalds 	if (bh == NULL)
13691da177e4SLinus Torvalds 		bh = __getblk_slow(bdev, block, size);
13701da177e4SLinus Torvalds 	return bh;
13711da177e4SLinus Torvalds }
13721da177e4SLinus Torvalds EXPORT_SYMBOL(__getblk);
13731da177e4SLinus Torvalds 
13741da177e4SLinus Torvalds /*
13751da177e4SLinus Torvalds  * Do async read-ahead on a buffer..
13761da177e4SLinus Torvalds  */
13773991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
13781da177e4SLinus Torvalds {
13791da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
1380a3e713b5SAndrew Morton 	if (likely(bh)) {
13811da177e4SLinus Torvalds 		ll_rw_block(READA, 1, &bh);
13821da177e4SLinus Torvalds 		brelse(bh);
13831da177e4SLinus Torvalds 	}
1384a3e713b5SAndrew Morton }
13851da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead);
13861da177e4SLinus Torvalds 
13871da177e4SLinus Torvalds /**
13881da177e4SLinus Torvalds  *  __bread() - reads a specified block and returns the bh
138967be2dd1SMartin Waitz  *  @bdev: the block_device to read from
13901da177e4SLinus Torvalds  *  @block: number of block
13911da177e4SLinus Torvalds  *  @size: size (in bytes) to read
13921da177e4SLinus Torvalds  *
13931da177e4SLinus Torvalds  *  Reads a specified block, and returns buffer head that contains it.
13941da177e4SLinus Torvalds  *  It returns NULL if the block was unreadable.
13951da177e4SLinus Torvalds  */
13961da177e4SLinus Torvalds struct buffer_head *
13973991d3bdSTomasz Kvarsin __bread(struct block_device *bdev, sector_t block, unsigned size)
13981da177e4SLinus Torvalds {
13991da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
14001da177e4SLinus Torvalds 
1401a3e713b5SAndrew Morton 	if (likely(bh) && !buffer_uptodate(bh))
14021da177e4SLinus Torvalds 		bh = __bread_slow(bh);
14031da177e4SLinus Torvalds 	return bh;
14041da177e4SLinus Torvalds }
14051da177e4SLinus Torvalds EXPORT_SYMBOL(__bread);
14061da177e4SLinus Torvalds 
14071da177e4SLinus Torvalds /*
14081da177e4SLinus Torvalds  * invalidate_bh_lrus() is called rarely - but not only at unmount.
14091da177e4SLinus Torvalds  * This doesn't race because it runs in each cpu either in irq
14101da177e4SLinus Torvalds  * or with preempt disabled.
14111da177e4SLinus Torvalds  */
14121da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg)
14131da177e4SLinus Torvalds {
14141da177e4SLinus Torvalds 	struct bh_lru *b = &get_cpu_var(bh_lrus);
14151da177e4SLinus Torvalds 	int i;
14161da177e4SLinus Torvalds 
14171da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
14181da177e4SLinus Torvalds 		brelse(b->bhs[i]);
14191da177e4SLinus Torvalds 		b->bhs[i] = NULL;
14201da177e4SLinus Torvalds 	}
14211da177e4SLinus Torvalds 	put_cpu_var(bh_lrus);
14221da177e4SLinus Torvalds }
14231da177e4SLinus Torvalds 
1424f9a14399SPeter Zijlstra void invalidate_bh_lrus(void)
14251da177e4SLinus Torvalds {
142615c8b6c1SJens Axboe 	on_each_cpu(invalidate_bh_lru, NULL, 1);
14271da177e4SLinus Torvalds }
14289db5579bSNick Piggin EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
14291da177e4SLinus Torvalds 
14301da177e4SLinus Torvalds void set_bh_page(struct buffer_head *bh,
14311da177e4SLinus Torvalds 		struct page *page, unsigned long offset)
14321da177e4SLinus Torvalds {
14331da177e4SLinus Torvalds 	bh->b_page = page;
1434e827f923SEric Sesterhenn 	BUG_ON(offset >= PAGE_SIZE);
14351da177e4SLinus Torvalds 	if (PageHighMem(page))
14361da177e4SLinus Torvalds 		/*
14371da177e4SLinus Torvalds 		 * This catches illegal uses and preserves the offset:
14381da177e4SLinus Torvalds 		 */
14391da177e4SLinus Torvalds 		bh->b_data = (char *)(0 + offset);
14401da177e4SLinus Torvalds 	else
14411da177e4SLinus Torvalds 		bh->b_data = page_address(page) + offset;
14421da177e4SLinus Torvalds }
14431da177e4SLinus Torvalds EXPORT_SYMBOL(set_bh_page);
14441da177e4SLinus Torvalds 
14451da177e4SLinus Torvalds /*
14461da177e4SLinus Torvalds  * Called when truncating a buffer on a page completely.
14471da177e4SLinus Torvalds  */
1448858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh)
14491da177e4SLinus Torvalds {
14501da177e4SLinus Torvalds 	lock_buffer(bh);
14511da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
14521da177e4SLinus Torvalds 	bh->b_bdev = NULL;
14531da177e4SLinus Torvalds 	clear_buffer_mapped(bh);
14541da177e4SLinus Torvalds 	clear_buffer_req(bh);
14551da177e4SLinus Torvalds 	clear_buffer_new(bh);
14561da177e4SLinus Torvalds 	clear_buffer_delay(bh);
145733a266ddSDavid Chinner 	clear_buffer_unwritten(bh);
14581da177e4SLinus Torvalds 	unlock_buffer(bh);
14591da177e4SLinus Torvalds }
14601da177e4SLinus Torvalds 
14611da177e4SLinus Torvalds /**
14621da177e4SLinus Torvalds  * block_invalidatepage - invalidate part of all of a buffer-backed page
14631da177e4SLinus Torvalds  *
14641da177e4SLinus Torvalds  * @page: the page which is affected
14651da177e4SLinus Torvalds  * @offset: the index of the truncation point
14661da177e4SLinus Torvalds  *
14671da177e4SLinus Torvalds  * block_invalidatepage() is called when all or part of the page has become
14681da177e4SLinus Torvalds  * invalidatedby a truncate operation.
14691da177e4SLinus Torvalds  *
14701da177e4SLinus Torvalds  * block_invalidatepage() does not have to release all buffers, but it must
14711da177e4SLinus Torvalds  * ensure that no dirty buffer is left outside @offset and that no I/O
14721da177e4SLinus Torvalds  * is underway against any of the blocks which are outside the truncation
14731da177e4SLinus Torvalds  * point.  Because the caller is about to free (and possibly reuse) those
14741da177e4SLinus Torvalds  * blocks on-disk.
14751da177e4SLinus Torvalds  */
14762ff28e22SNeilBrown void block_invalidatepage(struct page *page, unsigned long offset)
14771da177e4SLinus Torvalds {
14781da177e4SLinus Torvalds 	struct buffer_head *head, *bh, *next;
14791da177e4SLinus Torvalds 	unsigned int curr_off = 0;
14801da177e4SLinus Torvalds 
14811da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
14821da177e4SLinus Torvalds 	if (!page_has_buffers(page))
14831da177e4SLinus Torvalds 		goto out;
14841da177e4SLinus Torvalds 
14851da177e4SLinus Torvalds 	head = page_buffers(page);
14861da177e4SLinus Torvalds 	bh = head;
14871da177e4SLinus Torvalds 	do {
14881da177e4SLinus Torvalds 		unsigned int next_off = curr_off + bh->b_size;
14891da177e4SLinus Torvalds 		next = bh->b_this_page;
14901da177e4SLinus Torvalds 
14911da177e4SLinus Torvalds 		/*
14921da177e4SLinus Torvalds 		 * is this block fully invalidated?
14931da177e4SLinus Torvalds 		 */
14941da177e4SLinus Torvalds 		if (offset <= curr_off)
14951da177e4SLinus Torvalds 			discard_buffer(bh);
14961da177e4SLinus Torvalds 		curr_off = next_off;
14971da177e4SLinus Torvalds 		bh = next;
14981da177e4SLinus Torvalds 	} while (bh != head);
14991da177e4SLinus Torvalds 
15001da177e4SLinus Torvalds 	/*
15011da177e4SLinus Torvalds 	 * We release buffers only if the entire page is being invalidated.
15021da177e4SLinus Torvalds 	 * The get_block cached value has been unconditionally invalidated,
15031da177e4SLinus Torvalds 	 * so real IO is not possible anymore.
15041da177e4SLinus Torvalds 	 */
15051da177e4SLinus Torvalds 	if (offset == 0)
15062ff28e22SNeilBrown 		try_to_release_page(page, 0);
15071da177e4SLinus Torvalds out:
15082ff28e22SNeilBrown 	return;
15091da177e4SLinus Torvalds }
15101da177e4SLinus Torvalds EXPORT_SYMBOL(block_invalidatepage);
15111da177e4SLinus Torvalds 
15121da177e4SLinus Torvalds /*
15131da177e4SLinus Torvalds  * We attach and possibly dirty the buffers atomically wrt
15141da177e4SLinus Torvalds  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
15151da177e4SLinus Torvalds  * is already excluded via the page lock.
15161da177e4SLinus Torvalds  */
15171da177e4SLinus Torvalds void create_empty_buffers(struct page *page,
15181da177e4SLinus Torvalds 			unsigned long blocksize, unsigned long b_state)
15191da177e4SLinus Torvalds {
15201da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *tail;
15211da177e4SLinus Torvalds 
15221da177e4SLinus Torvalds 	head = alloc_page_buffers(page, blocksize, 1);
15231da177e4SLinus Torvalds 	bh = head;
15241da177e4SLinus Torvalds 	do {
15251da177e4SLinus Torvalds 		bh->b_state |= b_state;
15261da177e4SLinus Torvalds 		tail = bh;
15271da177e4SLinus Torvalds 		bh = bh->b_this_page;
15281da177e4SLinus Torvalds 	} while (bh);
15291da177e4SLinus Torvalds 	tail->b_this_page = head;
15301da177e4SLinus Torvalds 
15311da177e4SLinus Torvalds 	spin_lock(&page->mapping->private_lock);
15321da177e4SLinus Torvalds 	if (PageUptodate(page) || PageDirty(page)) {
15331da177e4SLinus Torvalds 		bh = head;
15341da177e4SLinus Torvalds 		do {
15351da177e4SLinus Torvalds 			if (PageDirty(page))
15361da177e4SLinus Torvalds 				set_buffer_dirty(bh);
15371da177e4SLinus Torvalds 			if (PageUptodate(page))
15381da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
15391da177e4SLinus Torvalds 			bh = bh->b_this_page;
15401da177e4SLinus Torvalds 		} while (bh != head);
15411da177e4SLinus Torvalds 	}
15421da177e4SLinus Torvalds 	attach_page_buffers(page, head);
15431da177e4SLinus Torvalds 	spin_unlock(&page->mapping->private_lock);
15441da177e4SLinus Torvalds }
15451da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers);
15461da177e4SLinus Torvalds 
15471da177e4SLinus Torvalds /*
15481da177e4SLinus Torvalds  * We are taking a block for data and we don't want any output from any
15491da177e4SLinus Torvalds  * buffer-cache aliases starting from return from that function and
15501da177e4SLinus Torvalds  * until the moment when something will explicitly mark the buffer
15511da177e4SLinus Torvalds  * dirty (hopefully that will not happen until we will free that block ;-)
15521da177e4SLinus Torvalds  * We don't even need to mark it not-uptodate - nobody can expect
15531da177e4SLinus Torvalds  * anything from a newly allocated buffer anyway. We used to used
15541da177e4SLinus Torvalds  * unmap_buffer() for such invalidation, but that was wrong. We definitely
15551da177e4SLinus Torvalds  * don't want to mark the alias unmapped, for example - it would confuse
15561da177e4SLinus Torvalds  * anyone who might pick it with bread() afterwards...
15571da177e4SLinus Torvalds  *
15581da177e4SLinus Torvalds  * Also..  Note that bforget() doesn't lock the buffer.  So there can
15591da177e4SLinus Torvalds  * be writeout I/O going on against recently-freed buffers.  We don't
15601da177e4SLinus Torvalds  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
15611da177e4SLinus Torvalds  * only if we really need to.  That happens here.
15621da177e4SLinus Torvalds  */
15631da177e4SLinus Torvalds void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
15641da177e4SLinus Torvalds {
15651da177e4SLinus Torvalds 	struct buffer_head *old_bh;
15661da177e4SLinus Torvalds 
15671da177e4SLinus Torvalds 	might_sleep();
15681da177e4SLinus Torvalds 
1569385fd4c5SCoywolf Qi Hunt 	old_bh = __find_get_block_slow(bdev, block);
15701da177e4SLinus Torvalds 	if (old_bh) {
15711da177e4SLinus Torvalds 		clear_buffer_dirty(old_bh);
15721da177e4SLinus Torvalds 		wait_on_buffer(old_bh);
15731da177e4SLinus Torvalds 		clear_buffer_req(old_bh);
15741da177e4SLinus Torvalds 		__brelse(old_bh);
15751da177e4SLinus Torvalds 	}
15761da177e4SLinus Torvalds }
15771da177e4SLinus Torvalds EXPORT_SYMBOL(unmap_underlying_metadata);
15781da177e4SLinus Torvalds 
15791da177e4SLinus Torvalds /*
15801da177e4SLinus Torvalds  * NOTE! All mapped/uptodate combinations are valid:
15811da177e4SLinus Torvalds  *
15821da177e4SLinus Torvalds  *	Mapped	Uptodate	Meaning
15831da177e4SLinus Torvalds  *
15841da177e4SLinus Torvalds  *	No	No		"unknown" - must do get_block()
15851da177e4SLinus Torvalds  *	No	Yes		"hole" - zero-filled
15861da177e4SLinus Torvalds  *	Yes	No		"allocated" - allocated on disk, not read in
15871da177e4SLinus Torvalds  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
15881da177e4SLinus Torvalds  *
15891da177e4SLinus Torvalds  * "Dirty" is valid only with the last case (mapped+uptodate).
15901da177e4SLinus Torvalds  */
15911da177e4SLinus Torvalds 
15921da177e4SLinus Torvalds /*
15931da177e4SLinus Torvalds  * While block_write_full_page is writing back the dirty buffers under
15941da177e4SLinus Torvalds  * the page lock, whoever dirtied the buffers may decide to clean them
15951da177e4SLinus Torvalds  * again at any time.  We handle that by only looking at the buffer
15961da177e4SLinus Torvalds  * state inside lock_buffer().
15971da177e4SLinus Torvalds  *
15981da177e4SLinus Torvalds  * If block_write_full_page() is called for regular writeback
15991da177e4SLinus Torvalds  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
16001da177e4SLinus Torvalds  * locked buffer.   This only can happen if someone has written the buffer
16011da177e4SLinus Torvalds  * directly, with submit_bh().  At the address_space level PageWriteback
16021da177e4SLinus Torvalds  * prevents this contention from occurring.
16036e34eeddSTheodore Ts'o  *
16046e34eeddSTheodore Ts'o  * If block_write_full_page() is called with wbc->sync_mode ==
1605*721a9602SJens Axboe  * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
1606*721a9602SJens Axboe  * causes the writes to be flagged as synchronous writes.
16071da177e4SLinus Torvalds  */
16081da177e4SLinus Torvalds static int __block_write_full_page(struct inode *inode, struct page *page,
160935c80d5fSChris Mason 			get_block_t *get_block, struct writeback_control *wbc,
161035c80d5fSChris Mason 			bh_end_io_t *handler)
16111da177e4SLinus Torvalds {
16121da177e4SLinus Torvalds 	int err;
16131da177e4SLinus Torvalds 	sector_t block;
16141da177e4SLinus Torvalds 	sector_t last_block;
1615f0fbd5fcSAndrew Morton 	struct buffer_head *bh, *head;
1616b0cf2321SBadari Pulavarty 	const unsigned blocksize = 1 << inode->i_blkbits;
16171da177e4SLinus Torvalds 	int nr_underway = 0;
16186e34eeddSTheodore Ts'o 	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1619*721a9602SJens Axboe 			WRITE_SYNC : WRITE);
16201da177e4SLinus Torvalds 
16211da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
16221da177e4SLinus Torvalds 
16231da177e4SLinus Torvalds 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
16241da177e4SLinus Torvalds 
16251da177e4SLinus Torvalds 	if (!page_has_buffers(page)) {
1626b0cf2321SBadari Pulavarty 		create_empty_buffers(page, blocksize,
16271da177e4SLinus Torvalds 					(1 << BH_Dirty)|(1 << BH_Uptodate));
16281da177e4SLinus Torvalds 	}
16291da177e4SLinus Torvalds 
16301da177e4SLinus Torvalds 	/*
16311da177e4SLinus Torvalds 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
16321da177e4SLinus Torvalds 	 * here, and the (potentially unmapped) buffers may become dirty at
16331da177e4SLinus Torvalds 	 * any time.  If a buffer becomes dirty here after we've inspected it
16341da177e4SLinus Torvalds 	 * then we just miss that fact, and the page stays dirty.
16351da177e4SLinus Torvalds 	 *
16361da177e4SLinus Torvalds 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
16371da177e4SLinus Torvalds 	 * handle that here by just cleaning them.
16381da177e4SLinus Torvalds 	 */
16391da177e4SLinus Torvalds 
164054b21a79SAndrew Morton 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
16411da177e4SLinus Torvalds 	head = page_buffers(page);
16421da177e4SLinus Torvalds 	bh = head;
16431da177e4SLinus Torvalds 
16441da177e4SLinus Torvalds 	/*
16451da177e4SLinus Torvalds 	 * Get all the dirty buffers mapped to disk addresses and
16461da177e4SLinus Torvalds 	 * handle any aliases from the underlying blockdev's mapping.
16471da177e4SLinus Torvalds 	 */
16481da177e4SLinus Torvalds 	do {
16491da177e4SLinus Torvalds 		if (block > last_block) {
16501da177e4SLinus Torvalds 			/*
16511da177e4SLinus Torvalds 			 * mapped buffers outside i_size will occur, because
16521da177e4SLinus Torvalds 			 * this page can be outside i_size when there is a
16531da177e4SLinus Torvalds 			 * truncate in progress.
16541da177e4SLinus Torvalds 			 */
16551da177e4SLinus Torvalds 			/*
16561da177e4SLinus Torvalds 			 * The buffer was zeroed by block_write_full_page()
16571da177e4SLinus Torvalds 			 */
16581da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
16591da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
166029a814d2SAlex Tomas 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
166129a814d2SAlex Tomas 			   buffer_dirty(bh)) {
1662b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
16631da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
16641da177e4SLinus Torvalds 			if (err)
16651da177e4SLinus Torvalds 				goto recover;
166629a814d2SAlex Tomas 			clear_buffer_delay(bh);
16671da177e4SLinus Torvalds 			if (buffer_new(bh)) {
16681da177e4SLinus Torvalds 				/* blockdev mappings never come here */
16691da177e4SLinus Torvalds 				clear_buffer_new(bh);
16701da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
16711da177e4SLinus Torvalds 							bh->b_blocknr);
16721da177e4SLinus Torvalds 			}
16731da177e4SLinus Torvalds 		}
16741da177e4SLinus Torvalds 		bh = bh->b_this_page;
16751da177e4SLinus Torvalds 		block++;
16761da177e4SLinus Torvalds 	} while (bh != head);
16771da177e4SLinus Torvalds 
16781da177e4SLinus Torvalds 	do {
16791da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
16801da177e4SLinus Torvalds 			continue;
16811da177e4SLinus Torvalds 		/*
16821da177e4SLinus Torvalds 		 * If it's a fully non-blocking write attempt and we cannot
16831da177e4SLinus Torvalds 		 * lock the buffer then redirty the page.  Note that this can
16845b0830cbSJens Axboe 		 * potentially cause a busy-wait loop from writeback threads
16855b0830cbSJens Axboe 		 * and kswapd activity, but those code paths have their own
16865b0830cbSJens Axboe 		 * higher-level throttling.
16871da177e4SLinus Torvalds 		 */
16881b430beeSWu Fengguang 		if (wbc->sync_mode != WB_SYNC_NONE) {
16891da177e4SLinus Torvalds 			lock_buffer(bh);
1690ca5de404SNick Piggin 		} else if (!trylock_buffer(bh)) {
16911da177e4SLinus Torvalds 			redirty_page_for_writepage(wbc, page);
16921da177e4SLinus Torvalds 			continue;
16931da177e4SLinus Torvalds 		}
16941da177e4SLinus Torvalds 		if (test_clear_buffer_dirty(bh)) {
169535c80d5fSChris Mason 			mark_buffer_async_write_endio(bh, handler);
16961da177e4SLinus Torvalds 		} else {
16971da177e4SLinus Torvalds 			unlock_buffer(bh);
16981da177e4SLinus Torvalds 		}
16991da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
17001da177e4SLinus Torvalds 
17011da177e4SLinus Torvalds 	/*
17021da177e4SLinus Torvalds 	 * The page and its buffers are protected by PageWriteback(), so we can
17031da177e4SLinus Torvalds 	 * drop the bh refcounts early.
17041da177e4SLinus Torvalds 	 */
17051da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
17061da177e4SLinus Torvalds 	set_page_writeback(page);
17071da177e4SLinus Torvalds 
17081da177e4SLinus Torvalds 	do {
17091da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
17101da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
1711a64c8610STheodore Ts'o 			submit_bh(write_op, bh);
17121da177e4SLinus Torvalds 			nr_underway++;
1713ad576e63SNick Piggin 		}
17141da177e4SLinus Torvalds 		bh = next;
17151da177e4SLinus Torvalds 	} while (bh != head);
171605937baaSAndrew Morton 	unlock_page(page);
17171da177e4SLinus Torvalds 
17181da177e4SLinus Torvalds 	err = 0;
17191da177e4SLinus Torvalds done:
17201da177e4SLinus Torvalds 	if (nr_underway == 0) {
17211da177e4SLinus Torvalds 		/*
17221da177e4SLinus Torvalds 		 * The page was marked dirty, but the buffers were
17231da177e4SLinus Torvalds 		 * clean.  Someone wrote them back by hand with
17241da177e4SLinus Torvalds 		 * ll_rw_block/submit_bh.  A rare case.
17251da177e4SLinus Torvalds 		 */
17261da177e4SLinus Torvalds 		end_page_writeback(page);
17273d67f2d7SNick Piggin 
17281da177e4SLinus Torvalds 		/*
17291da177e4SLinus Torvalds 		 * The page and buffer_heads can be released at any time from
17301da177e4SLinus Torvalds 		 * here on.
17311da177e4SLinus Torvalds 		 */
17321da177e4SLinus Torvalds 	}
17331da177e4SLinus Torvalds 	return err;
17341da177e4SLinus Torvalds 
17351da177e4SLinus Torvalds recover:
17361da177e4SLinus Torvalds 	/*
17371da177e4SLinus Torvalds 	 * ENOSPC, or some other error.  We may already have added some
17381da177e4SLinus Torvalds 	 * blocks to the file, so we need to write these out to avoid
17391da177e4SLinus Torvalds 	 * exposing stale data.
17401da177e4SLinus Torvalds 	 * The page is currently locked and not marked for writeback
17411da177e4SLinus Torvalds 	 */
17421da177e4SLinus Torvalds 	bh = head;
17431da177e4SLinus Torvalds 	/* Recovery: lock and submit the mapped buffers */
17441da177e4SLinus Torvalds 	do {
174529a814d2SAlex Tomas 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
174629a814d2SAlex Tomas 		    !buffer_delay(bh)) {
17471da177e4SLinus Torvalds 			lock_buffer(bh);
174835c80d5fSChris Mason 			mark_buffer_async_write_endio(bh, handler);
17491da177e4SLinus Torvalds 		} else {
17501da177e4SLinus Torvalds 			/*
17511da177e4SLinus Torvalds 			 * The buffer may have been set dirty during
17521da177e4SLinus Torvalds 			 * attachment to a dirty page.
17531da177e4SLinus Torvalds 			 */
17541da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
17551da177e4SLinus Torvalds 		}
17561da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
17571da177e4SLinus Torvalds 	SetPageError(page);
17581da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
17597e4c3690SAndrew Morton 	mapping_set_error(page->mapping, err);
17601da177e4SLinus Torvalds 	set_page_writeback(page);
17611da177e4SLinus Torvalds 	do {
17621da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
17631da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
17641da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
1765a64c8610STheodore Ts'o 			submit_bh(write_op, bh);
17661da177e4SLinus Torvalds 			nr_underway++;
1767ad576e63SNick Piggin 		}
17681da177e4SLinus Torvalds 		bh = next;
17691da177e4SLinus Torvalds 	} while (bh != head);
1770ffda9d30SNick Piggin 	unlock_page(page);
17711da177e4SLinus Torvalds 	goto done;
17721da177e4SLinus Torvalds }
17731da177e4SLinus Torvalds 
1774afddba49SNick Piggin /*
1775afddba49SNick Piggin  * If a page has any new buffers, zero them out here, and mark them uptodate
1776afddba49SNick Piggin  * and dirty so they'll be written out (in order to prevent uninitialised
1777afddba49SNick Piggin  * block data from leaking). And clear the new bit.
1778afddba49SNick Piggin  */
1779afddba49SNick Piggin void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1780afddba49SNick Piggin {
1781afddba49SNick Piggin 	unsigned int block_start, block_end;
1782afddba49SNick Piggin 	struct buffer_head *head, *bh;
1783afddba49SNick Piggin 
1784afddba49SNick Piggin 	BUG_ON(!PageLocked(page));
1785afddba49SNick Piggin 	if (!page_has_buffers(page))
1786afddba49SNick Piggin 		return;
1787afddba49SNick Piggin 
1788afddba49SNick Piggin 	bh = head = page_buffers(page);
1789afddba49SNick Piggin 	block_start = 0;
1790afddba49SNick Piggin 	do {
1791afddba49SNick Piggin 		block_end = block_start + bh->b_size;
1792afddba49SNick Piggin 
1793afddba49SNick Piggin 		if (buffer_new(bh)) {
1794afddba49SNick Piggin 			if (block_end > from && block_start < to) {
1795afddba49SNick Piggin 				if (!PageUptodate(page)) {
1796afddba49SNick Piggin 					unsigned start, size;
1797afddba49SNick Piggin 
1798afddba49SNick Piggin 					start = max(from, block_start);
1799afddba49SNick Piggin 					size = min(to, block_end) - start;
1800afddba49SNick Piggin 
1801eebd2aa3SChristoph Lameter 					zero_user(page, start, size);
1802afddba49SNick Piggin 					set_buffer_uptodate(bh);
1803afddba49SNick Piggin 				}
1804afddba49SNick Piggin 
1805afddba49SNick Piggin 				clear_buffer_new(bh);
1806afddba49SNick Piggin 				mark_buffer_dirty(bh);
1807afddba49SNick Piggin 			}
1808afddba49SNick Piggin 		}
1809afddba49SNick Piggin 
1810afddba49SNick Piggin 		block_start = block_end;
1811afddba49SNick Piggin 		bh = bh->b_this_page;
1812afddba49SNick Piggin 	} while (bh != head);
1813afddba49SNick Piggin }
1814afddba49SNick Piggin EXPORT_SYMBOL(page_zero_new_buffers);
1815afddba49SNick Piggin 
1816ebdec241SChristoph Hellwig int __block_write_begin(struct page *page, loff_t pos, unsigned len,
18176e1db88dSChristoph Hellwig 		get_block_t *get_block)
18181da177e4SLinus Torvalds {
1819ebdec241SChristoph Hellwig 	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1820ebdec241SChristoph Hellwig 	unsigned to = from + len;
18216e1db88dSChristoph Hellwig 	struct inode *inode = page->mapping->host;
18221da177e4SLinus Torvalds 	unsigned block_start, block_end;
18231da177e4SLinus Torvalds 	sector_t block;
18241da177e4SLinus Torvalds 	int err = 0;
18251da177e4SLinus Torvalds 	unsigned blocksize, bbits;
18261da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
18271da177e4SLinus Torvalds 
18281da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
18291da177e4SLinus Torvalds 	BUG_ON(from > PAGE_CACHE_SIZE);
18301da177e4SLinus Torvalds 	BUG_ON(to > PAGE_CACHE_SIZE);
18311da177e4SLinus Torvalds 	BUG_ON(from > to);
18321da177e4SLinus Torvalds 
18331da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
18341da177e4SLinus Torvalds 	if (!page_has_buffers(page))
18351da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
18361da177e4SLinus Torvalds 	head = page_buffers(page);
18371da177e4SLinus Torvalds 
18381da177e4SLinus Torvalds 	bbits = inode->i_blkbits;
18391da177e4SLinus Torvalds 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
18401da177e4SLinus Torvalds 
18411da177e4SLinus Torvalds 	for(bh = head, block_start = 0; bh != head || !block_start;
18421da177e4SLinus Torvalds 	    block++, block_start=block_end, bh = bh->b_this_page) {
18431da177e4SLinus Torvalds 		block_end = block_start + blocksize;
18441da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
18451da177e4SLinus Torvalds 			if (PageUptodate(page)) {
18461da177e4SLinus Torvalds 				if (!buffer_uptodate(bh))
18471da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
18481da177e4SLinus Torvalds 			}
18491da177e4SLinus Torvalds 			continue;
18501da177e4SLinus Torvalds 		}
18511da177e4SLinus Torvalds 		if (buffer_new(bh))
18521da177e4SLinus Torvalds 			clear_buffer_new(bh);
18531da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
1854b0cf2321SBadari Pulavarty 			WARN_ON(bh->b_size != blocksize);
18551da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
18561da177e4SLinus Torvalds 			if (err)
1857f3ddbdc6SNick Piggin 				break;
18581da177e4SLinus Torvalds 			if (buffer_new(bh)) {
18591da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
18601da177e4SLinus Torvalds 							bh->b_blocknr);
18611da177e4SLinus Torvalds 				if (PageUptodate(page)) {
1862637aff46SNick Piggin 					clear_buffer_new(bh);
18631da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
1864637aff46SNick Piggin 					mark_buffer_dirty(bh);
18651da177e4SLinus Torvalds 					continue;
18661da177e4SLinus Torvalds 				}
1867eebd2aa3SChristoph Lameter 				if (block_end > to || block_start < from)
1868eebd2aa3SChristoph Lameter 					zero_user_segments(page,
1869eebd2aa3SChristoph Lameter 						to, block_end,
1870eebd2aa3SChristoph Lameter 						block_start, from);
18711da177e4SLinus Torvalds 				continue;
18721da177e4SLinus Torvalds 			}
18731da177e4SLinus Torvalds 		}
18741da177e4SLinus Torvalds 		if (PageUptodate(page)) {
18751da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
18761da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
18771da177e4SLinus Torvalds 			continue;
18781da177e4SLinus Torvalds 		}
18791da177e4SLinus Torvalds 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
188033a266ddSDavid Chinner 		    !buffer_unwritten(bh) &&
18811da177e4SLinus Torvalds 		     (block_start < from || block_end > to)) {
18821da177e4SLinus Torvalds 			ll_rw_block(READ, 1, &bh);
18831da177e4SLinus Torvalds 			*wait_bh++=bh;
18841da177e4SLinus Torvalds 		}
18851da177e4SLinus Torvalds 	}
18861da177e4SLinus Torvalds 	/*
18871da177e4SLinus Torvalds 	 * If we issued read requests - let them complete.
18881da177e4SLinus Torvalds 	 */
18891da177e4SLinus Torvalds 	while(wait_bh > wait) {
18901da177e4SLinus Torvalds 		wait_on_buffer(*--wait_bh);
18911da177e4SLinus Torvalds 		if (!buffer_uptodate(*wait_bh))
1892f3ddbdc6SNick Piggin 			err = -EIO;
18931da177e4SLinus Torvalds 	}
18946e1db88dSChristoph Hellwig 	if (unlikely(err)) {
1895afddba49SNick Piggin 		page_zero_new_buffers(page, from, to);
18966e1db88dSChristoph Hellwig 		ClearPageUptodate(page);
18976e1db88dSChristoph Hellwig 	}
18981da177e4SLinus Torvalds 	return err;
18991da177e4SLinus Torvalds }
1900ebdec241SChristoph Hellwig EXPORT_SYMBOL(__block_write_begin);
19011da177e4SLinus Torvalds 
19021da177e4SLinus Torvalds static int __block_commit_write(struct inode *inode, struct page *page,
19031da177e4SLinus Torvalds 		unsigned from, unsigned to)
19041da177e4SLinus Torvalds {
19051da177e4SLinus Torvalds 	unsigned block_start, block_end;
19061da177e4SLinus Torvalds 	int partial = 0;
19071da177e4SLinus Torvalds 	unsigned blocksize;
19081da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
19091da177e4SLinus Torvalds 
19101da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
19111da177e4SLinus Torvalds 
19121da177e4SLinus Torvalds 	for(bh = head = page_buffers(page), block_start = 0;
19131da177e4SLinus Torvalds 	    bh != head || !block_start;
19141da177e4SLinus Torvalds 	    block_start=block_end, bh = bh->b_this_page) {
19151da177e4SLinus Torvalds 		block_end = block_start + blocksize;
19161da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
19171da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
19181da177e4SLinus Torvalds 				partial = 1;
19191da177e4SLinus Torvalds 		} else {
19201da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
19211da177e4SLinus Torvalds 			mark_buffer_dirty(bh);
19221da177e4SLinus Torvalds 		}
1923afddba49SNick Piggin 		clear_buffer_new(bh);
19241da177e4SLinus Torvalds 	}
19251da177e4SLinus Torvalds 
19261da177e4SLinus Torvalds 	/*
19271da177e4SLinus Torvalds 	 * If this is a partial write which happened to make all buffers
19281da177e4SLinus Torvalds 	 * uptodate then we can optimize away a bogus readpage() for
19291da177e4SLinus Torvalds 	 * the next read(). Here we 'discover' whether the page went
19301da177e4SLinus Torvalds 	 * uptodate as a result of this (potentially partial) write.
19311da177e4SLinus Torvalds 	 */
19321da177e4SLinus Torvalds 	if (!partial)
19331da177e4SLinus Torvalds 		SetPageUptodate(page);
19341da177e4SLinus Torvalds 	return 0;
19351da177e4SLinus Torvalds }
19361da177e4SLinus Torvalds 
19371da177e4SLinus Torvalds /*
1938155130a4SChristoph Hellwig  * block_write_begin takes care of the basic task of block allocation and
1939155130a4SChristoph Hellwig  * bringing partial write blocks uptodate first.
1940155130a4SChristoph Hellwig  *
19417bb46a67Snpiggin@suse.de  * The filesystem needs to handle block truncation upon failure.
1942afddba49SNick Piggin  */
1943155130a4SChristoph Hellwig int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1944155130a4SChristoph Hellwig 		unsigned flags, struct page **pagep, get_block_t *get_block)
1945afddba49SNick Piggin {
19466e1db88dSChristoph Hellwig 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1947afddba49SNick Piggin 	struct page *page;
19486e1db88dSChristoph Hellwig 	int status;
1949afddba49SNick Piggin 
195054566b2cSNick Piggin 	page = grab_cache_page_write_begin(mapping, index, flags);
19516e1db88dSChristoph Hellwig 	if (!page)
19526e1db88dSChristoph Hellwig 		return -ENOMEM;
1953afddba49SNick Piggin 
19546e1db88dSChristoph Hellwig 	status = __block_write_begin(page, pos, len, get_block);
1955afddba49SNick Piggin 	if (unlikely(status)) {
1956afddba49SNick Piggin 		unlock_page(page);
1957afddba49SNick Piggin 		page_cache_release(page);
19586e1db88dSChristoph Hellwig 		page = NULL;
1959afddba49SNick Piggin 	}
1960afddba49SNick Piggin 
19616e1db88dSChristoph Hellwig 	*pagep = page;
1962afddba49SNick Piggin 	return status;
1963afddba49SNick Piggin }
1964afddba49SNick Piggin EXPORT_SYMBOL(block_write_begin);
1965afddba49SNick Piggin 
1966afddba49SNick Piggin int block_write_end(struct file *file, struct address_space *mapping,
1967afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
1968afddba49SNick Piggin 			struct page *page, void *fsdata)
1969afddba49SNick Piggin {
1970afddba49SNick Piggin 	struct inode *inode = mapping->host;
1971afddba49SNick Piggin 	unsigned start;
1972afddba49SNick Piggin 
1973afddba49SNick Piggin 	start = pos & (PAGE_CACHE_SIZE - 1);
1974afddba49SNick Piggin 
1975afddba49SNick Piggin 	if (unlikely(copied < len)) {
1976afddba49SNick Piggin 		/*
1977afddba49SNick Piggin 		 * The buffers that were written will now be uptodate, so we
1978afddba49SNick Piggin 		 * don't have to worry about a readpage reading them and
1979afddba49SNick Piggin 		 * overwriting a partial write. However if we have encountered
1980afddba49SNick Piggin 		 * a short write and only partially written into a buffer, it
1981afddba49SNick Piggin 		 * will not be marked uptodate, so a readpage might come in and
1982afddba49SNick Piggin 		 * destroy our partial write.
1983afddba49SNick Piggin 		 *
1984afddba49SNick Piggin 		 * Do the simplest thing, and just treat any short write to a
1985afddba49SNick Piggin 		 * non uptodate page as a zero-length write, and force the
1986afddba49SNick Piggin 		 * caller to redo the whole thing.
1987afddba49SNick Piggin 		 */
1988afddba49SNick Piggin 		if (!PageUptodate(page))
1989afddba49SNick Piggin 			copied = 0;
1990afddba49SNick Piggin 
1991afddba49SNick Piggin 		page_zero_new_buffers(page, start+copied, start+len);
1992afddba49SNick Piggin 	}
1993afddba49SNick Piggin 	flush_dcache_page(page);
1994afddba49SNick Piggin 
1995afddba49SNick Piggin 	/* This could be a short (even 0-length) commit */
1996afddba49SNick Piggin 	__block_commit_write(inode, page, start, start+copied);
1997afddba49SNick Piggin 
1998afddba49SNick Piggin 	return copied;
1999afddba49SNick Piggin }
2000afddba49SNick Piggin EXPORT_SYMBOL(block_write_end);
2001afddba49SNick Piggin 
2002afddba49SNick Piggin int generic_write_end(struct file *file, struct address_space *mapping,
2003afddba49SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
2004afddba49SNick Piggin 			struct page *page, void *fsdata)
2005afddba49SNick Piggin {
2006afddba49SNick Piggin 	struct inode *inode = mapping->host;
2007c7d206b3SJan Kara 	int i_size_changed = 0;
2008afddba49SNick Piggin 
2009afddba49SNick Piggin 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2010afddba49SNick Piggin 
2011afddba49SNick Piggin 	/*
2012afddba49SNick Piggin 	 * No need to use i_size_read() here, the i_size
2013afddba49SNick Piggin 	 * cannot change under us because we hold i_mutex.
2014afddba49SNick Piggin 	 *
2015afddba49SNick Piggin 	 * But it's important to update i_size while still holding page lock:
2016afddba49SNick Piggin 	 * page writeout could otherwise come in and zero beyond i_size.
2017afddba49SNick Piggin 	 */
2018afddba49SNick Piggin 	if (pos+copied > inode->i_size) {
2019afddba49SNick Piggin 		i_size_write(inode, pos+copied);
2020c7d206b3SJan Kara 		i_size_changed = 1;
2021afddba49SNick Piggin 	}
2022afddba49SNick Piggin 
2023afddba49SNick Piggin 	unlock_page(page);
2024afddba49SNick Piggin 	page_cache_release(page);
2025afddba49SNick Piggin 
2026c7d206b3SJan Kara 	/*
2027c7d206b3SJan Kara 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
2028c7d206b3SJan Kara 	 * makes the holding time of page lock longer. Second, it forces lock
2029c7d206b3SJan Kara 	 * ordering of page lock and transaction start for journaling
2030c7d206b3SJan Kara 	 * filesystems.
2031c7d206b3SJan Kara 	 */
2032c7d206b3SJan Kara 	if (i_size_changed)
2033c7d206b3SJan Kara 		mark_inode_dirty(inode);
2034c7d206b3SJan Kara 
2035afddba49SNick Piggin 	return copied;
2036afddba49SNick Piggin }
2037afddba49SNick Piggin EXPORT_SYMBOL(generic_write_end);
2038afddba49SNick Piggin 
2039afddba49SNick Piggin /*
20408ab22b9aSHisashi Hifumi  * block_is_partially_uptodate checks whether buffers within a page are
20418ab22b9aSHisashi Hifumi  * uptodate or not.
20428ab22b9aSHisashi Hifumi  *
20438ab22b9aSHisashi Hifumi  * Returns true if all buffers which correspond to a file portion
20448ab22b9aSHisashi Hifumi  * we want to read are uptodate.
20458ab22b9aSHisashi Hifumi  */
20468ab22b9aSHisashi Hifumi int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
20478ab22b9aSHisashi Hifumi 					unsigned long from)
20488ab22b9aSHisashi Hifumi {
20498ab22b9aSHisashi Hifumi 	struct inode *inode = page->mapping->host;
20508ab22b9aSHisashi Hifumi 	unsigned block_start, block_end, blocksize;
20518ab22b9aSHisashi Hifumi 	unsigned to;
20528ab22b9aSHisashi Hifumi 	struct buffer_head *bh, *head;
20538ab22b9aSHisashi Hifumi 	int ret = 1;
20548ab22b9aSHisashi Hifumi 
20558ab22b9aSHisashi Hifumi 	if (!page_has_buffers(page))
20568ab22b9aSHisashi Hifumi 		return 0;
20578ab22b9aSHisashi Hifumi 
20588ab22b9aSHisashi Hifumi 	blocksize = 1 << inode->i_blkbits;
20598ab22b9aSHisashi Hifumi 	to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
20608ab22b9aSHisashi Hifumi 	to = from + to;
20618ab22b9aSHisashi Hifumi 	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
20628ab22b9aSHisashi Hifumi 		return 0;
20638ab22b9aSHisashi Hifumi 
20648ab22b9aSHisashi Hifumi 	head = page_buffers(page);
20658ab22b9aSHisashi Hifumi 	bh = head;
20668ab22b9aSHisashi Hifumi 	block_start = 0;
20678ab22b9aSHisashi Hifumi 	do {
20688ab22b9aSHisashi Hifumi 		block_end = block_start + blocksize;
20698ab22b9aSHisashi Hifumi 		if (block_end > from && block_start < to) {
20708ab22b9aSHisashi Hifumi 			if (!buffer_uptodate(bh)) {
20718ab22b9aSHisashi Hifumi 				ret = 0;
20728ab22b9aSHisashi Hifumi 				break;
20738ab22b9aSHisashi Hifumi 			}
20748ab22b9aSHisashi Hifumi 			if (block_end >= to)
20758ab22b9aSHisashi Hifumi 				break;
20768ab22b9aSHisashi Hifumi 		}
20778ab22b9aSHisashi Hifumi 		block_start = block_end;
20788ab22b9aSHisashi Hifumi 		bh = bh->b_this_page;
20798ab22b9aSHisashi Hifumi 	} while (bh != head);
20808ab22b9aSHisashi Hifumi 
20818ab22b9aSHisashi Hifumi 	return ret;
20828ab22b9aSHisashi Hifumi }
20838ab22b9aSHisashi Hifumi EXPORT_SYMBOL(block_is_partially_uptodate);
20848ab22b9aSHisashi Hifumi 
20858ab22b9aSHisashi Hifumi /*
20861da177e4SLinus Torvalds  * Generic "read page" function for block devices that have the normal
20871da177e4SLinus Torvalds  * get_block functionality. This is most of the block device filesystems.
20881da177e4SLinus Torvalds  * Reads the page asynchronously --- the unlock_buffer() and
20891da177e4SLinus Torvalds  * set/clear_buffer_uptodate() functions propagate buffer state into the
20901da177e4SLinus Torvalds  * page struct once IO has completed.
20911da177e4SLinus Torvalds  */
20921da177e4SLinus Torvalds int block_read_full_page(struct page *page, get_block_t *get_block)
20931da177e4SLinus Torvalds {
20941da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
20951da177e4SLinus Torvalds 	sector_t iblock, lblock;
20961da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
20971da177e4SLinus Torvalds 	unsigned int blocksize;
20981da177e4SLinus Torvalds 	int nr, i;
20991da177e4SLinus Torvalds 	int fully_mapped = 1;
21001da177e4SLinus Torvalds 
2101cd7619d6SMatt Mackall 	BUG_ON(!PageLocked(page));
21021da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
21031da177e4SLinus Torvalds 	if (!page_has_buffers(page))
21041da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
21051da177e4SLinus Torvalds 	head = page_buffers(page);
21061da177e4SLinus Torvalds 
21071da177e4SLinus Torvalds 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
21081da177e4SLinus Torvalds 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
21091da177e4SLinus Torvalds 	bh = head;
21101da177e4SLinus Torvalds 	nr = 0;
21111da177e4SLinus Torvalds 	i = 0;
21121da177e4SLinus Torvalds 
21131da177e4SLinus Torvalds 	do {
21141da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
21151da177e4SLinus Torvalds 			continue;
21161da177e4SLinus Torvalds 
21171da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
2118c64610baSAndrew Morton 			int err = 0;
2119c64610baSAndrew Morton 
21201da177e4SLinus Torvalds 			fully_mapped = 0;
21211da177e4SLinus Torvalds 			if (iblock < lblock) {
2122b0cf2321SBadari Pulavarty 				WARN_ON(bh->b_size != blocksize);
2123c64610baSAndrew Morton 				err = get_block(inode, iblock, bh, 0);
2124c64610baSAndrew Morton 				if (err)
21251da177e4SLinus Torvalds 					SetPageError(page);
21261da177e4SLinus Torvalds 			}
21271da177e4SLinus Torvalds 			if (!buffer_mapped(bh)) {
2128eebd2aa3SChristoph Lameter 				zero_user(page, i * blocksize, blocksize);
2129c64610baSAndrew Morton 				if (!err)
21301da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
21311da177e4SLinus Torvalds 				continue;
21321da177e4SLinus Torvalds 			}
21331da177e4SLinus Torvalds 			/*
21341da177e4SLinus Torvalds 			 * get_block() might have updated the buffer
21351da177e4SLinus Torvalds 			 * synchronously
21361da177e4SLinus Torvalds 			 */
21371da177e4SLinus Torvalds 			if (buffer_uptodate(bh))
21381da177e4SLinus Torvalds 				continue;
21391da177e4SLinus Torvalds 		}
21401da177e4SLinus Torvalds 		arr[nr++] = bh;
21411da177e4SLinus Torvalds 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
21421da177e4SLinus Torvalds 
21431da177e4SLinus Torvalds 	if (fully_mapped)
21441da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
21451da177e4SLinus Torvalds 
21461da177e4SLinus Torvalds 	if (!nr) {
21471da177e4SLinus Torvalds 		/*
21481da177e4SLinus Torvalds 		 * All buffers are uptodate - we can set the page uptodate
21491da177e4SLinus Torvalds 		 * as well. But not if get_block() returned an error.
21501da177e4SLinus Torvalds 		 */
21511da177e4SLinus Torvalds 		if (!PageError(page))
21521da177e4SLinus Torvalds 			SetPageUptodate(page);
21531da177e4SLinus Torvalds 		unlock_page(page);
21541da177e4SLinus Torvalds 		return 0;
21551da177e4SLinus Torvalds 	}
21561da177e4SLinus Torvalds 
21571da177e4SLinus Torvalds 	/* Stage two: lock the buffers */
21581da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
21591da177e4SLinus Torvalds 		bh = arr[i];
21601da177e4SLinus Torvalds 		lock_buffer(bh);
21611da177e4SLinus Torvalds 		mark_buffer_async_read(bh);
21621da177e4SLinus Torvalds 	}
21631da177e4SLinus Torvalds 
21641da177e4SLinus Torvalds 	/*
21651da177e4SLinus Torvalds 	 * Stage 3: start the IO.  Check for uptodateness
21661da177e4SLinus Torvalds 	 * inside the buffer lock in case another process reading
21671da177e4SLinus Torvalds 	 * the underlying blockdev brought it uptodate (the sct fix).
21681da177e4SLinus Torvalds 	 */
21691da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
21701da177e4SLinus Torvalds 		bh = arr[i];
21711da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
21721da177e4SLinus Torvalds 			end_buffer_async_read(bh, 1);
21731da177e4SLinus Torvalds 		else
21741da177e4SLinus Torvalds 			submit_bh(READ, bh);
21751da177e4SLinus Torvalds 	}
21761da177e4SLinus Torvalds 	return 0;
21771da177e4SLinus Torvalds }
21781fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_read_full_page);
21791da177e4SLinus Torvalds 
21801da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding
218189e10787SNick Piggin  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
21821da177e4SLinus Torvalds  * deal with the hole.
21831da177e4SLinus Torvalds  */
218489e10787SNick Piggin int generic_cont_expand_simple(struct inode *inode, loff_t size)
21851da177e4SLinus Torvalds {
21861da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
21871da177e4SLinus Torvalds 	struct page *page;
218889e10787SNick Piggin 	void *fsdata;
21891da177e4SLinus Torvalds 	int err;
21901da177e4SLinus Torvalds 
2191c08d3b0eSnpiggin@suse.de 	err = inode_newsize_ok(inode, size);
2192c08d3b0eSnpiggin@suse.de 	if (err)
21931da177e4SLinus Torvalds 		goto out;
21941da177e4SLinus Torvalds 
219589e10787SNick Piggin 	err = pagecache_write_begin(NULL, mapping, size, 0,
219689e10787SNick Piggin 				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
219789e10787SNick Piggin 				&page, &fsdata);
219889e10787SNick Piggin 	if (err)
219905eb0b51SOGAWA Hirofumi 		goto out;
220005eb0b51SOGAWA Hirofumi 
220189e10787SNick Piggin 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
220289e10787SNick Piggin 	BUG_ON(err > 0);
220305eb0b51SOGAWA Hirofumi 
220405eb0b51SOGAWA Hirofumi out:
220505eb0b51SOGAWA Hirofumi 	return err;
220605eb0b51SOGAWA Hirofumi }
22071fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_cont_expand_simple);
220805eb0b51SOGAWA Hirofumi 
2209f1e3af72SAdrian Bunk static int cont_expand_zero(struct file *file, struct address_space *mapping,
221089e10787SNick Piggin 			    loff_t pos, loff_t *bytes)
221105eb0b51SOGAWA Hirofumi {
221289e10787SNick Piggin 	struct inode *inode = mapping->host;
221389e10787SNick Piggin 	unsigned blocksize = 1 << inode->i_blkbits;
221489e10787SNick Piggin 	struct page *page;
221589e10787SNick Piggin 	void *fsdata;
221689e10787SNick Piggin 	pgoff_t index, curidx;
221789e10787SNick Piggin 	loff_t curpos;
221889e10787SNick Piggin 	unsigned zerofrom, offset, len;
221989e10787SNick Piggin 	int err = 0;
222005eb0b51SOGAWA Hirofumi 
222189e10787SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
222289e10787SNick Piggin 	offset = pos & ~PAGE_CACHE_MASK;
222389e10787SNick Piggin 
222489e10787SNick Piggin 	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
222589e10787SNick Piggin 		zerofrom = curpos & ~PAGE_CACHE_MASK;
222689e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
222789e10787SNick Piggin 			*bytes |= (blocksize-1);
222889e10787SNick Piggin 			(*bytes)++;
222989e10787SNick Piggin 		}
223089e10787SNick Piggin 		len = PAGE_CACHE_SIZE - zerofrom;
223189e10787SNick Piggin 
223289e10787SNick Piggin 		err = pagecache_write_begin(file, mapping, curpos, len,
223389e10787SNick Piggin 						AOP_FLAG_UNINTERRUPTIBLE,
223489e10787SNick Piggin 						&page, &fsdata);
223589e10787SNick Piggin 		if (err)
223689e10787SNick Piggin 			goto out;
2237eebd2aa3SChristoph Lameter 		zero_user(page, zerofrom, len);
223889e10787SNick Piggin 		err = pagecache_write_end(file, mapping, curpos, len, len,
223989e10787SNick Piggin 						page, fsdata);
224089e10787SNick Piggin 		if (err < 0)
224189e10787SNick Piggin 			goto out;
224289e10787SNick Piggin 		BUG_ON(err != len);
224389e10787SNick Piggin 		err = 0;
2244061e9746SOGAWA Hirofumi 
2245061e9746SOGAWA Hirofumi 		balance_dirty_pages_ratelimited(mapping);
224689e10787SNick Piggin 	}
224789e10787SNick Piggin 
224889e10787SNick Piggin 	/* page covers the boundary, find the boundary offset */
224989e10787SNick Piggin 	if (index == curidx) {
225089e10787SNick Piggin 		zerofrom = curpos & ~PAGE_CACHE_MASK;
225189e10787SNick Piggin 		/* if we will expand the thing last block will be filled */
225289e10787SNick Piggin 		if (offset <= zerofrom) {
225389e10787SNick Piggin 			goto out;
225489e10787SNick Piggin 		}
225589e10787SNick Piggin 		if (zerofrom & (blocksize-1)) {
225689e10787SNick Piggin 			*bytes |= (blocksize-1);
225789e10787SNick Piggin 			(*bytes)++;
225889e10787SNick Piggin 		}
225989e10787SNick Piggin 		len = offset - zerofrom;
226089e10787SNick Piggin 
226189e10787SNick Piggin 		err = pagecache_write_begin(file, mapping, curpos, len,
226289e10787SNick Piggin 						AOP_FLAG_UNINTERRUPTIBLE,
226389e10787SNick Piggin 						&page, &fsdata);
226489e10787SNick Piggin 		if (err)
226589e10787SNick Piggin 			goto out;
2266eebd2aa3SChristoph Lameter 		zero_user(page, zerofrom, len);
226789e10787SNick Piggin 		err = pagecache_write_end(file, mapping, curpos, len, len,
226889e10787SNick Piggin 						page, fsdata);
226989e10787SNick Piggin 		if (err < 0)
227089e10787SNick Piggin 			goto out;
227189e10787SNick Piggin 		BUG_ON(err != len);
227289e10787SNick Piggin 		err = 0;
227389e10787SNick Piggin 	}
227489e10787SNick Piggin out:
227589e10787SNick Piggin 	return err;
22761da177e4SLinus Torvalds }
22771da177e4SLinus Torvalds 
22781da177e4SLinus Torvalds /*
22791da177e4SLinus Torvalds  * For moronic filesystems that do not allow holes in file.
22801da177e4SLinus Torvalds  * We may have to extend the file.
22811da177e4SLinus Torvalds  */
2282282dc178SChristoph Hellwig int cont_write_begin(struct file *file, struct address_space *mapping,
228389e10787SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
228489e10787SNick Piggin 			struct page **pagep, void **fsdata,
228589e10787SNick Piggin 			get_block_t *get_block, loff_t *bytes)
22861da177e4SLinus Torvalds {
22871da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
22881da177e4SLinus Torvalds 	unsigned blocksize = 1 << inode->i_blkbits;
228989e10787SNick Piggin 	unsigned zerofrom;
229089e10787SNick Piggin 	int err;
22911da177e4SLinus Torvalds 
229289e10787SNick Piggin 	err = cont_expand_zero(file, mapping, pos, bytes);
229389e10787SNick Piggin 	if (err)
2294155130a4SChristoph Hellwig 		return err;
22951da177e4SLinus Torvalds 
22961da177e4SLinus Torvalds 	zerofrom = *bytes & ~PAGE_CACHE_MASK;
229789e10787SNick Piggin 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
22981da177e4SLinus Torvalds 		*bytes |= (blocksize-1);
22991da177e4SLinus Torvalds 		(*bytes)++;
23001da177e4SLinus Torvalds 	}
23011da177e4SLinus Torvalds 
2302155130a4SChristoph Hellwig 	return block_write_begin(mapping, pos, len, flags, pagep, get_block);
23031da177e4SLinus Torvalds }
23041fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(cont_write_begin);
23051da177e4SLinus Torvalds 
23061da177e4SLinus Torvalds int block_commit_write(struct page *page, unsigned from, unsigned to)
23071da177e4SLinus Torvalds {
23081da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
23091da177e4SLinus Torvalds 	__block_commit_write(inode,page,from,to);
23101da177e4SLinus Torvalds 	return 0;
23111da177e4SLinus Torvalds }
23121fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_commit_write);
23131da177e4SLinus Torvalds 
231454171690SDavid Chinner /*
231554171690SDavid Chinner  * block_page_mkwrite() is not allowed to change the file size as it gets
231654171690SDavid Chinner  * called from a page fault handler when a page is first dirtied. Hence we must
231754171690SDavid Chinner  * be careful to check for EOF conditions here. We set the page up correctly
231854171690SDavid Chinner  * for a written page which means we get ENOSPC checking when writing into
231954171690SDavid Chinner  * holes and correct delalloc and unwritten extent mapping on filesystems that
232054171690SDavid Chinner  * support these features.
232154171690SDavid Chinner  *
232254171690SDavid Chinner  * We are not allowed to take the i_mutex here so we have to play games to
232354171690SDavid Chinner  * protect against truncate races as the page could now be beyond EOF.  Because
23247bb46a67Snpiggin@suse.de  * truncate writes the inode size before removing pages, once we have the
232554171690SDavid Chinner  * page lock we can determine safely if the page is beyond EOF. If it is not
232654171690SDavid Chinner  * beyond EOF, then the page is guaranteed safe against truncation until we
232754171690SDavid Chinner  * unlock the page.
232854171690SDavid Chinner  */
232954171690SDavid Chinner int
2330c2ec175cSNick Piggin block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
233154171690SDavid Chinner 		   get_block_t get_block)
233254171690SDavid Chinner {
2333c2ec175cSNick Piggin 	struct page *page = vmf->page;
233454171690SDavid Chinner 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
233554171690SDavid Chinner 	unsigned long end;
233654171690SDavid Chinner 	loff_t size;
233756a76f82SNick Piggin 	int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
233854171690SDavid Chinner 
233954171690SDavid Chinner 	lock_page(page);
234054171690SDavid Chinner 	size = i_size_read(inode);
234154171690SDavid Chinner 	if ((page->mapping != inode->i_mapping) ||
234218336338SNick Piggin 	    (page_offset(page) > size)) {
234354171690SDavid Chinner 		/* page got truncated out from underneath us */
2344b827e496SNick Piggin 		unlock_page(page);
2345b827e496SNick Piggin 		goto out;
234654171690SDavid Chinner 	}
234754171690SDavid Chinner 
234854171690SDavid Chinner 	/* page is wholly or partially inside EOF */
234954171690SDavid Chinner 	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
235054171690SDavid Chinner 		end = size & ~PAGE_CACHE_MASK;
235154171690SDavid Chinner 	else
235254171690SDavid Chinner 		end = PAGE_CACHE_SIZE;
235354171690SDavid Chinner 
2354ebdec241SChristoph Hellwig 	ret = __block_write_begin(page, 0, end, get_block);
235554171690SDavid Chinner 	if (!ret)
235654171690SDavid Chinner 		ret = block_commit_write(page, 0, end);
235754171690SDavid Chinner 
235856a76f82SNick Piggin 	if (unlikely(ret)) {
2359b827e496SNick Piggin 		unlock_page(page);
236056a76f82SNick Piggin 		if (ret == -ENOMEM)
236156a76f82SNick Piggin 			ret = VM_FAULT_OOM;
236256a76f82SNick Piggin 		else /* -ENOSPC, -EIO, etc */
2363c2ec175cSNick Piggin 			ret = VM_FAULT_SIGBUS;
2364b827e496SNick Piggin 	} else
2365b827e496SNick Piggin 		ret = VM_FAULT_LOCKED;
2366c2ec175cSNick Piggin 
2367b827e496SNick Piggin out:
236854171690SDavid Chinner 	return ret;
236954171690SDavid Chinner }
23701fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_page_mkwrite);
23711da177e4SLinus Torvalds 
23721da177e4SLinus Torvalds /*
237303158cd7SNick Piggin  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
23741da177e4SLinus Torvalds  * immediately, while under the page lock.  So it needs a special end_io
23751da177e4SLinus Torvalds  * handler which does not touch the bh after unlocking it.
23761da177e4SLinus Torvalds  */
23771da177e4SLinus Torvalds static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
23781da177e4SLinus Torvalds {
237968671f35SDmitry Monakhov 	__end_buffer_read_notouch(bh, uptodate);
23801da177e4SLinus Torvalds }
23811da177e4SLinus Torvalds 
23821da177e4SLinus Torvalds /*
238303158cd7SNick Piggin  * Attach the singly-linked list of buffers created by nobh_write_begin, to
238403158cd7SNick Piggin  * the page (converting it to circular linked list and taking care of page
238503158cd7SNick Piggin  * dirty races).
238603158cd7SNick Piggin  */
238703158cd7SNick Piggin static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
238803158cd7SNick Piggin {
238903158cd7SNick Piggin 	struct buffer_head *bh;
239003158cd7SNick Piggin 
239103158cd7SNick Piggin 	BUG_ON(!PageLocked(page));
239203158cd7SNick Piggin 
239303158cd7SNick Piggin 	spin_lock(&page->mapping->private_lock);
239403158cd7SNick Piggin 	bh = head;
239503158cd7SNick Piggin 	do {
239603158cd7SNick Piggin 		if (PageDirty(page))
239703158cd7SNick Piggin 			set_buffer_dirty(bh);
239803158cd7SNick Piggin 		if (!bh->b_this_page)
239903158cd7SNick Piggin 			bh->b_this_page = head;
240003158cd7SNick Piggin 		bh = bh->b_this_page;
240103158cd7SNick Piggin 	} while (bh != head);
240203158cd7SNick Piggin 	attach_page_buffers(page, head);
240303158cd7SNick Piggin 	spin_unlock(&page->mapping->private_lock);
240403158cd7SNick Piggin }
240503158cd7SNick Piggin 
240603158cd7SNick Piggin /*
2407ea0f04e5SChristoph Hellwig  * On entry, the page is fully not uptodate.
2408ea0f04e5SChristoph Hellwig  * On exit the page is fully uptodate in the areas outside (from,to)
24097bb46a67Snpiggin@suse.de  * The filesystem needs to handle block truncation upon failure.
24101da177e4SLinus Torvalds  */
2411ea0f04e5SChristoph Hellwig int nobh_write_begin(struct address_space *mapping,
241203158cd7SNick Piggin 			loff_t pos, unsigned len, unsigned flags,
241303158cd7SNick Piggin 			struct page **pagep, void **fsdata,
24141da177e4SLinus Torvalds 			get_block_t *get_block)
24151da177e4SLinus Torvalds {
241603158cd7SNick Piggin 	struct inode *inode = mapping->host;
24171da177e4SLinus Torvalds 	const unsigned blkbits = inode->i_blkbits;
24181da177e4SLinus Torvalds 	const unsigned blocksize = 1 << blkbits;
2419a4b0672dSNick Piggin 	struct buffer_head *head, *bh;
242003158cd7SNick Piggin 	struct page *page;
242103158cd7SNick Piggin 	pgoff_t index;
242203158cd7SNick Piggin 	unsigned from, to;
24231da177e4SLinus Torvalds 	unsigned block_in_page;
2424a4b0672dSNick Piggin 	unsigned block_start, block_end;
24251da177e4SLinus Torvalds 	sector_t block_in_file;
24261da177e4SLinus Torvalds 	int nr_reads = 0;
24271da177e4SLinus Torvalds 	int ret = 0;
24281da177e4SLinus Torvalds 	int is_mapped_to_disk = 1;
24291da177e4SLinus Torvalds 
243003158cd7SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
243103158cd7SNick Piggin 	from = pos & (PAGE_CACHE_SIZE - 1);
243203158cd7SNick Piggin 	to = from + len;
243303158cd7SNick Piggin 
243454566b2cSNick Piggin 	page = grab_cache_page_write_begin(mapping, index, flags);
243503158cd7SNick Piggin 	if (!page)
243603158cd7SNick Piggin 		return -ENOMEM;
243703158cd7SNick Piggin 	*pagep = page;
243803158cd7SNick Piggin 	*fsdata = NULL;
243903158cd7SNick Piggin 
244003158cd7SNick Piggin 	if (page_has_buffers(page)) {
2441309f77adSNamhyung Kim 		ret = __block_write_begin(page, pos, len, get_block);
2442309f77adSNamhyung Kim 		if (unlikely(ret))
2443309f77adSNamhyung Kim 			goto out_release;
2444309f77adSNamhyung Kim 		return ret;
244503158cd7SNick Piggin 	}
2446a4b0672dSNick Piggin 
24471da177e4SLinus Torvalds 	if (PageMappedToDisk(page))
24481da177e4SLinus Torvalds 		return 0;
24491da177e4SLinus Torvalds 
2450a4b0672dSNick Piggin 	/*
2451a4b0672dSNick Piggin 	 * Allocate buffers so that we can keep track of state, and potentially
2452a4b0672dSNick Piggin 	 * attach them to the page if an error occurs. In the common case of
2453a4b0672dSNick Piggin 	 * no error, they will just be freed again without ever being attached
2454a4b0672dSNick Piggin 	 * to the page (which is all OK, because we're under the page lock).
2455a4b0672dSNick Piggin 	 *
2456a4b0672dSNick Piggin 	 * Be careful: the buffer linked list is a NULL terminated one, rather
2457a4b0672dSNick Piggin 	 * than the circular one we're used to.
2458a4b0672dSNick Piggin 	 */
2459a4b0672dSNick Piggin 	head = alloc_page_buffers(page, blocksize, 0);
246003158cd7SNick Piggin 	if (!head) {
246103158cd7SNick Piggin 		ret = -ENOMEM;
246203158cd7SNick Piggin 		goto out_release;
246303158cd7SNick Piggin 	}
2464a4b0672dSNick Piggin 
24651da177e4SLinus Torvalds 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
24661da177e4SLinus Torvalds 
24671da177e4SLinus Torvalds 	/*
24681da177e4SLinus Torvalds 	 * We loop across all blocks in the page, whether or not they are
24691da177e4SLinus Torvalds 	 * part of the affected region.  This is so we can discover if the
24701da177e4SLinus Torvalds 	 * page is fully mapped-to-disk.
24711da177e4SLinus Torvalds 	 */
2472a4b0672dSNick Piggin 	for (block_start = 0, block_in_page = 0, bh = head;
24731da177e4SLinus Torvalds 		  block_start < PAGE_CACHE_SIZE;
2474a4b0672dSNick Piggin 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
24751da177e4SLinus Torvalds 		int create;
24761da177e4SLinus Torvalds 
2477a4b0672dSNick Piggin 		block_end = block_start + blocksize;
2478a4b0672dSNick Piggin 		bh->b_state = 0;
24791da177e4SLinus Torvalds 		create = 1;
24801da177e4SLinus Torvalds 		if (block_start >= to)
24811da177e4SLinus Torvalds 			create = 0;
24821da177e4SLinus Torvalds 		ret = get_block(inode, block_in_file + block_in_page,
2483a4b0672dSNick Piggin 					bh, create);
24841da177e4SLinus Torvalds 		if (ret)
24851da177e4SLinus Torvalds 			goto failed;
2486a4b0672dSNick Piggin 		if (!buffer_mapped(bh))
24871da177e4SLinus Torvalds 			is_mapped_to_disk = 0;
2488a4b0672dSNick Piggin 		if (buffer_new(bh))
2489a4b0672dSNick Piggin 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2490a4b0672dSNick Piggin 		if (PageUptodate(page)) {
2491a4b0672dSNick Piggin 			set_buffer_uptodate(bh);
24921da177e4SLinus Torvalds 			continue;
2493a4b0672dSNick Piggin 		}
2494a4b0672dSNick Piggin 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2495eebd2aa3SChristoph Lameter 			zero_user_segments(page, block_start, from,
2496eebd2aa3SChristoph Lameter 							to, block_end);
24971da177e4SLinus Torvalds 			continue;
24981da177e4SLinus Torvalds 		}
2499a4b0672dSNick Piggin 		if (buffer_uptodate(bh))
25001da177e4SLinus Torvalds 			continue;	/* reiserfs does this */
25011da177e4SLinus Torvalds 		if (block_start < from || block_end > to) {
2502a4b0672dSNick Piggin 			lock_buffer(bh);
2503a4b0672dSNick Piggin 			bh->b_end_io = end_buffer_read_nobh;
2504a4b0672dSNick Piggin 			submit_bh(READ, bh);
2505a4b0672dSNick Piggin 			nr_reads++;
25061da177e4SLinus Torvalds 		}
25071da177e4SLinus Torvalds 	}
25081da177e4SLinus Torvalds 
25091da177e4SLinus Torvalds 	if (nr_reads) {
25101da177e4SLinus Torvalds 		/*
25111da177e4SLinus Torvalds 		 * The page is locked, so these buffers are protected from
25121da177e4SLinus Torvalds 		 * any VM or truncate activity.  Hence we don't need to care
25131da177e4SLinus Torvalds 		 * for the buffer_head refcounts.
25141da177e4SLinus Torvalds 		 */
2515a4b0672dSNick Piggin 		for (bh = head; bh; bh = bh->b_this_page) {
25161da177e4SLinus Torvalds 			wait_on_buffer(bh);
25171da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
25181da177e4SLinus Torvalds 				ret = -EIO;
25191da177e4SLinus Torvalds 		}
25201da177e4SLinus Torvalds 		if (ret)
25211da177e4SLinus Torvalds 			goto failed;
25221da177e4SLinus Torvalds 	}
25231da177e4SLinus Torvalds 
25241da177e4SLinus Torvalds 	if (is_mapped_to_disk)
25251da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
25261da177e4SLinus Torvalds 
252703158cd7SNick Piggin 	*fsdata = head; /* to be released by nobh_write_end */
2528a4b0672dSNick Piggin 
25291da177e4SLinus Torvalds 	return 0;
25301da177e4SLinus Torvalds 
25311da177e4SLinus Torvalds failed:
253203158cd7SNick Piggin 	BUG_ON(!ret);
25331da177e4SLinus Torvalds 	/*
2534a4b0672dSNick Piggin 	 * Error recovery is a bit difficult. We need to zero out blocks that
2535a4b0672dSNick Piggin 	 * were newly allocated, and dirty them to ensure they get written out.
2536a4b0672dSNick Piggin 	 * Buffers need to be attached to the page at this point, otherwise
2537a4b0672dSNick Piggin 	 * the handling of potential IO errors during writeout would be hard
2538a4b0672dSNick Piggin 	 * (could try doing synchronous writeout, but what if that fails too?)
25391da177e4SLinus Torvalds 	 */
254003158cd7SNick Piggin 	attach_nobh_buffers(page, head);
254103158cd7SNick Piggin 	page_zero_new_buffers(page, from, to);
2542a4b0672dSNick Piggin 
254303158cd7SNick Piggin out_release:
254403158cd7SNick Piggin 	unlock_page(page);
254503158cd7SNick Piggin 	page_cache_release(page);
254603158cd7SNick Piggin 	*pagep = NULL;
2547a4b0672dSNick Piggin 
25487bb46a67Snpiggin@suse.de 	return ret;
25497bb46a67Snpiggin@suse.de }
255003158cd7SNick Piggin EXPORT_SYMBOL(nobh_write_begin);
25511da177e4SLinus Torvalds 
255203158cd7SNick Piggin int nobh_write_end(struct file *file, struct address_space *mapping,
255303158cd7SNick Piggin 			loff_t pos, unsigned len, unsigned copied,
255403158cd7SNick Piggin 			struct page *page, void *fsdata)
25551da177e4SLinus Torvalds {
25561da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
2557efdc3131SNick Piggin 	struct buffer_head *head = fsdata;
255803158cd7SNick Piggin 	struct buffer_head *bh;
25595b41e74aSDmitri Monakhov 	BUG_ON(fsdata != NULL && page_has_buffers(page));
25601da177e4SLinus Torvalds 
2561d4cf109fSDave Kleikamp 	if (unlikely(copied < len) && head)
256203158cd7SNick Piggin 		attach_nobh_buffers(page, head);
2563a4b0672dSNick Piggin 	if (page_has_buffers(page))
256403158cd7SNick Piggin 		return generic_write_end(file, mapping, pos, len,
256503158cd7SNick Piggin 					copied, page, fsdata);
2566a4b0672dSNick Piggin 
256722c8ca78SNick Piggin 	SetPageUptodate(page);
25681da177e4SLinus Torvalds 	set_page_dirty(page);
256903158cd7SNick Piggin 	if (pos+copied > inode->i_size) {
257003158cd7SNick Piggin 		i_size_write(inode, pos+copied);
25711da177e4SLinus Torvalds 		mark_inode_dirty(inode);
25721da177e4SLinus Torvalds 	}
257303158cd7SNick Piggin 
257403158cd7SNick Piggin 	unlock_page(page);
257503158cd7SNick Piggin 	page_cache_release(page);
257603158cd7SNick Piggin 
257703158cd7SNick Piggin 	while (head) {
257803158cd7SNick Piggin 		bh = head;
257903158cd7SNick Piggin 		head = head->b_this_page;
258003158cd7SNick Piggin 		free_buffer_head(bh);
25811da177e4SLinus Torvalds 	}
258203158cd7SNick Piggin 
258303158cd7SNick Piggin 	return copied;
258403158cd7SNick Piggin }
258503158cd7SNick Piggin EXPORT_SYMBOL(nobh_write_end);
25861da177e4SLinus Torvalds 
25871da177e4SLinus Torvalds /*
25881da177e4SLinus Torvalds  * nobh_writepage() - based on block_full_write_page() except
25891da177e4SLinus Torvalds  * that it tries to operate without attaching bufferheads to
25901da177e4SLinus Torvalds  * the page.
25911da177e4SLinus Torvalds  */
25921da177e4SLinus Torvalds int nobh_writepage(struct page *page, get_block_t *get_block,
25931da177e4SLinus Torvalds 			struct writeback_control *wbc)
25941da177e4SLinus Torvalds {
25951da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
25961da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
25971da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
25981da177e4SLinus Torvalds 	unsigned offset;
25991da177e4SLinus Torvalds 	int ret;
26001da177e4SLinus Torvalds 
26011da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
26021da177e4SLinus Torvalds 	if (page->index < end_index)
26031da177e4SLinus Torvalds 		goto out;
26041da177e4SLinus Torvalds 
26051da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
26061da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
26071da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
26081da177e4SLinus Torvalds 		/*
26091da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
26101da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
26111da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
26121da177e4SLinus Torvalds 		 */
26131da177e4SLinus Torvalds #if 0
26141da177e4SLinus Torvalds 		/* Not really sure about this  - do we need this ? */
26151da177e4SLinus Torvalds 		if (page->mapping->a_ops->invalidatepage)
26161da177e4SLinus Torvalds 			page->mapping->a_ops->invalidatepage(page, offset);
26171da177e4SLinus Torvalds #endif
26181da177e4SLinus Torvalds 		unlock_page(page);
26191da177e4SLinus Torvalds 		return 0; /* don't care */
26201da177e4SLinus Torvalds 	}
26211da177e4SLinus Torvalds 
26221da177e4SLinus Torvalds 	/*
26231da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
26241da177e4SLinus Torvalds 	 * writepage invocation because it may be mmapped.  "A file is mapped
26251da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
26261da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
26271da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
26281da177e4SLinus Torvalds 	 */
2629eebd2aa3SChristoph Lameter 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
26301da177e4SLinus Torvalds out:
26311da177e4SLinus Torvalds 	ret = mpage_writepage(page, get_block, wbc);
26321da177e4SLinus Torvalds 	if (ret == -EAGAIN)
263335c80d5fSChris Mason 		ret = __block_write_full_page(inode, page, get_block, wbc,
263435c80d5fSChris Mason 					      end_buffer_async_write);
26351da177e4SLinus Torvalds 	return ret;
26361da177e4SLinus Torvalds }
26371da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_writepage);
26381da177e4SLinus Torvalds 
263903158cd7SNick Piggin int nobh_truncate_page(struct address_space *mapping,
264003158cd7SNick Piggin 			loff_t from, get_block_t *get_block)
26411da177e4SLinus Torvalds {
26421da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
26431da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
264403158cd7SNick Piggin 	unsigned blocksize;
264503158cd7SNick Piggin 	sector_t iblock;
264603158cd7SNick Piggin 	unsigned length, pos;
264703158cd7SNick Piggin 	struct inode *inode = mapping->host;
26481da177e4SLinus Torvalds 	struct page *page;
264903158cd7SNick Piggin 	struct buffer_head map_bh;
265003158cd7SNick Piggin 	int err;
26511da177e4SLinus Torvalds 
265203158cd7SNick Piggin 	blocksize = 1 << inode->i_blkbits;
265303158cd7SNick Piggin 	length = offset & (blocksize - 1);
26541da177e4SLinus Torvalds 
265503158cd7SNick Piggin 	/* Block boundary? Nothing to do */
265603158cd7SNick Piggin 	if (!length)
265703158cd7SNick Piggin 		return 0;
265803158cd7SNick Piggin 
265903158cd7SNick Piggin 	length = blocksize - length;
266003158cd7SNick Piggin 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
266103158cd7SNick Piggin 
26621da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
266303158cd7SNick Piggin 	err = -ENOMEM;
26641da177e4SLinus Torvalds 	if (!page)
26651da177e4SLinus Torvalds 		goto out;
26661da177e4SLinus Torvalds 
266703158cd7SNick Piggin 	if (page_has_buffers(page)) {
266803158cd7SNick Piggin has_buffers:
266903158cd7SNick Piggin 		unlock_page(page);
267003158cd7SNick Piggin 		page_cache_release(page);
267103158cd7SNick Piggin 		return block_truncate_page(mapping, from, get_block);
26721da177e4SLinus Torvalds 	}
267303158cd7SNick Piggin 
267403158cd7SNick Piggin 	/* Find the buffer that contains "offset" */
267503158cd7SNick Piggin 	pos = blocksize;
267603158cd7SNick Piggin 	while (offset >= pos) {
267703158cd7SNick Piggin 		iblock++;
267803158cd7SNick Piggin 		pos += blocksize;
267903158cd7SNick Piggin 	}
268003158cd7SNick Piggin 
2681460bcf57STheodore Ts'o 	map_bh.b_size = blocksize;
2682460bcf57STheodore Ts'o 	map_bh.b_state = 0;
268303158cd7SNick Piggin 	err = get_block(inode, iblock, &map_bh, 0);
268403158cd7SNick Piggin 	if (err)
268503158cd7SNick Piggin 		goto unlock;
268603158cd7SNick Piggin 	/* unmapped? It's a hole - nothing to do */
268703158cd7SNick Piggin 	if (!buffer_mapped(&map_bh))
268803158cd7SNick Piggin 		goto unlock;
268903158cd7SNick Piggin 
269003158cd7SNick Piggin 	/* Ok, it's mapped. Make sure it's up-to-date */
269103158cd7SNick Piggin 	if (!PageUptodate(page)) {
269203158cd7SNick Piggin 		err = mapping->a_ops->readpage(NULL, page);
269303158cd7SNick Piggin 		if (err) {
269403158cd7SNick Piggin 			page_cache_release(page);
269503158cd7SNick Piggin 			goto out;
269603158cd7SNick Piggin 		}
269703158cd7SNick Piggin 		lock_page(page);
269803158cd7SNick Piggin 		if (!PageUptodate(page)) {
269903158cd7SNick Piggin 			err = -EIO;
270003158cd7SNick Piggin 			goto unlock;
270103158cd7SNick Piggin 		}
270203158cd7SNick Piggin 		if (page_has_buffers(page))
270303158cd7SNick Piggin 			goto has_buffers;
270403158cd7SNick Piggin 	}
2705eebd2aa3SChristoph Lameter 	zero_user(page, offset, length);
270603158cd7SNick Piggin 	set_page_dirty(page);
270703158cd7SNick Piggin 	err = 0;
270803158cd7SNick Piggin 
270903158cd7SNick Piggin unlock:
27101da177e4SLinus Torvalds 	unlock_page(page);
27111da177e4SLinus Torvalds 	page_cache_release(page);
27121da177e4SLinus Torvalds out:
271303158cd7SNick Piggin 	return err;
27141da177e4SLinus Torvalds }
27151da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_truncate_page);
27161da177e4SLinus Torvalds 
27171da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping,
27181da177e4SLinus Torvalds 			loff_t from, get_block_t *get_block)
27191da177e4SLinus Torvalds {
27201da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
27211da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
27221da177e4SLinus Torvalds 	unsigned blocksize;
272354b21a79SAndrew Morton 	sector_t iblock;
27241da177e4SLinus Torvalds 	unsigned length, pos;
27251da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
27261da177e4SLinus Torvalds 	struct page *page;
27271da177e4SLinus Torvalds 	struct buffer_head *bh;
27281da177e4SLinus Torvalds 	int err;
27291da177e4SLinus Torvalds 
27301da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
27311da177e4SLinus Torvalds 	length = offset & (blocksize - 1);
27321da177e4SLinus Torvalds 
27331da177e4SLinus Torvalds 	/* Block boundary? Nothing to do */
27341da177e4SLinus Torvalds 	if (!length)
27351da177e4SLinus Torvalds 		return 0;
27361da177e4SLinus Torvalds 
27371da177e4SLinus Torvalds 	length = blocksize - length;
273854b21a79SAndrew Morton 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
27391da177e4SLinus Torvalds 
27401da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
27411da177e4SLinus Torvalds 	err = -ENOMEM;
27421da177e4SLinus Torvalds 	if (!page)
27431da177e4SLinus Torvalds 		goto out;
27441da177e4SLinus Torvalds 
27451da177e4SLinus Torvalds 	if (!page_has_buffers(page))
27461da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
27471da177e4SLinus Torvalds 
27481da177e4SLinus Torvalds 	/* Find the buffer that contains "offset" */
27491da177e4SLinus Torvalds 	bh = page_buffers(page);
27501da177e4SLinus Torvalds 	pos = blocksize;
27511da177e4SLinus Torvalds 	while (offset >= pos) {
27521da177e4SLinus Torvalds 		bh = bh->b_this_page;
27531da177e4SLinus Torvalds 		iblock++;
27541da177e4SLinus Torvalds 		pos += blocksize;
27551da177e4SLinus Torvalds 	}
27561da177e4SLinus Torvalds 
27571da177e4SLinus Torvalds 	err = 0;
27581da177e4SLinus Torvalds 	if (!buffer_mapped(bh)) {
2759b0cf2321SBadari Pulavarty 		WARN_ON(bh->b_size != blocksize);
27601da177e4SLinus Torvalds 		err = get_block(inode, iblock, bh, 0);
27611da177e4SLinus Torvalds 		if (err)
27621da177e4SLinus Torvalds 			goto unlock;
27631da177e4SLinus Torvalds 		/* unmapped? It's a hole - nothing to do */
27641da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
27651da177e4SLinus Torvalds 			goto unlock;
27661da177e4SLinus Torvalds 	}
27671da177e4SLinus Torvalds 
27681da177e4SLinus Torvalds 	/* Ok, it's mapped. Make sure it's up-to-date */
27691da177e4SLinus Torvalds 	if (PageUptodate(page))
27701da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
27711da177e4SLinus Torvalds 
277233a266ddSDavid Chinner 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
27731da177e4SLinus Torvalds 		err = -EIO;
27741da177e4SLinus Torvalds 		ll_rw_block(READ, 1, &bh);
27751da177e4SLinus Torvalds 		wait_on_buffer(bh);
27761da177e4SLinus Torvalds 		/* Uhhuh. Read error. Complain and punt. */
27771da177e4SLinus Torvalds 		if (!buffer_uptodate(bh))
27781da177e4SLinus Torvalds 			goto unlock;
27791da177e4SLinus Torvalds 	}
27801da177e4SLinus Torvalds 
2781eebd2aa3SChristoph Lameter 	zero_user(page, offset, length);
27821da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
27831da177e4SLinus Torvalds 	err = 0;
27841da177e4SLinus Torvalds 
27851da177e4SLinus Torvalds unlock:
27861da177e4SLinus Torvalds 	unlock_page(page);
27871da177e4SLinus Torvalds 	page_cache_release(page);
27881da177e4SLinus Torvalds out:
27891da177e4SLinus Torvalds 	return err;
27901da177e4SLinus Torvalds }
27911fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_truncate_page);
27921da177e4SLinus Torvalds 
27931da177e4SLinus Torvalds /*
27941da177e4SLinus Torvalds  * The generic ->writepage function for buffer-backed address_spaces
279535c80d5fSChris Mason  * this form passes in the end_io handler used to finish the IO.
27961da177e4SLinus Torvalds  */
279735c80d5fSChris Mason int block_write_full_page_endio(struct page *page, get_block_t *get_block,
279835c80d5fSChris Mason 			struct writeback_control *wbc, bh_end_io_t *handler)
27991da177e4SLinus Torvalds {
28001da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
28011da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
28021da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
28031da177e4SLinus Torvalds 	unsigned offset;
28041da177e4SLinus Torvalds 
28051da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
28061da177e4SLinus Torvalds 	if (page->index < end_index)
280735c80d5fSChris Mason 		return __block_write_full_page(inode, page, get_block, wbc,
280835c80d5fSChris Mason 					       handler);
28091da177e4SLinus Torvalds 
28101da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
28111da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
28121da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
28131da177e4SLinus Torvalds 		/*
28141da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
28151da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
28161da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
28171da177e4SLinus Torvalds 		 */
2818aaa4059bSJan Kara 		do_invalidatepage(page, 0);
28191da177e4SLinus Torvalds 		unlock_page(page);
28201da177e4SLinus Torvalds 		return 0; /* don't care */
28211da177e4SLinus Torvalds 	}
28221da177e4SLinus Torvalds 
28231da177e4SLinus Torvalds 	/*
28241da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
28252a61aa40SAdam Buchbinder 	 * writepage invocation because it may be mmapped.  "A file is mapped
28261da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
28271da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
28281da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
28291da177e4SLinus Torvalds 	 */
2830eebd2aa3SChristoph Lameter 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
283135c80d5fSChris Mason 	return __block_write_full_page(inode, page, get_block, wbc, handler);
28321da177e4SLinus Torvalds }
28331fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_write_full_page_endio);
28341da177e4SLinus Torvalds 
283535c80d5fSChris Mason /*
283635c80d5fSChris Mason  * The generic ->writepage function for buffer-backed address_spaces
283735c80d5fSChris Mason  */
283835c80d5fSChris Mason int block_write_full_page(struct page *page, get_block_t *get_block,
283935c80d5fSChris Mason 			struct writeback_control *wbc)
284035c80d5fSChris Mason {
284135c80d5fSChris Mason 	return block_write_full_page_endio(page, get_block, wbc,
284235c80d5fSChris Mason 					   end_buffer_async_write);
284335c80d5fSChris Mason }
28441fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_write_full_page);
284535c80d5fSChris Mason 
28461da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
28471da177e4SLinus Torvalds 			    get_block_t *get_block)
28481da177e4SLinus Torvalds {
28491da177e4SLinus Torvalds 	struct buffer_head tmp;
28501da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
28511da177e4SLinus Torvalds 	tmp.b_state = 0;
28521da177e4SLinus Torvalds 	tmp.b_blocknr = 0;
2853b0cf2321SBadari Pulavarty 	tmp.b_size = 1 << inode->i_blkbits;
28541da177e4SLinus Torvalds 	get_block(inode, block, &tmp, 0);
28551da177e4SLinus Torvalds 	return tmp.b_blocknr;
28561da177e4SLinus Torvalds }
28571fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_block_bmap);
28581da177e4SLinus Torvalds 
28596712ecf8SNeilBrown static void end_bio_bh_io_sync(struct bio *bio, int err)
28601da177e4SLinus Torvalds {
28611da177e4SLinus Torvalds 	struct buffer_head *bh = bio->bi_private;
28621da177e4SLinus Torvalds 
28631da177e4SLinus Torvalds 	if (err == -EOPNOTSUPP) {
28641da177e4SLinus Torvalds 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
28651da177e4SLinus Torvalds 	}
28661da177e4SLinus Torvalds 
286708bafc03SKeith Mannthey 	if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
286808bafc03SKeith Mannthey 		set_bit(BH_Quiet, &bh->b_state);
286908bafc03SKeith Mannthey 
28701da177e4SLinus Torvalds 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
28711da177e4SLinus Torvalds 	bio_put(bio);
28721da177e4SLinus Torvalds }
28731da177e4SLinus Torvalds 
28741da177e4SLinus Torvalds int submit_bh(int rw, struct buffer_head * bh)
28751da177e4SLinus Torvalds {
28761da177e4SLinus Torvalds 	struct bio *bio;
28771da177e4SLinus Torvalds 	int ret = 0;
28781da177e4SLinus Torvalds 
28791da177e4SLinus Torvalds 	BUG_ON(!buffer_locked(bh));
28801da177e4SLinus Torvalds 	BUG_ON(!buffer_mapped(bh));
28811da177e4SLinus Torvalds 	BUG_ON(!bh->b_end_io);
28828fb0e342SAneesh Kumar K.V 	BUG_ON(buffer_delay(bh));
28838fb0e342SAneesh Kumar K.V 	BUG_ON(buffer_unwritten(bh));
28841da177e4SLinus Torvalds 
288548fd4f93SJens Axboe 	/*
288648fd4f93SJens Axboe 	 * Only clear out a write error when rewriting
28871da177e4SLinus Torvalds 	 */
288848fd4f93SJens Axboe 	if (test_set_buffer_req(bh) && (rw & WRITE))
28891da177e4SLinus Torvalds 		clear_buffer_write_io_error(bh);
28901da177e4SLinus Torvalds 
28911da177e4SLinus Torvalds 	/*
28921da177e4SLinus Torvalds 	 * from here on down, it's all bio -- do the initial mapping,
28931da177e4SLinus Torvalds 	 * submit_bio -> generic_make_request may further map this bio around
28941da177e4SLinus Torvalds 	 */
28951da177e4SLinus Torvalds 	bio = bio_alloc(GFP_NOIO, 1);
28961da177e4SLinus Torvalds 
28971da177e4SLinus Torvalds 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
28981da177e4SLinus Torvalds 	bio->bi_bdev = bh->b_bdev;
28991da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_page = bh->b_page;
29001da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_len = bh->b_size;
29011da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
29021da177e4SLinus Torvalds 
29031da177e4SLinus Torvalds 	bio->bi_vcnt = 1;
29041da177e4SLinus Torvalds 	bio->bi_idx = 0;
29051da177e4SLinus Torvalds 	bio->bi_size = bh->b_size;
29061da177e4SLinus Torvalds 
29071da177e4SLinus Torvalds 	bio->bi_end_io = end_bio_bh_io_sync;
29081da177e4SLinus Torvalds 	bio->bi_private = bh;
29091da177e4SLinus Torvalds 
29101da177e4SLinus Torvalds 	bio_get(bio);
29111da177e4SLinus Torvalds 	submit_bio(rw, bio);
29121da177e4SLinus Torvalds 
29131da177e4SLinus Torvalds 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
29141da177e4SLinus Torvalds 		ret = -EOPNOTSUPP;
29151da177e4SLinus Torvalds 
29161da177e4SLinus Torvalds 	bio_put(bio);
29171da177e4SLinus Torvalds 	return ret;
29181da177e4SLinus Torvalds }
29191fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(submit_bh);
29201da177e4SLinus Torvalds 
29211da177e4SLinus Torvalds /**
29221da177e4SLinus Torvalds  * ll_rw_block: low-level access to block devices (DEPRECATED)
29239cb569d6SChristoph Hellwig  * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
29241da177e4SLinus Torvalds  * @nr: number of &struct buffer_heads in the array
29251da177e4SLinus Torvalds  * @bhs: array of pointers to &struct buffer_head
29261da177e4SLinus Torvalds  *
2927a7662236SJan Kara  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2928a7662236SJan Kara  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
29299cb569d6SChristoph Hellwig  * %READA option is described in the documentation for generic_make_request()
29309cb569d6SChristoph Hellwig  * which ll_rw_block() calls.
29311da177e4SLinus Torvalds  *
29321da177e4SLinus Torvalds  * This function drops any buffer that it cannot get a lock on (with the
29339cb569d6SChristoph Hellwig  * BH_Lock state bit), any buffer that appears to be clean when doing a write
29349cb569d6SChristoph Hellwig  * request, and any buffer that appears to be up-to-date when doing read
29359cb569d6SChristoph Hellwig  * request.  Further it marks as clean buffers that are processed for
29369cb569d6SChristoph Hellwig  * writing (the buffer cache won't assume that they are actually clean
29379cb569d6SChristoph Hellwig  * until the buffer gets unlocked).
29381da177e4SLinus Torvalds  *
29391da177e4SLinus Torvalds  * ll_rw_block sets b_end_io to simple completion handler that marks
29401da177e4SLinus Torvalds  * the buffer up-to-date (if approriate), unlocks the buffer and wakes
29411da177e4SLinus Torvalds  * any waiters.
29421da177e4SLinus Torvalds  *
29431da177e4SLinus Torvalds  * All of the buffers must be for the same device, and must also be a
29441da177e4SLinus Torvalds  * multiple of the current approved size for the device.
29451da177e4SLinus Torvalds  */
29461da177e4SLinus Torvalds void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
29471da177e4SLinus Torvalds {
29481da177e4SLinus Torvalds 	int i;
29491da177e4SLinus Torvalds 
29501da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
29511da177e4SLinus Torvalds 		struct buffer_head *bh = bhs[i];
29521da177e4SLinus Torvalds 
29539cb569d6SChristoph Hellwig 		if (!trylock_buffer(bh))
29541da177e4SLinus Torvalds 			continue;
29559cb569d6SChristoph Hellwig 		if (rw == WRITE) {
29561da177e4SLinus Torvalds 			if (test_clear_buffer_dirty(bh)) {
295776c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_write_sync;
2958e60e5c50SOGAWA Hirofumi 				get_bh(bh);
29591da177e4SLinus Torvalds 				submit_bh(WRITE, bh);
29601da177e4SLinus Torvalds 				continue;
29611da177e4SLinus Torvalds 			}
29621da177e4SLinus Torvalds 		} else {
29631da177e4SLinus Torvalds 			if (!buffer_uptodate(bh)) {
296476c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_read_sync;
2965e60e5c50SOGAWA Hirofumi 				get_bh(bh);
29661da177e4SLinus Torvalds 				submit_bh(rw, bh);
29671da177e4SLinus Torvalds 				continue;
29681da177e4SLinus Torvalds 			}
29691da177e4SLinus Torvalds 		}
29701da177e4SLinus Torvalds 		unlock_buffer(bh);
29711da177e4SLinus Torvalds 	}
29721da177e4SLinus Torvalds }
29731fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(ll_rw_block);
29741da177e4SLinus Torvalds 
29759cb569d6SChristoph Hellwig void write_dirty_buffer(struct buffer_head *bh, int rw)
29769cb569d6SChristoph Hellwig {
29779cb569d6SChristoph Hellwig 	lock_buffer(bh);
29789cb569d6SChristoph Hellwig 	if (!test_clear_buffer_dirty(bh)) {
29799cb569d6SChristoph Hellwig 		unlock_buffer(bh);
29809cb569d6SChristoph Hellwig 		return;
29819cb569d6SChristoph Hellwig 	}
29829cb569d6SChristoph Hellwig 	bh->b_end_io = end_buffer_write_sync;
29839cb569d6SChristoph Hellwig 	get_bh(bh);
29849cb569d6SChristoph Hellwig 	submit_bh(rw, bh);
29859cb569d6SChristoph Hellwig }
29869cb569d6SChristoph Hellwig EXPORT_SYMBOL(write_dirty_buffer);
29879cb569d6SChristoph Hellwig 
29881da177e4SLinus Torvalds /*
29891da177e4SLinus Torvalds  * For a data-integrity writeout, we need to wait upon any in-progress I/O
29901da177e4SLinus Torvalds  * and then start new I/O and then wait upon it.  The caller must have a ref on
29911da177e4SLinus Torvalds  * the buffer_head.
29921da177e4SLinus Torvalds  */
299387e99511SChristoph Hellwig int __sync_dirty_buffer(struct buffer_head *bh, int rw)
29941da177e4SLinus Torvalds {
29951da177e4SLinus Torvalds 	int ret = 0;
29961da177e4SLinus Torvalds 
29971da177e4SLinus Torvalds 	WARN_ON(atomic_read(&bh->b_count) < 1);
29981da177e4SLinus Torvalds 	lock_buffer(bh);
29991da177e4SLinus Torvalds 	if (test_clear_buffer_dirty(bh)) {
30001da177e4SLinus Torvalds 		get_bh(bh);
30011da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_write_sync;
300287e99511SChristoph Hellwig 		ret = submit_bh(rw, bh);
30031da177e4SLinus Torvalds 		wait_on_buffer(bh);
30041da177e4SLinus Torvalds 		if (!ret && !buffer_uptodate(bh))
30051da177e4SLinus Torvalds 			ret = -EIO;
30061da177e4SLinus Torvalds 	} else {
30071da177e4SLinus Torvalds 		unlock_buffer(bh);
30081da177e4SLinus Torvalds 	}
30091da177e4SLinus Torvalds 	return ret;
30101da177e4SLinus Torvalds }
301187e99511SChristoph Hellwig EXPORT_SYMBOL(__sync_dirty_buffer);
301287e99511SChristoph Hellwig 
301387e99511SChristoph Hellwig int sync_dirty_buffer(struct buffer_head *bh)
301487e99511SChristoph Hellwig {
301587e99511SChristoph Hellwig 	return __sync_dirty_buffer(bh, WRITE_SYNC);
301687e99511SChristoph Hellwig }
30171fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(sync_dirty_buffer);
30181da177e4SLinus Torvalds 
30191da177e4SLinus Torvalds /*
30201da177e4SLinus Torvalds  * try_to_free_buffers() checks if all the buffers on this particular page
30211da177e4SLinus Torvalds  * are unused, and releases them if so.
30221da177e4SLinus Torvalds  *
30231da177e4SLinus Torvalds  * Exclusion against try_to_free_buffers may be obtained by either
30241da177e4SLinus Torvalds  * locking the page or by holding its mapping's private_lock.
30251da177e4SLinus Torvalds  *
30261da177e4SLinus Torvalds  * If the page is dirty but all the buffers are clean then we need to
30271da177e4SLinus Torvalds  * be sure to mark the page clean as well.  This is because the page
30281da177e4SLinus Torvalds  * may be against a block device, and a later reattachment of buffers
30291da177e4SLinus Torvalds  * to a dirty page will set *all* buffers dirty.  Which would corrupt
30301da177e4SLinus Torvalds  * filesystem data on the same device.
30311da177e4SLinus Torvalds  *
30321da177e4SLinus Torvalds  * The same applies to regular filesystem pages: if all the buffers are
30331da177e4SLinus Torvalds  * clean then we set the page clean and proceed.  To do that, we require
30341da177e4SLinus Torvalds  * total exclusion from __set_page_dirty_buffers().  That is obtained with
30351da177e4SLinus Torvalds  * private_lock.
30361da177e4SLinus Torvalds  *
30371da177e4SLinus Torvalds  * try_to_free_buffers() is non-blocking.
30381da177e4SLinus Torvalds  */
30391da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh)
30401da177e4SLinus Torvalds {
30411da177e4SLinus Torvalds 	return atomic_read(&bh->b_count) |
30421da177e4SLinus Torvalds 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
30431da177e4SLinus Torvalds }
30441da177e4SLinus Torvalds 
30451da177e4SLinus Torvalds static int
30461da177e4SLinus Torvalds drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
30471da177e4SLinus Torvalds {
30481da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);
30491da177e4SLinus Torvalds 	struct buffer_head *bh;
30501da177e4SLinus Torvalds 
30511da177e4SLinus Torvalds 	bh = head;
30521da177e4SLinus Torvalds 	do {
3053de7d5a3bSakpm@osdl.org 		if (buffer_write_io_error(bh) && page->mapping)
30541da177e4SLinus Torvalds 			set_bit(AS_EIO, &page->mapping->flags);
30551da177e4SLinus Torvalds 		if (buffer_busy(bh))
30561da177e4SLinus Torvalds 			goto failed;
30571da177e4SLinus Torvalds 		bh = bh->b_this_page;
30581da177e4SLinus Torvalds 	} while (bh != head);
30591da177e4SLinus Torvalds 
30601da177e4SLinus Torvalds 	do {
30611da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
30621da177e4SLinus Torvalds 
3063535ee2fbSJan Kara 		if (bh->b_assoc_map)
30641da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
30651da177e4SLinus Torvalds 		bh = next;
30661da177e4SLinus Torvalds 	} while (bh != head);
30671da177e4SLinus Torvalds 	*buffers_to_free = head;
30681da177e4SLinus Torvalds 	__clear_page_buffers(page);
30691da177e4SLinus Torvalds 	return 1;
30701da177e4SLinus Torvalds failed:
30711da177e4SLinus Torvalds 	return 0;
30721da177e4SLinus Torvalds }
30731da177e4SLinus Torvalds 
30741da177e4SLinus Torvalds int try_to_free_buffers(struct page *page)
30751da177e4SLinus Torvalds {
30761da177e4SLinus Torvalds 	struct address_space * const mapping = page->mapping;
30771da177e4SLinus Torvalds 	struct buffer_head *buffers_to_free = NULL;
30781da177e4SLinus Torvalds 	int ret = 0;
30791da177e4SLinus Torvalds 
30801da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
3081ecdfc978SLinus Torvalds 	if (PageWriteback(page))
30821da177e4SLinus Torvalds 		return 0;
30831da177e4SLinus Torvalds 
30841da177e4SLinus Torvalds 	if (mapping == NULL) {		/* can this still happen? */
30851da177e4SLinus Torvalds 		ret = drop_buffers(page, &buffers_to_free);
30861da177e4SLinus Torvalds 		goto out;
30871da177e4SLinus Torvalds 	}
30881da177e4SLinus Torvalds 
30891da177e4SLinus Torvalds 	spin_lock(&mapping->private_lock);
30901da177e4SLinus Torvalds 	ret = drop_buffers(page, &buffers_to_free);
3091ecdfc978SLinus Torvalds 
3092ecdfc978SLinus Torvalds 	/*
3093ecdfc978SLinus Torvalds 	 * If the filesystem writes its buffers by hand (eg ext3)
3094ecdfc978SLinus Torvalds 	 * then we can have clean buffers against a dirty page.  We
3095ecdfc978SLinus Torvalds 	 * clean the page here; otherwise the VM will never notice
3096ecdfc978SLinus Torvalds 	 * that the filesystem did any IO at all.
3097ecdfc978SLinus Torvalds 	 *
3098ecdfc978SLinus Torvalds 	 * Also, during truncate, discard_buffer will have marked all
3099ecdfc978SLinus Torvalds 	 * the page's buffers clean.  We discover that here and clean
3100ecdfc978SLinus Torvalds 	 * the page also.
310187df7241SNick Piggin 	 *
310287df7241SNick Piggin 	 * private_lock must be held over this entire operation in order
310387df7241SNick Piggin 	 * to synchronise against __set_page_dirty_buffers and prevent the
310487df7241SNick Piggin 	 * dirty bit from being lost.
3105ecdfc978SLinus Torvalds 	 */
3106ecdfc978SLinus Torvalds 	if (ret)
3107ecdfc978SLinus Torvalds 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
310887df7241SNick Piggin 	spin_unlock(&mapping->private_lock);
31091da177e4SLinus Torvalds out:
31101da177e4SLinus Torvalds 	if (buffers_to_free) {
31111da177e4SLinus Torvalds 		struct buffer_head *bh = buffers_to_free;
31121da177e4SLinus Torvalds 
31131da177e4SLinus Torvalds 		do {
31141da177e4SLinus Torvalds 			struct buffer_head *next = bh->b_this_page;
31151da177e4SLinus Torvalds 			free_buffer_head(bh);
31161da177e4SLinus Torvalds 			bh = next;
31171da177e4SLinus Torvalds 		} while (bh != buffers_to_free);
31181da177e4SLinus Torvalds 	}
31191da177e4SLinus Torvalds 	return ret;
31201da177e4SLinus Torvalds }
31211da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers);
31221da177e4SLinus Torvalds 
31231da177e4SLinus Torvalds /*
31241da177e4SLinus Torvalds  * There are no bdflush tunables left.  But distributions are
31251da177e4SLinus Torvalds  * still running obsolete flush daemons, so we terminate them here.
31261da177e4SLinus Torvalds  *
31271da177e4SLinus Torvalds  * Use of bdflush() is deprecated and will be removed in a future kernel.
31285b0830cbSJens Axboe  * The `flush-X' kernel threads fully replace bdflush daemons and this call.
31291da177e4SLinus Torvalds  */
3130bdc480e3SHeiko Carstens SYSCALL_DEFINE2(bdflush, int, func, long, data)
31311da177e4SLinus Torvalds {
31321da177e4SLinus Torvalds 	static int msg_count;
31331da177e4SLinus Torvalds 
31341da177e4SLinus Torvalds 	if (!capable(CAP_SYS_ADMIN))
31351da177e4SLinus Torvalds 		return -EPERM;
31361da177e4SLinus Torvalds 
31371da177e4SLinus Torvalds 	if (msg_count < 5) {
31381da177e4SLinus Torvalds 		msg_count++;
31391da177e4SLinus Torvalds 		printk(KERN_INFO
31401da177e4SLinus Torvalds 			"warning: process `%s' used the obsolete bdflush"
31411da177e4SLinus Torvalds 			" system call\n", current->comm);
31421da177e4SLinus Torvalds 		printk(KERN_INFO "Fix your initscripts?\n");
31431da177e4SLinus Torvalds 	}
31441da177e4SLinus Torvalds 
31451da177e4SLinus Torvalds 	if (func == 1)
31461da177e4SLinus Torvalds 		do_exit(0);
31471da177e4SLinus Torvalds 	return 0;
31481da177e4SLinus Torvalds }
31491da177e4SLinus Torvalds 
31501da177e4SLinus Torvalds /*
31511da177e4SLinus Torvalds  * Buffer-head allocation
31521da177e4SLinus Torvalds  */
3153e18b890bSChristoph Lameter static struct kmem_cache *bh_cachep;
31541da177e4SLinus Torvalds 
31551da177e4SLinus Torvalds /*
31561da177e4SLinus Torvalds  * Once the number of bh's in the machine exceeds this level, we start
31571da177e4SLinus Torvalds  * stripping them in writeback.
31581da177e4SLinus Torvalds  */
31591da177e4SLinus Torvalds static int max_buffer_heads;
31601da177e4SLinus Torvalds 
31611da177e4SLinus Torvalds int buffer_heads_over_limit;
31621da177e4SLinus Torvalds 
31631da177e4SLinus Torvalds struct bh_accounting {
31641da177e4SLinus Torvalds 	int nr;			/* Number of live bh's */
31651da177e4SLinus Torvalds 	int ratelimit;		/* Limit cacheline bouncing */
31661da177e4SLinus Torvalds };
31671da177e4SLinus Torvalds 
31681da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
31691da177e4SLinus Torvalds 
31701da177e4SLinus Torvalds static void recalc_bh_state(void)
31711da177e4SLinus Torvalds {
31721da177e4SLinus Torvalds 	int i;
31731da177e4SLinus Torvalds 	int tot = 0;
31741da177e4SLinus Torvalds 
3175ee1be862SChristoph Lameter 	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
31761da177e4SLinus Torvalds 		return;
3177c7b92516SChristoph Lameter 	__this_cpu_write(bh_accounting.ratelimit, 0);
31788a143426SEric Dumazet 	for_each_online_cpu(i)
31791da177e4SLinus Torvalds 		tot += per_cpu(bh_accounting, i).nr;
31801da177e4SLinus Torvalds 	buffer_heads_over_limit = (tot > max_buffer_heads);
31811da177e4SLinus Torvalds }
31821da177e4SLinus Torvalds 
3183dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
31841da177e4SLinus Torvalds {
3185019b4d12SRichard Kennedy 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
31861da177e4SLinus Torvalds 	if (ret) {
3187a35afb83SChristoph Lameter 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3188c7b92516SChristoph Lameter 		preempt_disable();
3189c7b92516SChristoph Lameter 		__this_cpu_inc(bh_accounting.nr);
31901da177e4SLinus Torvalds 		recalc_bh_state();
3191c7b92516SChristoph Lameter 		preempt_enable();
31921da177e4SLinus Torvalds 	}
31931da177e4SLinus Torvalds 	return ret;
31941da177e4SLinus Torvalds }
31951da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head);
31961da177e4SLinus Torvalds 
31971da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh)
31981da177e4SLinus Torvalds {
31991da177e4SLinus Torvalds 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
32001da177e4SLinus Torvalds 	kmem_cache_free(bh_cachep, bh);
3201c7b92516SChristoph Lameter 	preempt_disable();
3202c7b92516SChristoph Lameter 	__this_cpu_dec(bh_accounting.nr);
32031da177e4SLinus Torvalds 	recalc_bh_state();
3204c7b92516SChristoph Lameter 	preempt_enable();
32051da177e4SLinus Torvalds }
32061da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head);
32071da177e4SLinus Torvalds 
32081da177e4SLinus Torvalds static void buffer_exit_cpu(int cpu)
32091da177e4SLinus Torvalds {
32101da177e4SLinus Torvalds 	int i;
32111da177e4SLinus Torvalds 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
32121da177e4SLinus Torvalds 
32131da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
32141da177e4SLinus Torvalds 		brelse(b->bhs[i]);
32151da177e4SLinus Torvalds 		b->bhs[i] = NULL;
32161da177e4SLinus Torvalds 	}
3217c7b92516SChristoph Lameter 	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
32188a143426SEric Dumazet 	per_cpu(bh_accounting, cpu).nr = 0;
32191da177e4SLinus Torvalds }
32201da177e4SLinus Torvalds 
32211da177e4SLinus Torvalds static int buffer_cpu_notify(struct notifier_block *self,
32221da177e4SLinus Torvalds 			      unsigned long action, void *hcpu)
32231da177e4SLinus Torvalds {
32248bb78442SRafael J. Wysocki 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
32251da177e4SLinus Torvalds 		buffer_exit_cpu((unsigned long)hcpu);
32261da177e4SLinus Torvalds 	return NOTIFY_OK;
32271da177e4SLinus Torvalds }
32281da177e4SLinus Torvalds 
3229389d1b08SAneesh Kumar K.V /**
3230a6b91919SRandy Dunlap  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3231389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head
3232389d1b08SAneesh Kumar K.V  *
3233389d1b08SAneesh Kumar K.V  * Return true if the buffer is up-to-date and false,
3234389d1b08SAneesh Kumar K.V  * with the buffer locked, if not.
3235389d1b08SAneesh Kumar K.V  */
3236389d1b08SAneesh Kumar K.V int bh_uptodate_or_lock(struct buffer_head *bh)
3237389d1b08SAneesh Kumar K.V {
3238389d1b08SAneesh Kumar K.V 	if (!buffer_uptodate(bh)) {
3239389d1b08SAneesh Kumar K.V 		lock_buffer(bh);
3240389d1b08SAneesh Kumar K.V 		if (!buffer_uptodate(bh))
3241389d1b08SAneesh Kumar K.V 			return 0;
3242389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3243389d1b08SAneesh Kumar K.V 	}
3244389d1b08SAneesh Kumar K.V 	return 1;
3245389d1b08SAneesh Kumar K.V }
3246389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_uptodate_or_lock);
3247389d1b08SAneesh Kumar K.V 
3248389d1b08SAneesh Kumar K.V /**
3249a6b91919SRandy Dunlap  * bh_submit_read - Submit a locked buffer for reading
3250389d1b08SAneesh Kumar K.V  * @bh: struct buffer_head
3251389d1b08SAneesh Kumar K.V  *
3252389d1b08SAneesh Kumar K.V  * Returns zero on success and -EIO on error.
3253389d1b08SAneesh Kumar K.V  */
3254389d1b08SAneesh Kumar K.V int bh_submit_read(struct buffer_head *bh)
3255389d1b08SAneesh Kumar K.V {
3256389d1b08SAneesh Kumar K.V 	BUG_ON(!buffer_locked(bh));
3257389d1b08SAneesh Kumar K.V 
3258389d1b08SAneesh Kumar K.V 	if (buffer_uptodate(bh)) {
3259389d1b08SAneesh Kumar K.V 		unlock_buffer(bh);
3260389d1b08SAneesh Kumar K.V 		return 0;
3261389d1b08SAneesh Kumar K.V 	}
3262389d1b08SAneesh Kumar K.V 
3263389d1b08SAneesh Kumar K.V 	get_bh(bh);
3264389d1b08SAneesh Kumar K.V 	bh->b_end_io = end_buffer_read_sync;
3265389d1b08SAneesh Kumar K.V 	submit_bh(READ, bh);
3266389d1b08SAneesh Kumar K.V 	wait_on_buffer(bh);
3267389d1b08SAneesh Kumar K.V 	if (buffer_uptodate(bh))
3268389d1b08SAneesh Kumar K.V 		return 0;
3269389d1b08SAneesh Kumar K.V 	return -EIO;
3270389d1b08SAneesh Kumar K.V }
3271389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_submit_read);
3272389d1b08SAneesh Kumar K.V 
32731da177e4SLinus Torvalds void __init buffer_init(void)
32741da177e4SLinus Torvalds {
32751da177e4SLinus Torvalds 	int nrpages;
32761da177e4SLinus Torvalds 
3277b98938c3SChristoph Lameter 	bh_cachep = kmem_cache_create("buffer_head",
3278b98938c3SChristoph Lameter 			sizeof(struct buffer_head), 0,
3279b98938c3SChristoph Lameter 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3280b98938c3SChristoph Lameter 				SLAB_MEM_SPREAD),
3281019b4d12SRichard Kennedy 				NULL);
32821da177e4SLinus Torvalds 
32831da177e4SLinus Torvalds 	/*
32841da177e4SLinus Torvalds 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
32851da177e4SLinus Torvalds 	 */
32861da177e4SLinus Torvalds 	nrpages = (nr_free_buffer_pages() * 10) / 100;
32871da177e4SLinus Torvalds 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
32881da177e4SLinus Torvalds 	hotcpu_notifier(buffer_cpu_notify, 0);
32891da177e4SLinus Torvalds }
3290