/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void invalidate_bh_lrus(void);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}

static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}

void fastcall __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void fastcall unlock_buffer(struct buffer_head *bh)
{
	clear_buffer_locked(bh);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
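
/*
 * Illustrative sketch (not part of the original file): waiting alone does
 * not keep a buffer unlocked, so a caller that wants to inspect stable
 * state must take the lock itself.  "example_inspect_locked" is a
 * hypothetical helper.
 */
static void example_inspect_locked(struct buffer_head *bh)
{
	lock_buffer(bh);	/* may sleep in __lock_buffer() if contended */
	/* ... examine or modify bh while nobody else can lock it ... */
	unlock_buffer(bh);	/* wakes any waiters via wake_up_bit() */
}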

static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}

static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer. This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
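
/*
 * Illustrative sketch (not part of the original file): a synchronous
 * single-buffer read in the style of ll_rw_block(), using
 * end_buffer_read_sync() as the completion handler.  The caller must have
 * mapped bh (b_bdev, b_blocknr); "example_read_bh_sync" is hypothetical.
 */
static int example_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		/* someone else read it while we waited for the lock */
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);			/* reference dropped by the handler */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}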

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	int ret = 0;

	if (bdev)
		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
	return ret;
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * superblock.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_super(struct super_block *sb)
{
	sync_inodes_sb(sb, 0);
	DQUOT_SYNC(sb);
	lock_super(sb);
	if (sb->s_dirt && sb->s_op->write_super)
		sb->s_op->write_super(sb);
	unlock_super(sb);
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);
	sync_inodes_sb(sb, 1);

	return sync_blockdev(sb->s_bdev);
}

/*
 * Write out and wait upon all dirty data associated with this
 * device.   Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = fsync_super(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}

/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;

	down(&bdev->bd_mount_sem);
	sb = get_super(bdev);
	if (sb && !(sb->s_flags & MS_RDONLY)) {
		sb->s_frozen = SB_FREEZE_WRITE;
		smp_wmb();

		sync_inodes_sb(sb, 0);
		DQUOT_SYNC(sb);

		lock_super(sb);
		if (sb->s_dirt && sb->s_op->write_super)
			sb->s_op->write_super(sb);
		unlock_super(sb);

		if (sb->s_op->sync_fs)
			sb->s_op->sync_fs(sb, 1);

		sync_blockdev(sb->s_bdev);
		sync_inodes_sb(sb, 1);

		sb->s_frozen = SB_FREEZE_TRANS;
		smp_wmb();

		sync_blockdev(sb->s_bdev);

		if (sb->s_op->write_super_lockfs)
			sb->s_op->write_super_lockfs(sb);
	}

	sync_blockdev(bdev);
	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
void thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	if (sb) {
		BUG_ON(sb->s_bdev != bdev);

		if (sb->s_op->unlockfs)
			sb->s_op->unlockfs(sb);
		sb->s_frozen = SB_UNFROZEN;
		smp_wmb();
		wake_up(&sb->s_wait_unfrozen);
		drop_super(sb);
	}

	up(&bdev->bd_mount_sem);
}
EXPORT_SYMBOL(thaw_bdev);
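
/*
 * Illustrative sketch (not part of the original file): the snapshot
 * sequence freeze_bdev()/thaw_bdev() exists for.  "example_snapshot_bdev"
 * is hypothetical; the copy step stands in for a real snapshot driver.
 */
static int example_snapshot_bdev(struct block_device *bdev)
{
	struct super_block *sb = freeze_bdev(bdev);

	/* ... bdev is quiescent here: copy it block-for-block ... */

	thaw_bdev(bdev, sb);	/* sb may be NULL if nothing was mounted */
	return 0;
}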

/*
 * sync everything.  Start out by waking pdflush, because that writes back
 * all queues in parallel.
 */
static void do_sync(unsigned long wait)
{
	wakeup_pdflush(0);
	sync_inodes(0);		/* All mappings, inodes and their blockdevs */
	DQUOT_SYNC(NULL);
	sync_supers();		/* Write the superblocks */
	sync_filesystems(0);	/* Start syncing the filesystems */
	sync_filesystems(wait);	/* Waitingly sync the filesystems */
	sync_inodes(wait);	/* Mappings, inodes and blockdevs, again. */
	if (!wait)
		printk("Emergency Sync complete\n");
	if (unlikely(laptop_mode))
		laptop_sync_completion();
}

asmlinkage long sys_sync(void)
{
	do_sync(1);
	return 0;
}

void emergency_sync(void)
{
	pdflush_operation(do_sync, 0);
}

/*
 * Generic function to fsync a file.
 *
 * filp may be NULL if called via the msync of a vma.
 */

int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	struct inode * inode = dentry->d_inode;
	struct super_block * sb;
	int ret, err;

	/* sync the inode to buffers */
	ret = write_inode_now(inode, 0);

	/* sync the superblock to buffers */
	sb = inode->i_sb;
	lock_super(sb);
	if (sb->s_op->write_super)
		sb->s_op->write_super(sb);
	unlock_super(sb);

	/* .. finally sync the buffers to disk */
	err = sync_blockdev(sb->s_bdev);
	if (!ret)
		ret = err;
	return ret;
}
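
/*
 * Illustrative sketch (not part of the original file): a simple
 * blockdev-backed filesystem can point its ->fsync method straight at
 * file_fsync().  "example_file_operations" is hypothetical.
 */
static struct file_operations example_file_operations = {
	.read	= generic_file_read,
	.write	= generic_file_write,
	.fsync	= file_fsync,	/* inode, then superblock, then blockdev */
};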

static long do_fsync(unsigned int fd, int datasync)
{
	struct file * file;
	struct address_space *mapping;
	int ret, err;

	ret = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	ret = -EINVAL;
	if (!file->f_op || !file->f_op->fsync) {
		/* Why?  We can still call filemap_fdatawrite */
		goto out_putf;
	}

	mapping = file->f_mapping;

	current->flags |= PF_SYNCWRITE;
	ret = filemap_fdatawrite(mapping);

	/*
	 * We need to protect against concurrent writers,
	 * which could cause livelocks in fsync_buffers_list
	 */
	mutex_lock(&mapping->host->i_mutex);
	err = file->f_op->fsync(file, file->f_dentry, datasync);
	if (!ret)
		ret = err;
	mutex_unlock(&mapping->host->i_mutex);
	err = filemap_fdatawait(mapping);
	if (!ret)
		ret = err;
	current->flags &= ~PF_SYNCWRITE;

out_putf:
	fput(file);
out:
	return ret;
}

asmlinkage long sys_fsync(unsigned int fd)
{
	return do_fsync(fd, 0);
}

asmlinkage long sys_fdatasync(unsigned int fd)
{
	return do_fsync(fd, 1);
}

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		if (!buffer_mapped(bh))
			all_mapped = 0;
		bh = bh->b_this_page;
	} while (bh != head);

	/* We might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between file I/O on
	 * the block device and getblk.  It gets dealt with elsewhere, so
	 * don't buffer_error() if we had some unmapped buffers.
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block, (unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}
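
/*
 * Illustrative sketch (not part of the original file) of the index
 * arithmetic above: with 4K pages (PAGE_CACHE_SHIFT == 12) and a 1K
 * blocksize (i_blkbits == 10), four blocks share each page, so block 1000
 * lives on page index 1000 >> 2 == 250.  "example_block_to_index" is
 * hypothetical.
 */
static pgoff_t example_block_to_index(struct block_device *bdev, sector_t block)
{
	return block >> (PAGE_CACHE_SHIFT - bdev->bd_inode->i_blkbits);
}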

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: if the user removed a removable-media disk while it still had
   dirty data not synced to it (due to a bug in the device driver or to an
   error by the user), then by not destroying the dirty buffers we could
   generate corruption on the next media inserted as well.  A parameter is
   therefore necessary to handle this case in the safest way possible
   (trying not to corrupt the newly inserted disk with data belonging to
   the old, now-corrupted one).  For the ramdisk, on the other hand, the
   natural way to release its memory is to destroy the dirty buffers.

   These are two special cases. Normal usage implies that the device driver
   issues a sync on the device (without waiting for I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache, the 'update'
   case has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive, so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update; the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
{
	invalidate_bh_lrus();
	/*
	 * FIXME: what about destroy_dirty_buffers?
	 * We really want to use invalidate_inode_pages2() for
	 * that, but not until that's cleaned up.
	 */
	invalidate_inode_pages(bdev->bd_inode->i_mapping);
}

/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone **zones;
	pg_data_t *pgdat;

	wakeup_pdflush(1024);
	yield();

	for_each_pgdat(pgdat) {
		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
		if (*zones)
			try_to_free_pages(zones, GFP_NOFS);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (printk_ratelimit())
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O against any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_write;
	set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);
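
/*
 * Illustrative sketch (not part of the original file): the two-pass
 * protocol that pairs with end_buffer_async_write().  Every buffer is
 * marked before any I/O is submitted, so an early completion cannot
 * mistake a half-processed page for a finished one.  This omits the
 * dirty-state handling a real writepage does and assumes the caller holds
 * the page lock; "example_write_buffers" is hypothetical.
 */
static void example_write_buffers(struct page *page)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;

	do {	/* pass 1: lock and mark, before any I/O is in flight */
		lock_buffer(bh);
		mark_buffer_async_write(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	/* the handler ends PageWriteback, so it must be set before I/O */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);

	do {	/* pass 2: submit; completions unlock via the handler */
		struct buffer_head *next = bh->b_this_page;
		submit_bh(WRITE, bh);
		bh = next;
	} while (bh != head);
}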


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), sync_mapping_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static inline void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}
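
/*
 * Illustrative sketch (not part of the original file) of the O_SYNC
 * protocol described above: queue the write as the buffer is dirtied, do
 * other work, then wait and collect the result.  "example_osync_one" is
 * hypothetical; real callers wait via osync_buffers_list().
 */
static int example_osync_one(struct buffer_head *bh)
{
	mark_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);	/* queue the write now... */
	/* ... dirty and queue other buffers here ... */
	wait_on_buffer(bh);		/* ...wait for completion later */
	return buffer_uptodate(bh) ? 0 : -EIO;
}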

/**
 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
 *                        buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
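
/*
 * Illustrative sketch (not part of the original file): an ext2-style
 * ->fsync built on the helper above, which flushes the file's associated
 * metadata buffers (e.g. indirect blocks).  "example_fsync" is
 * hypothetical.
 */
static int example_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;

	return sync_mapping_buffers(inode->i_mapping);
}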

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		if (mapping->assoc_mapping != buffer_mapping)
			BUG();
	}
	if (list_empty(&bh->b_assoc_buffers)) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
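
/*
 * Illustrative sketch (not part of the original file): how a filesystem
 * files a metadata block on an inode's ->private_list so the fsync sketch
 * above writes it.  The buffer lives in the blockdev's pagecache but is
 * queued against the regular file.  "example_dirty_indirect" is
 * hypothetical.
 */
static void example_dirty_indirect(struct inode *inode, sector_t block)
{
	struct buffer_head *bh = sb_getblk(inode->i_sb, block);

	if (bh) {
		/* ... update the block pointers in bh->b_data ... */
		mark_buffer_dirty_inode(bh, inode);
		brelse(bh);
	}
}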

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	spin_unlock(&mapping->private_lock);

	if (!TestSetPageDirty(page)) {
		write_lock_irq(&mapping->tree_lock);
		if (page->mapping) {	/* Race with truncate? */
			if (mapping_cap_account_dirty(mapping))
				inc_page_state(nr_dirty);
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		}
		write_unlock_irq(&mapping->tree_lock);
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}

	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		list_del_init(&bh->b_assoc_buffers);
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				ll_rw_block(SWRITE, 1, &bh);
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		__remove_assoc_queue(bh);
		get_bh(bh);
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a page for a data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Returns NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping),
 * which may not fail, from ordinary buffer allocations.
 */
10041da177e4SLinus Torvalds struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
10051da177e4SLinus Torvalds 		int retry)
10061da177e4SLinus Torvalds {
10071da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
10081da177e4SLinus Torvalds 	long offset;
10091da177e4SLinus Torvalds 
10101da177e4SLinus Torvalds try_again:
10111da177e4SLinus Torvalds 	head = NULL;
10121da177e4SLinus Torvalds 	offset = PAGE_SIZE;
10131da177e4SLinus Torvalds 	while ((offset -= size) >= 0) {
10141da177e4SLinus Torvalds 		bh = alloc_buffer_head(GFP_NOFS);
10151da177e4SLinus Torvalds 		if (!bh)
10161da177e4SLinus Torvalds 			goto no_grow;
10171da177e4SLinus Torvalds 
10181da177e4SLinus Torvalds 		bh->b_bdev = NULL;
10191da177e4SLinus Torvalds 		bh->b_this_page = head;
10201da177e4SLinus Torvalds 		bh->b_blocknr = -1;
10211da177e4SLinus Torvalds 		head = bh;
10221da177e4SLinus Torvalds 
10231da177e4SLinus Torvalds 		bh->b_state = 0;
10241da177e4SLinus Torvalds 		atomic_set(&bh->b_count, 0);
1025fc5cd582SChris Mason 		bh->b_private = NULL;
10261da177e4SLinus Torvalds 		bh->b_size = size;
10271da177e4SLinus Torvalds 
10281da177e4SLinus Torvalds 		/* Link the buffer to its page */
10291da177e4SLinus Torvalds 		set_bh_page(bh, page, offset);
10301da177e4SLinus Torvalds 
103101ffe339SNathan Scott 		init_buffer(bh, NULL, NULL);
10321da177e4SLinus Torvalds 	}
10331da177e4SLinus Torvalds 	return head;
10341da177e4SLinus Torvalds /*
10351da177e4SLinus Torvalds  * In case anything failed, we just free everything we got.
10361da177e4SLinus Torvalds  */
10371da177e4SLinus Torvalds no_grow:
10381da177e4SLinus Torvalds 	if (head) {
10391da177e4SLinus Torvalds 		do {
10401da177e4SLinus Torvalds 			bh = head;
10411da177e4SLinus Torvalds 			head = head->b_this_page;
10421da177e4SLinus Torvalds 			free_buffer_head(bh);
10431da177e4SLinus Torvalds 		} while (head);
10441da177e4SLinus Torvalds 	}
10451da177e4SLinus Torvalds 
10461da177e4SLinus Torvalds 	/*
10471da177e4SLinus Torvalds 	 * Return failure for non-async IO requests.  Async IO requests
10481da177e4SLinus Torvalds 	 * are not allowed to fail, so we have to wait until buffer heads
10491da177e4SLinus Torvalds 	 * become available.  But we don't want tasks sleeping with
10501da177e4SLinus Torvalds 	 * partially complete buffers, so all were released above.
10511da177e4SLinus Torvalds 	 */
10521da177e4SLinus Torvalds 	if (!retry)
10531da177e4SLinus Torvalds 		return NULL;
10541da177e4SLinus Torvalds 
10551da177e4SLinus Torvalds 	/* We're _really_ low on memory. Now we just
10561da177e4SLinus Torvalds 	 * wait for old buffer heads to become free due to
10571da177e4SLinus Torvalds 	 * finishing IO.  Since this is an async request and
10581da177e4SLinus Torvalds 	 * the reserve list is empty, we're sure there are
10591da177e4SLinus Torvalds 	 * async buffer heads in use.
10601da177e4SLinus Torvalds 	 */
10611da177e4SLinus Torvalds 	free_more_memory();
10621da177e4SLinus Torvalds 	goto try_again;
10631da177e4SLinus Torvalds }
10641da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(alloc_page_buffers);
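/*
 * Illustrative sketch (not part of this file): a caller wanting four 1k
 * buffer_heads against a 4k page might do the following; "page" is assumed
 * to be a locked pagecache page:
 *
 *	struct buffer_head *head = alloc_page_buffers(page, 1024, 1);
 *	struct buffer_head *bh = head;
 *
 *	do {
 *		(initialise bh->b_state, b_bdev, b_blocknr here)
 *		bh = bh->b_this_page;
 *	} while (bh);
 *
 * The b_this_page chain is NULL-terminated until link_dev_buffers() or
 * create_empty_buffers() makes it circular and attaches it to the page.
 */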
10651da177e4SLinus Torvalds 
10661da177e4SLinus Torvalds static inline void
10671da177e4SLinus Torvalds link_dev_buffers(struct page *page, struct buffer_head *head)
10681da177e4SLinus Torvalds {
10691da177e4SLinus Torvalds 	struct buffer_head *bh, *tail;
10701da177e4SLinus Torvalds 
10711da177e4SLinus Torvalds 	bh = head;
10721da177e4SLinus Torvalds 	do {
10731da177e4SLinus Torvalds 		tail = bh;
10741da177e4SLinus Torvalds 		bh = bh->b_this_page;
10751da177e4SLinus Torvalds 	} while (bh);
10761da177e4SLinus Torvalds 	tail->b_this_page = head;
10771da177e4SLinus Torvalds 	attach_page_buffers(page, head);
10781da177e4SLinus Torvalds }
10791da177e4SLinus Torvalds 
10801da177e4SLinus Torvalds /*
10811da177e4SLinus Torvalds  * Initialise the state of a blockdev page's buffers.
10821da177e4SLinus Torvalds  */
10831da177e4SLinus Torvalds static void
10841da177e4SLinus Torvalds init_page_buffers(struct page *page, struct block_device *bdev,
10851da177e4SLinus Torvalds 			sector_t block, int size)
10861da177e4SLinus Torvalds {
10871da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);
10881da177e4SLinus Torvalds 	struct buffer_head *bh = head;
10891da177e4SLinus Torvalds 	int uptodate = PageUptodate(page);
10901da177e4SLinus Torvalds 
10911da177e4SLinus Torvalds 	do {
10921da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
10931da177e4SLinus Torvalds 			init_buffer(bh, NULL, NULL);
10941da177e4SLinus Torvalds 			bh->b_bdev = bdev;
10951da177e4SLinus Torvalds 			bh->b_blocknr = block;
10961da177e4SLinus Torvalds 			if (uptodate)
10971da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
10981da177e4SLinus Torvalds 			set_buffer_mapped(bh);
10991da177e4SLinus Torvalds 		}
11001da177e4SLinus Torvalds 		block++;
11011da177e4SLinus Torvalds 		bh = bh->b_this_page;
11021da177e4SLinus Torvalds 	} while (bh != head);
11031da177e4SLinus Torvalds }
11041da177e4SLinus Torvalds 
11051da177e4SLinus Torvalds /*
11061da177e4SLinus Torvalds  * Create the page-cache page that contains the requested block.
11071da177e4SLinus Torvalds  *
11081da177e4SLinus Torvalds  * This is used purely for blockdev mappings.
11091da177e4SLinus Torvalds  */
11101da177e4SLinus Torvalds static struct page *
11111da177e4SLinus Torvalds grow_dev_page(struct block_device *bdev, sector_t block,
11121da177e4SLinus Torvalds 		pgoff_t index, int size)
11131da177e4SLinus Torvalds {
11141da177e4SLinus Torvalds 	struct inode *inode = bdev->bd_inode;
11151da177e4SLinus Torvalds 	struct page *page;
11161da177e4SLinus Torvalds 	struct buffer_head *bh;
11171da177e4SLinus Torvalds 
11181da177e4SLinus Torvalds 	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
11191da177e4SLinus Torvalds 	if (!page)
11201da177e4SLinus Torvalds 		return NULL;
11211da177e4SLinus Torvalds 
11221da177e4SLinus Torvalds 	if (!PageLocked(page))
11231da177e4SLinus Torvalds 		BUG();
11241da177e4SLinus Torvalds 
11251da177e4SLinus Torvalds 	if (page_has_buffers(page)) {
11261da177e4SLinus Torvalds 		bh = page_buffers(page);
11271da177e4SLinus Torvalds 		if (bh->b_size == size) {
11281da177e4SLinus Torvalds 			init_page_buffers(page, bdev, block, size);
11291da177e4SLinus Torvalds 			return page;
11301da177e4SLinus Torvalds 		}
11311da177e4SLinus Torvalds 		if (!try_to_free_buffers(page))
11321da177e4SLinus Torvalds 			goto failed;
11331da177e4SLinus Torvalds 	}
11341da177e4SLinus Torvalds 
11351da177e4SLinus Torvalds 	/*
11361da177e4SLinus Torvalds 	 * Allocate some buffers for this page
11371da177e4SLinus Torvalds 	 */
11381da177e4SLinus Torvalds 	bh = alloc_page_buffers(page, size, 0);
11391da177e4SLinus Torvalds 	if (!bh)
11401da177e4SLinus Torvalds 		goto failed;
11411da177e4SLinus Torvalds 
11421da177e4SLinus Torvalds 	/*
11431da177e4SLinus Torvalds 	 * Link the page to the buffers and initialise them.  Take the
11441da177e4SLinus Torvalds 	 * lock to be atomic wrt __find_get_block(), which does not
11451da177e4SLinus Torvalds 	 * run under the page lock.
11461da177e4SLinus Torvalds 	 */
11471da177e4SLinus Torvalds 	spin_lock(&inode->i_mapping->private_lock);
11481da177e4SLinus Torvalds 	link_dev_buffers(page, bh);
11491da177e4SLinus Torvalds 	init_page_buffers(page, bdev, block, size);
11501da177e4SLinus Torvalds 	spin_unlock(&inode->i_mapping->private_lock);
11511da177e4SLinus Torvalds 	return page;
11521da177e4SLinus Torvalds 
11531da177e4SLinus Torvalds failed:
11541da177e4SLinus Torvalds 	BUG();
11551da177e4SLinus Torvalds 	unlock_page(page);
11561da177e4SLinus Torvalds 	page_cache_release(page);
11571da177e4SLinus Torvalds 	return NULL;
11581da177e4SLinus Torvalds }
11591da177e4SLinus Torvalds 
11601da177e4SLinus Torvalds /*
11611da177e4SLinus Torvalds  * Create buffers for the specified block device block's page.  If
11621da177e4SLinus Torvalds  * that page was dirty, the buffers are set dirty also.
11631da177e4SLinus Torvalds  *
11641da177e4SLinus Torvalds  * Except that's a bug.  Attaching dirty buffers to a dirty
11651da177e4SLinus Torvalds  * blockdev's page can result in filesystem corruption, because
11661da177e4SLinus Torvalds  * some of those buffers may be aliases of filesystem data.
11671da177e4SLinus Torvalds  * grow_dev_page() will go BUG() if this happens.
11681da177e4SLinus Torvalds  */
1169858119e1SArjan van de Ven static int
11701da177e4SLinus Torvalds grow_buffers(struct block_device *bdev, sector_t block, int size)
11711da177e4SLinus Torvalds {
11721da177e4SLinus Torvalds 	struct page *page;
11731da177e4SLinus Torvalds 	pgoff_t index;
11741da177e4SLinus Torvalds 	int sizebits;
11751da177e4SLinus Torvalds 
11761da177e4SLinus Torvalds 	sizebits = -1;
11771da177e4SLinus Torvalds 	do {
11781da177e4SLinus Torvalds 		sizebits++;
11791da177e4SLinus Torvalds 	} while ((size << sizebits) < PAGE_SIZE);
11801da177e4SLinus Torvalds 
11811da177e4SLinus Torvalds 	index = block >> sizebits;
11821da177e4SLinus Torvalds 	block = index << sizebits;
11831da177e4SLinus Torvalds 
11841da177e4SLinus Torvalds 	/* Create a page with the proper size buffers.. */
11851da177e4SLinus Torvalds 	page = grow_dev_page(bdev, block, index, size);
11861da177e4SLinus Torvalds 	if (!page)
11871da177e4SLinus Torvalds 		return 0;
11881da177e4SLinus Torvalds 	unlock_page(page);
11891da177e4SLinus Torvalds 	page_cache_release(page);
11901da177e4SLinus Torvalds 	return 1;
11911da177e4SLinus Torvalds }
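/*
 * Worked example (illustrative) for the index arithmetic in grow_buffers()
 * above: with 1k blocks on a machine with 4k pages, the loop yields
 * sizebits == 2.  For block 103:
 *
 *	index = 103 >> 2 = 25;	(page 25 covers blocks 100-103)
 *	block = 25 << 2 = 100;	(first block backed by that page)
 *
 * so grow_dev_page() is always asked to populate a page starting at a
 * page-aligned block number.
 */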
11921da177e4SLinus Torvalds 
119375c96f85SAdrian Bunk static struct buffer_head *
11941da177e4SLinus Torvalds __getblk_slow(struct block_device *bdev, sector_t block, int size)
11951da177e4SLinus Torvalds {
11961da177e4SLinus Torvalds 	/* Size must be a multiple of the hard sector size */
11971da177e4SLinus Torvalds 	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
11981da177e4SLinus Torvalds 			(size < 512 || size > PAGE_SIZE))) {
11991da177e4SLinus Torvalds 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
12001da177e4SLinus Torvalds 					size);
12011da177e4SLinus Torvalds 		printk(KERN_ERR "hardsect size: %d\n",
12021da177e4SLinus Torvalds 					bdev_hardsect_size(bdev));
12031da177e4SLinus Torvalds 
12041da177e4SLinus Torvalds 		dump_stack();
12051da177e4SLinus Torvalds 		return NULL;
12061da177e4SLinus Torvalds 	}
12071da177e4SLinus Torvalds 
12081da177e4SLinus Torvalds 	for (;;) {
12091da177e4SLinus Torvalds 		struct buffer_head * bh;
12101da177e4SLinus Torvalds 
12111da177e4SLinus Torvalds 		bh = __find_get_block(bdev, block, size);
12121da177e4SLinus Torvalds 		if (bh)
12131da177e4SLinus Torvalds 			return bh;
12141da177e4SLinus Torvalds 
12151da177e4SLinus Torvalds 		if (!grow_buffers(bdev, block, size))
12161da177e4SLinus Torvalds 			free_more_memory();
12171da177e4SLinus Torvalds 	}
12181da177e4SLinus Torvalds }
12191da177e4SLinus Torvalds 
12201da177e4SLinus Torvalds /*
12211da177e4SLinus Torvalds  * The relationship between dirty buffers and dirty pages:
12221da177e4SLinus Torvalds  *
12231da177e4SLinus Torvalds  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
12241da177e4SLinus Torvalds  * the page is tagged dirty in its radix tree.
12251da177e4SLinus Torvalds  *
12261da177e4SLinus Torvalds  * At all times, the dirtiness of the buffers represents the dirtiness of
12271da177e4SLinus Torvalds  * subsections of the page.  If the page has buffers, the page dirty bit is
12281da177e4SLinus Torvalds  * merely a hint about the true dirty state.
12291da177e4SLinus Torvalds  *
12301da177e4SLinus Torvalds  * When a page is set dirty in its entirety, all its buffers are marked dirty
12311da177e4SLinus Torvalds  * (if the page has buffers).
12321da177e4SLinus Torvalds  *
12331da177e4SLinus Torvalds  * When a buffer is marked dirty, its page is dirtied, but the page's other
12341da177e4SLinus Torvalds  * buffers are not.
12351da177e4SLinus Torvalds  *
12361da177e4SLinus Torvalds  * Also.  When blockdev buffers are explicitly read with bread(), they
12371da177e4SLinus Torvalds  * individually become uptodate.  But their backing page remains not
12381da177e4SLinus Torvalds  * uptodate - even if all of its buffers are uptodate.  A subsequent
12391da177e4SLinus Torvalds  * block_read_full_page() against that page will discover all the uptodate
12401da177e4SLinus Torvalds  * buffers, will set the page uptodate and will perform no I/O.
12411da177e4SLinus Torvalds  */
12421da177e4SLinus Torvalds 
12431da177e4SLinus Torvalds /**
12441da177e4SLinus Torvalds  * mark_buffer_dirty - mark a buffer_head as needing writeout
124567be2dd1SMartin Waitz  * @bh: the buffer_head to mark dirty
12461da177e4SLinus Torvalds  *
12471da177e4SLinus Torvalds  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
12481da177e4SLinus Torvalds  * backing page dirty, then tag the page as dirty in its address_space's radix
12491da177e4SLinus Torvalds  * tree and then attach the address_space's inode to its superblock's dirty
12501da177e4SLinus Torvalds  * inode list.
12511da177e4SLinus Torvalds  *
12521da177e4SLinus Torvalds  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
12531da177e4SLinus Torvalds  * mapping->tree_lock and the global inode_lock.
12541da177e4SLinus Torvalds  */
12551da177e4SLinus Torvalds void fastcall mark_buffer_dirty(struct buffer_head *bh)
12561da177e4SLinus Torvalds {
12571da177e4SLinus Torvalds 	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
12581da177e4SLinus Torvalds 		__set_page_dirty_nobuffers(bh->b_page);
12591da177e4SLinus Torvalds }
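/*
 * Illustrative sketch (not part of this file): the usual pattern for
 * overwriting a metadata block in core.  "sb", "blocknr" and "data" are
 * hypothetical:
 *
 *	struct buffer_head *bh = sb_getblk(sb, blocknr);
 *
 *	memcpy(bh->b_data, data, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 *
 * Writeback later finds the buffer via its (now dirty) page.
 */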
12601da177e4SLinus Torvalds 
12611da177e4SLinus Torvalds /*
12621da177e4SLinus Torvalds  * Decrement a buffer_head's reference count.  If all buffers against a page
12631da177e4SLinus Torvalds  * have zero reference count, are clean and unlocked, and if the page is clean
12641da177e4SLinus Torvalds  * and unlocked then try_to_free_buffers() may strip the buffers from the page
12651da177e4SLinus Torvalds  * in preparation for freeing it (sometimes, rarely, buffers are removed from
12661da177e4SLinus Torvalds  * a page but it ends up not being freed, and buffers may later be reattached).
12671da177e4SLinus Torvalds  */
12681da177e4SLinus Torvalds void __brelse(struct buffer_head * buf)
12691da177e4SLinus Torvalds {
12701da177e4SLinus Torvalds 	if (atomic_read(&buf->b_count)) {
12711da177e4SLinus Torvalds 		put_bh(buf);
12721da177e4SLinus Torvalds 		return;
12731da177e4SLinus Torvalds 	}
12741da177e4SLinus Torvalds 	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
12751da177e4SLinus Torvalds 	WARN_ON(1);
12761da177e4SLinus Torvalds }
12771da177e4SLinus Torvalds 
12781da177e4SLinus Torvalds /*
12791da177e4SLinus Torvalds  * bforget() is like brelse(), except it discards any
12801da177e4SLinus Torvalds  * potentially dirty data.
12811da177e4SLinus Torvalds  */
12821da177e4SLinus Torvalds void __bforget(struct buffer_head *bh)
12831da177e4SLinus Torvalds {
12841da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
12851da177e4SLinus Torvalds 	if (!list_empty(&bh->b_assoc_buffers)) {
12861da177e4SLinus Torvalds 		struct address_space *buffer_mapping = bh->b_page->mapping;
12871da177e4SLinus Torvalds 
12881da177e4SLinus Torvalds 		spin_lock(&buffer_mapping->private_lock);
12891da177e4SLinus Torvalds 		list_del_init(&bh->b_assoc_buffers);
12901da177e4SLinus Torvalds 		spin_unlock(&buffer_mapping->private_lock);
12911da177e4SLinus Torvalds 	}
12921da177e4SLinus Torvalds 	__brelse(bh);
12931da177e4SLinus Torvalds }
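/*
 * Illustrative sketch (not part of this file): a filesystem which has just
 * freed the on-disk block a dirty buffer covers can use bforget() instead
 * of brelse(), so the now-meaningless contents are never written back:
 *
 *	mark_buffer_dirty(bh);		(some earlier modification)
 *	...the block is deallocated...
 *	bforget(bh);			(discard the dirty bit, drop the ref)
 */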
12941da177e4SLinus Torvalds 
12951da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh)
12961da177e4SLinus Torvalds {
12971da177e4SLinus Torvalds 	lock_buffer(bh);
12981da177e4SLinus Torvalds 	if (buffer_uptodate(bh)) {
12991da177e4SLinus Torvalds 		unlock_buffer(bh);
13001da177e4SLinus Torvalds 		return bh;
13011da177e4SLinus Torvalds 	} else {
13021da177e4SLinus Torvalds 		get_bh(bh);
13031da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_read_sync;
13041da177e4SLinus Torvalds 		submit_bh(READ, bh);
13051da177e4SLinus Torvalds 		wait_on_buffer(bh);
13061da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
13071da177e4SLinus Torvalds 			return bh;
13081da177e4SLinus Torvalds 	}
13091da177e4SLinus Torvalds 	brelse(bh);
13101da177e4SLinus Torvalds 	return NULL;
13111da177e4SLinus Torvalds }
13121da177e4SLinus Torvalds 
13131da177e4SLinus Torvalds /*
13141da177e4SLinus Torvalds  * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
13151da177e4SLinus Torvalds  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
13161da177e4SLinus Torvalds  * refcount elevated by one when they're in an LRU.  A buffer can only appear
13171da177e4SLinus Torvalds  * once in a particular CPU's LRU.  A single buffer can be present in multiple
13181da177e4SLinus Torvalds  * CPUs' LRUs at the same time.
13191da177e4SLinus Torvalds  *
13201da177e4SLinus Torvalds  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
13211da177e4SLinus Torvalds  * sb_find_get_block().
13221da177e4SLinus Torvalds  *
13231da177e4SLinus Torvalds  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
13241da177e4SLinus Torvalds  * a local interrupt disable for that.
13251da177e4SLinus Torvalds  */
13261da177e4SLinus Torvalds 
13271da177e4SLinus Torvalds #define BH_LRU_SIZE	8
13281da177e4SLinus Torvalds 
13291da177e4SLinus Torvalds struct bh_lru {
13301da177e4SLinus Torvalds 	struct buffer_head *bhs[BH_LRU_SIZE];
13311da177e4SLinus Torvalds };
13321da177e4SLinus Torvalds 
13331da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
13341da177e4SLinus Torvalds 
13351da177e4SLinus Torvalds #ifdef CONFIG_SMP
13361da177e4SLinus Torvalds #define bh_lru_lock()	local_irq_disable()
13371da177e4SLinus Torvalds #define bh_lru_unlock()	local_irq_enable()
13381da177e4SLinus Torvalds #else
13391da177e4SLinus Torvalds #define bh_lru_lock()	preempt_disable()
13401da177e4SLinus Torvalds #define bh_lru_unlock()	preempt_enable()
13411da177e4SLinus Torvalds #endif
13421da177e4SLinus Torvalds 
13431da177e4SLinus Torvalds static inline void check_irqs_on(void)
13441da177e4SLinus Torvalds {
13451da177e4SLinus Torvalds #ifdef irqs_disabled
13461da177e4SLinus Torvalds 	BUG_ON(irqs_disabled());
13471da177e4SLinus Torvalds #endif
13481da177e4SLinus Torvalds }
13491da177e4SLinus Torvalds 
13501da177e4SLinus Torvalds /*
13511da177e4SLinus Torvalds  * The LRU management algorithm is dopey-but-simple.  Sorry.
13521da177e4SLinus Torvalds  */
13531da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh)
13541da177e4SLinus Torvalds {
13551da177e4SLinus Torvalds 	struct buffer_head *evictee = NULL;
13561da177e4SLinus Torvalds 	struct bh_lru *lru;
13571da177e4SLinus Torvalds 
13581da177e4SLinus Torvalds 	check_irqs_on();
13591da177e4SLinus Torvalds 	bh_lru_lock();
13601da177e4SLinus Torvalds 	lru = &__get_cpu_var(bh_lrus);
13611da177e4SLinus Torvalds 	if (lru->bhs[0] != bh) {
13621da177e4SLinus Torvalds 		struct buffer_head *bhs[BH_LRU_SIZE];
13631da177e4SLinus Torvalds 		int in;
13641da177e4SLinus Torvalds 		int out = 0;
13651da177e4SLinus Torvalds 
13661da177e4SLinus Torvalds 		get_bh(bh);
13671da177e4SLinus Torvalds 		bhs[out++] = bh;
13681da177e4SLinus Torvalds 		for (in = 0; in < BH_LRU_SIZE; in++) {
13691da177e4SLinus Torvalds 			struct buffer_head *bh2 = lru->bhs[in];
13701da177e4SLinus Torvalds 
13711da177e4SLinus Torvalds 			if (bh2 == bh) {
13721da177e4SLinus Torvalds 				__brelse(bh2);
13731da177e4SLinus Torvalds 			} else {
13741da177e4SLinus Torvalds 				if (out >= BH_LRU_SIZE) {
13751da177e4SLinus Torvalds 					BUG_ON(evictee != NULL);
13761da177e4SLinus Torvalds 					evictee = bh2;
13771da177e4SLinus Torvalds 				} else {
13781da177e4SLinus Torvalds 					bhs[out++] = bh2;
13791da177e4SLinus Torvalds 				}
13801da177e4SLinus Torvalds 			}
13811da177e4SLinus Torvalds 		}
13821da177e4SLinus Torvalds 		while (out < BH_LRU_SIZE)
13831da177e4SLinus Torvalds 			bhs[out++] = NULL;
13841da177e4SLinus Torvalds 		memcpy(lru->bhs, bhs, sizeof(bhs));
13851da177e4SLinus Torvalds 	}
13861da177e4SLinus Torvalds 	bh_lru_unlock();
13871da177e4SLinus Torvalds 
13881da177e4SLinus Torvalds 	if (evictee)
13891da177e4SLinus Torvalds 		__brelse(evictee);
13901da177e4SLinus Torvalds }
13911da177e4SLinus Torvalds 
13921da177e4SLinus Torvalds /*
13931da177e4SLinus Torvalds  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
13941da177e4SLinus Torvalds  */
1395858119e1SArjan van de Ven static struct buffer_head *
13961da177e4SLinus Torvalds lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
13971da177e4SLinus Torvalds {
13981da177e4SLinus Torvalds 	struct buffer_head *ret = NULL;
13991da177e4SLinus Torvalds 	struct bh_lru *lru;
14001da177e4SLinus Torvalds 	int i;
14011da177e4SLinus Torvalds 
14021da177e4SLinus Torvalds 	check_irqs_on();
14031da177e4SLinus Torvalds 	bh_lru_lock();
14041da177e4SLinus Torvalds 	lru = &__get_cpu_var(bh_lrus);
14051da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
14061da177e4SLinus Torvalds 		struct buffer_head *bh = lru->bhs[i];
14071da177e4SLinus Torvalds 
14081da177e4SLinus Torvalds 		if (bh && bh->b_bdev == bdev &&
14091da177e4SLinus Torvalds 				bh->b_blocknr == block && bh->b_size == size) {
14101da177e4SLinus Torvalds 			if (i) {
14111da177e4SLinus Torvalds 				while (i) {
14121da177e4SLinus Torvalds 					lru->bhs[i] = lru->bhs[i - 1];
14131da177e4SLinus Torvalds 					i--;
14141da177e4SLinus Torvalds 				}
14151da177e4SLinus Torvalds 				lru->bhs[0] = bh;
14161da177e4SLinus Torvalds 			}
14171da177e4SLinus Torvalds 			get_bh(bh);
14181da177e4SLinus Torvalds 			ret = bh;
14191da177e4SLinus Torvalds 			break;
14201da177e4SLinus Torvalds 		}
14211da177e4SLinus Torvalds 	}
14221da177e4SLinus Torvalds 	bh_lru_unlock();
14231da177e4SLinus Torvalds 	return ret;
14241da177e4SLinus Torvalds }
14251da177e4SLinus Torvalds 
14261da177e4SLinus Torvalds /*
14271da177e4SLinus Torvalds  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
14281da177e4SLinus Torvalds  * it in the LRU and mark it as accessed.  If it is not present then return
14291da177e4SLinus Torvalds  * NULL.
14301da177e4SLinus Torvalds  */
14311da177e4SLinus Torvalds struct buffer_head *
14321da177e4SLinus Torvalds __find_get_block(struct block_device *bdev, sector_t block, int size)
14331da177e4SLinus Torvalds {
14341da177e4SLinus Torvalds 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
14351da177e4SLinus Torvalds 
14361da177e4SLinus Torvalds 	if (bh == NULL) {
1437385fd4c5SCoywolf Qi Hunt 		bh = __find_get_block_slow(bdev, block);
14381da177e4SLinus Torvalds 		if (bh)
14391da177e4SLinus Torvalds 			bh_lru_install(bh);
14401da177e4SLinus Torvalds 	}
14411da177e4SLinus Torvalds 	if (bh)
14421da177e4SLinus Torvalds 		touch_buffer(bh);
14431da177e4SLinus Torvalds 	return bh;
14441da177e4SLinus Torvalds }
14451da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block);
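/*
 * Illustrative sketch (not part of this file): probing the cache without
 * triggering allocation or I/O.  A NULL return just means "not cached";
 * a non-NULL return carries a reference that must be dropped:
 *
 *	struct buffer_head *bh = __find_get_block(bdev, block, 512);
 *
 *	if (bh) {
 *		(inspect bh->b_state, bh->b_data)
 *		brelse(bh);
 *	}
 */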
14461da177e4SLinus Torvalds 
14471da177e4SLinus Torvalds /*
14481da177e4SLinus Torvalds  * __getblk will locate (and, if necessary, create) the buffer_head
14491da177e4SLinus Torvalds  * which corresponds to the passed block_device, block and size. The
14501da177e4SLinus Torvalds  * returned buffer has its reference count incremented.
14511da177e4SLinus Torvalds  *
14521da177e4SLinus Torvalds  * __getblk() cannot fail - it just keeps trying.  If you pass it an
14531da177e4SLinus Torvalds  * illegal block number, __getblk() will happily return a buffer_head
14541da177e4SLinus Torvalds  * which represents the non-existent block.  Very weird.
14551da177e4SLinus Torvalds  *
14561da177e4SLinus Torvalds  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
14571da177e4SLinus Torvalds  * attempt is failing.  FIXME, perhaps?
14581da177e4SLinus Torvalds  */
14591da177e4SLinus Torvalds struct buffer_head *
14601da177e4SLinus Torvalds __getblk(struct block_device *bdev, sector_t block, int size)
14611da177e4SLinus Torvalds {
14621da177e4SLinus Torvalds 	struct buffer_head *bh = __find_get_block(bdev, block, size);
14631da177e4SLinus Torvalds 
14641da177e4SLinus Torvalds 	might_sleep();
14651da177e4SLinus Torvalds 	if (bh == NULL)
14661da177e4SLinus Torvalds 		bh = __getblk_slow(bdev, block, size);
14671da177e4SLinus Torvalds 	return bh;
14681da177e4SLinus Torvalds }
14691da177e4SLinus Torvalds EXPORT_SYMBOL(__getblk);
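/*
 * Illustrative sketch (not part of this file): initialising a block that
 * will be overwritten in full, so no read is needed.  "bdev" and "block"
 * are hypothetical:
 *
 *	struct buffer_head *bh = __getblk(bdev, block, 512);
 *
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	mark_buffer_dirty(bh);
 *	unlock_buffer(bh);
 *	brelse(bh);
 */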
14701da177e4SLinus Torvalds 
14711da177e4SLinus Torvalds /*
14721da177e4SLinus Torvalds  * Do async read-ahead on a buffer..
14731da177e4SLinus Torvalds  */
14741da177e4SLinus Torvalds void __breadahead(struct block_device *bdev, sector_t block, int size)
14751da177e4SLinus Torvalds {
14761da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
1477a3e713b5SAndrew Morton 	if (likely(bh)) {
14781da177e4SLinus Torvalds 		ll_rw_block(READA, 1, &bh);
14791da177e4SLinus Torvalds 		brelse(bh);
14801da177e4SLinus Torvalds 	}
1481a3e713b5SAndrew Morton }
14821da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead);
14831da177e4SLinus Torvalds 
14841da177e4SLinus Torvalds /**
14851da177e4SLinus Torvalds  *  __bread() - reads a specified block and returns the bh
148667be2dd1SMartin Waitz  *  @bdev: the block_device to read from
14871da177e4SLinus Torvalds  *  @block: number of block
14881da177e4SLinus Torvalds  *  @size: size (in bytes) to read
14891da177e4SLinus Torvalds  *
14901da177e4SLinus Torvalds  *  Reads a specified block, and returns buffer head that contains it.
14911da177e4SLinus Torvalds  *  It returns NULL if the block was unreadable.
14921da177e4SLinus Torvalds  */
14931da177e4SLinus Torvalds struct buffer_head *
14941da177e4SLinus Torvalds __bread(struct block_device *bdev, sector_t block, int size)
14951da177e4SLinus Torvalds {
14961da177e4SLinus Torvalds 	struct buffer_head *bh = __getblk(bdev, block, size);
14971da177e4SLinus Torvalds 
1498a3e713b5SAndrew Morton 	if (likely(bh) && !buffer_uptodate(bh))
14991da177e4SLinus Torvalds 		bh = __bread_slow(bh);
15001da177e4SLinus Torvalds 	return bh;
15011da177e4SLinus Torvalds }
15021da177e4SLinus Torvalds EXPORT_SYMBOL(__bread);
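/*
 * Illustrative sketch (not part of this file): the usual synchronous read
 * of filesystem metadata.  Most filesystems go through the sb_bread()
 * wrapper, which supplies sb->s_blocksize; "sb" and "blocknr" are
 * hypothetical:
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *	if (!bh)
 *		return -EIO;	(the block was unreadable)
 *	(examine bh->b_data)
 *	brelse(bh);
 */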
15031da177e4SLinus Torvalds 
15041da177e4SLinus Torvalds /*
15051da177e4SLinus Torvalds  * invalidate_bh_lrus() is called rarely - but not only at unmount.
15061da177e4SLinus Torvalds  * This doesn't race because it runs on each cpu either in irq context
15071da177e4SLinus Torvalds  * or with preemption disabled.
15081da177e4SLinus Torvalds  */
15091da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg)
15101da177e4SLinus Torvalds {
15111da177e4SLinus Torvalds 	struct bh_lru *b = &get_cpu_var(bh_lrus);
15121da177e4SLinus Torvalds 	int i;
15131da177e4SLinus Torvalds 
15141da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
15151da177e4SLinus Torvalds 		brelse(b->bhs[i]);
15161da177e4SLinus Torvalds 		b->bhs[i] = NULL;
15171da177e4SLinus Torvalds 	}
15181da177e4SLinus Torvalds 	put_cpu_var(bh_lrus);
15191da177e4SLinus Torvalds }
15201da177e4SLinus Torvalds 
15211da177e4SLinus Torvalds static void invalidate_bh_lrus(void)
15221da177e4SLinus Torvalds {
15231da177e4SLinus Torvalds 	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
15241da177e4SLinus Torvalds }
15251da177e4SLinus Torvalds 
15261da177e4SLinus Torvalds void set_bh_page(struct buffer_head *bh,
15271da177e4SLinus Torvalds 		struct page *page, unsigned long offset)
15281da177e4SLinus Torvalds {
15291da177e4SLinus Torvalds 	bh->b_page = page;
15301da177e4SLinus Torvalds 	if (offset >= PAGE_SIZE)
15311da177e4SLinus Torvalds 		BUG();
15321da177e4SLinus Torvalds 	if (PageHighMem(page))
15331da177e4SLinus Torvalds 		/*
15341da177e4SLinus Torvalds 		 * This catches illegal uses and preserves the offset:
15351da177e4SLinus Torvalds 		 */
15361da177e4SLinus Torvalds 		bh->b_data = (char *)(0 + offset);
15371da177e4SLinus Torvalds 	else
15381da177e4SLinus Torvalds 		bh->b_data = page_address(page) + offset;
15391da177e4SLinus Torvalds }
15401da177e4SLinus Torvalds EXPORT_SYMBOL(set_bh_page);
15411da177e4SLinus Torvalds 
15421da177e4SLinus Torvalds /*
15431da177e4SLinus Torvalds  * Called when truncating a buffer on a page completely.
15441da177e4SLinus Torvalds  */
1545858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh)
15461da177e4SLinus Torvalds {
15471da177e4SLinus Torvalds 	lock_buffer(bh);
15481da177e4SLinus Torvalds 	clear_buffer_dirty(bh);
15491da177e4SLinus Torvalds 	bh->b_bdev = NULL;
15501da177e4SLinus Torvalds 	clear_buffer_mapped(bh);
15511da177e4SLinus Torvalds 	clear_buffer_req(bh);
15521da177e4SLinus Torvalds 	clear_buffer_new(bh);
15531da177e4SLinus Torvalds 	clear_buffer_delay(bh);
15541da177e4SLinus Torvalds 	unlock_buffer(bh);
15551da177e4SLinus Torvalds }
15561da177e4SLinus Torvalds 
15571da177e4SLinus Torvalds /**
15581da177e4SLinus Torvalds  * try_to_release_page() - release old fs-specific metadata on a page
15591da177e4SLinus Torvalds  *
15601da177e4SLinus Torvalds  * @page: the page which the kernel is trying to free
15611da177e4SLinus Torvalds  * @gfp_mask: memory allocation flags (and I/O mode)
15621da177e4SLinus Torvalds  *
15631da177e4SLinus Torvalds  * The address_space is asked to try to release any data held against the page
15641da177e4SLinus Torvalds  * (presumably at page->private).  If the release was successful, return `1'.
15651da177e4SLinus Torvalds  * Otherwise return zero.
15661da177e4SLinus Torvalds  *
15671da177e4SLinus Torvalds  * The @gfp_mask argument specifies whether I/O may be performed to release
15681da177e4SLinus Torvalds  * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
15691da177e4SLinus Torvalds  *
15701da177e4SLinus Torvalds  * NOTE: @gfp_mask may go away, and this function may become non-blocking.
15711da177e4SLinus Torvalds  */
157227496a8cSAl Viro int try_to_release_page(struct page *page, gfp_t gfp_mask)
15731da177e4SLinus Torvalds {
15741da177e4SLinus Torvalds 	struct address_space * const mapping = page->mapping;
15751da177e4SLinus Torvalds 
15761da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
15771da177e4SLinus Torvalds 	if (PageWriteback(page))
15781da177e4SLinus Torvalds 		return 0;
15791da177e4SLinus Torvalds 
15801da177e4SLinus Torvalds 	if (mapping && mapping->a_ops->releasepage)
15811da177e4SLinus Torvalds 		return mapping->a_ops->releasepage(page, gfp_mask);
15821da177e4SLinus Torvalds 	return try_to_free_buffers(page);
15831da177e4SLinus Torvalds }
15841da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_release_page);
15851da177e4SLinus Torvalds 
15861da177e4SLinus Torvalds /**
15871da177e4SLinus Torvalds  * block_invalidatepage - invalidate part or all of a buffer-backed page
15881da177e4SLinus Torvalds  *
15891da177e4SLinus Torvalds  * @page: the page which is affected
15901da177e4SLinus Torvalds  * @offset: the index of the truncation point
15911da177e4SLinus Torvalds  *
15921da177e4SLinus Torvalds  * block_invalidatepage() is called when all or part of the page has become
15931da177e4SLinus Torvalds  * invalidated by a truncate operation.
15941da177e4SLinus Torvalds  *
15951da177e4SLinus Torvalds  * block_invalidatepage() does not have to release all buffers, but it must
15961da177e4SLinus Torvalds  * ensure that no dirty buffer is left outside @offset and that no I/O
15971da177e4SLinus Torvalds  * is underway against any of the blocks which are outside the truncation
15981da177e4SLinus Torvalds  * point, because the caller is about to free (and possibly reuse) those
15991da177e4SLinus Torvalds  * blocks on-disk.
16001da177e4SLinus Torvalds  */
16011da177e4SLinus Torvalds int block_invalidatepage(struct page *page, unsigned long offset)
16021da177e4SLinus Torvalds {
16031da177e4SLinus Torvalds 	struct buffer_head *head, *bh, *next;
16041da177e4SLinus Torvalds 	unsigned int curr_off = 0;
16051da177e4SLinus Torvalds 	int ret = 1;
16061da177e4SLinus Torvalds 
16071da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
16081da177e4SLinus Torvalds 	if (!page_has_buffers(page))
16091da177e4SLinus Torvalds 		goto out;
16101da177e4SLinus Torvalds 
16111da177e4SLinus Torvalds 	head = page_buffers(page);
16121da177e4SLinus Torvalds 	bh = head;
16131da177e4SLinus Torvalds 	do {
16141da177e4SLinus Torvalds 		unsigned int next_off = curr_off + bh->b_size;
16151da177e4SLinus Torvalds 		next = bh->b_this_page;
16161da177e4SLinus Torvalds 
16171da177e4SLinus Torvalds 		/*
16181da177e4SLinus Torvalds 		 * is this block fully invalidated?
16191da177e4SLinus Torvalds 		 */
16201da177e4SLinus Torvalds 		if (offset <= curr_off)
16211da177e4SLinus Torvalds 			discard_buffer(bh);
16221da177e4SLinus Torvalds 		curr_off = next_off;
16231da177e4SLinus Torvalds 		bh = next;
16241da177e4SLinus Torvalds 	} while (bh != head);
16251da177e4SLinus Torvalds 
16261da177e4SLinus Torvalds 	/*
16271da177e4SLinus Torvalds 	 * We release buffers only if the entire page is being invalidated.
16281da177e4SLinus Torvalds 	 * The get_block cached value has been unconditionally invalidated,
16291da177e4SLinus Torvalds 	 * so real IO is not possible anymore.
16301da177e4SLinus Torvalds 	 */
16311da177e4SLinus Torvalds 	if (offset == 0)
16321da177e4SLinus Torvalds 		ret = try_to_release_page(page, 0);
16331da177e4SLinus Torvalds out:
16341da177e4SLinus Torvalds 	return ret;
16351da177e4SLinus Torvalds }
16361da177e4SLinus Torvalds EXPORT_SYMBOL(block_invalidatepage);
16371da177e4SLinus Torvalds 
1638aaa4059bSJan Kara int do_invalidatepage(struct page *page, unsigned long offset)
1639aaa4059bSJan Kara {
1640aaa4059bSJan Kara 	int (*invalidatepage)(struct page *, unsigned long);
1641aaa4059bSJan Kara 	invalidatepage = page->mapping->a_ops->invalidatepage;
1642aaa4059bSJan Kara 	if (invalidatepage == NULL)
1643aaa4059bSJan Kara 		invalidatepage = block_invalidatepage;
1644aaa4059bSJan Kara 	return (*invalidatepage)(page, offset);
1645aaa4059bSJan Kara }
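/*
 * Illustrative sketch (not part of this file): a filesystem needing extra
 * work at truncate time can wrap block_invalidatepage() in its own
 * ->invalidatepage(); one which does not can leave the method NULL and
 * rely on the fallback above.  The "myfs" names are hypothetical:
 *
 *	static int myfs_invalidatepage(struct page *page, unsigned long offset)
 *	{
 *		(filesystem-private cleanup for the truncated range)
 *		return block_invalidatepage(page, offset);
 *	}
 */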
1646aaa4059bSJan Kara 
16471da177e4SLinus Torvalds /*
16481da177e4SLinus Torvalds  * We attach and possibly dirty the buffers atomically wrt
16491da177e4SLinus Torvalds  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
16501da177e4SLinus Torvalds  * is already excluded via the page lock.
16511da177e4SLinus Torvalds  */
16521da177e4SLinus Torvalds void create_empty_buffers(struct page *page,
16531da177e4SLinus Torvalds 			unsigned long blocksize, unsigned long b_state)
16541da177e4SLinus Torvalds {
16551da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *tail;
16561da177e4SLinus Torvalds 
16571da177e4SLinus Torvalds 	head = alloc_page_buffers(page, blocksize, 1);
16581da177e4SLinus Torvalds 	bh = head;
16591da177e4SLinus Torvalds 	do {
16601da177e4SLinus Torvalds 		bh->b_state |= b_state;
16611da177e4SLinus Torvalds 		tail = bh;
16621da177e4SLinus Torvalds 		bh = bh->b_this_page;
16631da177e4SLinus Torvalds 	} while (bh);
16641da177e4SLinus Torvalds 	tail->b_this_page = head;
16651da177e4SLinus Torvalds 
16661da177e4SLinus Torvalds 	spin_lock(&page->mapping->private_lock);
16671da177e4SLinus Torvalds 	if (PageUptodate(page) || PageDirty(page)) {
16681da177e4SLinus Torvalds 		bh = head;
16691da177e4SLinus Torvalds 		do {
16701da177e4SLinus Torvalds 			if (PageDirty(page))
16711da177e4SLinus Torvalds 				set_buffer_dirty(bh);
16721da177e4SLinus Torvalds 			if (PageUptodate(page))
16731da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
16741da177e4SLinus Torvalds 			bh = bh->b_this_page;
16751da177e4SLinus Torvalds 		} while (bh != head);
16761da177e4SLinus Torvalds 	}
16771da177e4SLinus Torvalds 	attach_page_buffers(page, head);
16781da177e4SLinus Torvalds 	spin_unlock(&page->mapping->private_lock);
16791da177e4SLinus Torvalds }
16801da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers);
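/*
 * Illustrative usage, mirroring the callers later in this file: buffers
 * are typically attached lazily, just before a page is worked on:
 *
 *	if (!page_has_buffers(page))
 *		create_empty_buffers(page, blocksize, 0);
 *	head = page_buffers(page);
 */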
16811da177e4SLinus Torvalds 
16821da177e4SLinus Torvalds /*
16831da177e4SLinus Torvalds  * We are taking a block for data and we don't want any output from any
16841da177e4SLinus Torvalds  * buffer-cache aliases starting from the return of this function and
16851da177e4SLinus Torvalds  * until the moment when something explicitly marks the buffer
16861da177e4SLinus Torvalds  * dirty (hopefully that will not happen until we free that block ;-)
16871da177e4SLinus Torvalds  * We don't even need to mark it not-uptodate - nobody can expect
16881da177e4SLinus Torvalds  * anything from a newly allocated buffer anyway.  We used to use
16891da177e4SLinus Torvalds  * unmap_buffer() for such invalidation, but that was wrong.  We definitely
16901da177e4SLinus Torvalds  * don't want to mark the alias unmapped, for example - it would confuse
16911da177e4SLinus Torvalds  * anyone who might pick it up with bread() afterwards...
16921da177e4SLinus Torvalds  *
16931da177e4SLinus Torvalds  * Also..  Note that bforget() doesn't lock the buffer.  So there can
16941da177e4SLinus Torvalds  * be writeout I/O going on against recently-freed buffers.  We don't
16951da177e4SLinus Torvalds  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
16961da177e4SLinus Torvalds  * only if we really need to.  That happens here.
16971da177e4SLinus Torvalds  */
16981da177e4SLinus Torvalds void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
16991da177e4SLinus Torvalds {
17001da177e4SLinus Torvalds 	struct buffer_head *old_bh;
17011da177e4SLinus Torvalds 
17021da177e4SLinus Torvalds 	might_sleep();
17031da177e4SLinus Torvalds 
1704385fd4c5SCoywolf Qi Hunt 	old_bh = __find_get_block_slow(bdev, block);
17051da177e4SLinus Torvalds 	if (old_bh) {
17061da177e4SLinus Torvalds 		clear_buffer_dirty(old_bh);
17071da177e4SLinus Torvalds 		wait_on_buffer(old_bh);
17081da177e4SLinus Torvalds 		clear_buffer_req(old_bh);
17091da177e4SLinus Torvalds 		__brelse(old_bh);
17101da177e4SLinus Torvalds 	}
17111da177e4SLinus Torvalds }
17121da177e4SLinus Torvalds EXPORT_SYMBOL(unmap_underlying_metadata);
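/*
 * Illustrative usage, mirroring __block_prepare_write() below: whenever
 * get_block() reports a freshly allocated block, any blockdev alias must
 * be shot down before new data is written to it:
 *
 *	if (buffer_new(bh))
 *		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
 */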
17131da177e4SLinus Torvalds 
17141da177e4SLinus Torvalds /*
17151da177e4SLinus Torvalds  * NOTE! All mapped/uptodate combinations are valid:
17161da177e4SLinus Torvalds  *
17171da177e4SLinus Torvalds  *	Mapped	Uptodate	Meaning
17181da177e4SLinus Torvalds  *
17191da177e4SLinus Torvalds  *	No	No		"unknown" - must do get_block()
17201da177e4SLinus Torvalds  *	No	Yes		"hole" - zero-filled
17211da177e4SLinus Torvalds  *	Yes	No		"allocated" - allocated on disk, not read in
17221da177e4SLinus Torvalds  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
17231da177e4SLinus Torvalds  *
17241da177e4SLinus Torvalds  * "Dirty" is valid only with the last case (mapped+uptodate).
17251da177e4SLinus Torvalds  */
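/*
 * A sketch of how a reader acts on the table above (illustrative, and
 * matching the logic of block_read_full_page() below):
 *
 *	if (!buffer_mapped(bh)) {
 *		get_block(inode, block, bh, 0);	(resolve "unknown")
 *		if (!buffer_mapped(bh)) {
 *			(a hole: zero-fill the data)
 *			set_buffer_uptodate(bh);
 *		}
 *	}
 *	if (buffer_mapped(bh) && !buffer_uptodate(bh))
 *		(read it in: "allocated" becomes "valid")
 */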
17261da177e4SLinus Torvalds 
17271da177e4SLinus Torvalds /*
17281da177e4SLinus Torvalds  * While block_write_full_page is writing back the dirty buffers under
17291da177e4SLinus Torvalds  * the page lock, whoever dirtied the buffers may decide to clean them
17301da177e4SLinus Torvalds  * again at any time.  We handle that by only looking at the buffer
17311da177e4SLinus Torvalds  * state inside lock_buffer().
17321da177e4SLinus Torvalds  *
17331da177e4SLinus Torvalds  * If block_write_full_page() is called for regular writeback
17341da177e4SLinus Torvalds  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
17351da177e4SLinus Torvalds  * locked buffer.   This only can happen if someone has written the buffer
17361da177e4SLinus Torvalds  * directly, with submit_bh().  At the address_space level PageWriteback
17371da177e4SLinus Torvalds  * prevents this contention from occurring.
17381da177e4SLinus Torvalds  */
17391da177e4SLinus Torvalds static int __block_write_full_page(struct inode *inode, struct page *page,
17401da177e4SLinus Torvalds 			get_block_t *get_block, struct writeback_control *wbc)
17411da177e4SLinus Torvalds {
17421da177e4SLinus Torvalds 	int err;
17431da177e4SLinus Torvalds 	sector_t block;
17441da177e4SLinus Torvalds 	sector_t last_block;
1745f0fbd5fcSAndrew Morton 	struct buffer_head *bh, *head;
17461da177e4SLinus Torvalds 	int nr_underway = 0;
17471da177e4SLinus Torvalds 
17481da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
17491da177e4SLinus Torvalds 
17501da177e4SLinus Torvalds 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
17511da177e4SLinus Torvalds 
17521da177e4SLinus Torvalds 	if (!page_has_buffers(page)) {
17531da177e4SLinus Torvalds 		create_empty_buffers(page, 1 << inode->i_blkbits,
17541da177e4SLinus Torvalds 					(1 << BH_Dirty)|(1 << BH_Uptodate));
17551da177e4SLinus Torvalds 	}
17561da177e4SLinus Torvalds 
17571da177e4SLinus Torvalds 	/*
17581da177e4SLinus Torvalds 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
17591da177e4SLinus Torvalds 	 * here, and the (potentially unmapped) buffers may become dirty at
17601da177e4SLinus Torvalds 	 * any time.  If a buffer becomes dirty here after we've inspected it
17611da177e4SLinus Torvalds 	 * then we just miss that fact, and the page stays dirty.
17621da177e4SLinus Torvalds 	 *
17631da177e4SLinus Torvalds 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
17641da177e4SLinus Torvalds 	 * handle that here by just cleaning them.
17651da177e4SLinus Torvalds 	 */
17661da177e4SLinus Torvalds 
176754b21a79SAndrew Morton 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
17681da177e4SLinus Torvalds 	head = page_buffers(page);
17691da177e4SLinus Torvalds 	bh = head;
17701da177e4SLinus Torvalds 
17711da177e4SLinus Torvalds 	/*
17721da177e4SLinus Torvalds 	 * Get all the dirty buffers mapped to disk addresses and
17731da177e4SLinus Torvalds 	 * handle any aliases from the underlying blockdev's mapping.
17741da177e4SLinus Torvalds 	 */
17751da177e4SLinus Torvalds 	do {
17761da177e4SLinus Torvalds 		if (block > last_block) {
17771da177e4SLinus Torvalds 			/*
17781da177e4SLinus Torvalds 			 * mapped buffers outside i_size will occur, because
17791da177e4SLinus Torvalds 			 * this page can be outside i_size when there is a
17801da177e4SLinus Torvalds 			 * truncate in progress.
17811da177e4SLinus Torvalds 			 */
17821da177e4SLinus Torvalds 			/*
17831da177e4SLinus Torvalds 			 * The buffer was zeroed by block_write_full_page()
17841da177e4SLinus Torvalds 			 */
17851da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
17861da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
17871da177e4SLinus Torvalds 		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
17881da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
17891da177e4SLinus Torvalds 			if (err)
17901da177e4SLinus Torvalds 				goto recover;
17911da177e4SLinus Torvalds 			if (buffer_new(bh)) {
17921da177e4SLinus Torvalds 				/* blockdev mappings never come here */
17931da177e4SLinus Torvalds 				clear_buffer_new(bh);
17941da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
17951da177e4SLinus Torvalds 							bh->b_blocknr);
17961da177e4SLinus Torvalds 			}
17971da177e4SLinus Torvalds 		}
17981da177e4SLinus Torvalds 		bh = bh->b_this_page;
17991da177e4SLinus Torvalds 		block++;
18001da177e4SLinus Torvalds 	} while (bh != head);
18011da177e4SLinus Torvalds 
18021da177e4SLinus Torvalds 	do {
18031da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
18041da177e4SLinus Torvalds 			continue;
18051da177e4SLinus Torvalds 		/*
18061da177e4SLinus Torvalds 		 * If it's a fully non-blocking write attempt and we cannot
18071da177e4SLinus Torvalds 		 * lock the buffer then redirty the page.  Note that this can
18081da177e4SLinus Torvalds 		 * potentially cause a busy-wait loop from pdflush and kswapd
18091da177e4SLinus Torvalds 		 * activity, but those code paths have their own higher-level
18101da177e4SLinus Torvalds 		 * throttling.
18111da177e4SLinus Torvalds 		 */
18121da177e4SLinus Torvalds 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
18131da177e4SLinus Torvalds 			lock_buffer(bh);
18141da177e4SLinus Torvalds 		} else if (test_set_buffer_locked(bh)) {
18151da177e4SLinus Torvalds 			redirty_page_for_writepage(wbc, page);
18161da177e4SLinus Torvalds 			continue;
18171da177e4SLinus Torvalds 		}
18181da177e4SLinus Torvalds 		if (test_clear_buffer_dirty(bh)) {
18191da177e4SLinus Torvalds 			mark_buffer_async_write(bh);
18201da177e4SLinus Torvalds 		} else {
18211da177e4SLinus Torvalds 			unlock_buffer(bh);
18221da177e4SLinus Torvalds 		}
18231da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
18241da177e4SLinus Torvalds 
18251da177e4SLinus Torvalds 	/*
18261da177e4SLinus Torvalds 	 * The page and its buffers are protected by PageWriteback(), so we can
18271da177e4SLinus Torvalds 	 * drop the bh refcounts early.
18281da177e4SLinus Torvalds 	 */
18291da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
18301da177e4SLinus Torvalds 	set_page_writeback(page);
18311da177e4SLinus Torvalds 
18321da177e4SLinus Torvalds 	do {
18331da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
18341da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
18351da177e4SLinus Torvalds 			submit_bh(WRITE, bh);
18361da177e4SLinus Torvalds 			nr_underway++;
1837ad576e63SNick Piggin 		}
18381da177e4SLinus Torvalds 		bh = next;
18391da177e4SLinus Torvalds 	} while (bh != head);
184005937baaSAndrew Morton 	unlock_page(page);
18411da177e4SLinus Torvalds 
18421da177e4SLinus Torvalds 	err = 0;
18431da177e4SLinus Torvalds done:
18441da177e4SLinus Torvalds 	if (nr_underway == 0) {
18451da177e4SLinus Torvalds 		/*
18461da177e4SLinus Torvalds 		 * The page was marked dirty, but the buffers were
18471da177e4SLinus Torvalds 		 * clean.  Someone wrote them back by hand with
18481da177e4SLinus Torvalds 		 * ll_rw_block/submit_bh.  A rare case.
18491da177e4SLinus Torvalds 		 */
18501da177e4SLinus Torvalds 		int uptodate = 1;
18511da177e4SLinus Torvalds 		do {
18521da177e4SLinus Torvalds 			if (!buffer_uptodate(bh)) {
18531da177e4SLinus Torvalds 				uptodate = 0;
18541da177e4SLinus Torvalds 				break;
18551da177e4SLinus Torvalds 			}
18561da177e4SLinus Torvalds 			bh = bh->b_this_page;
18571da177e4SLinus Torvalds 		} while (bh != head);
18581da177e4SLinus Torvalds 		if (uptodate)
18591da177e4SLinus Torvalds 			SetPageUptodate(page);
18601da177e4SLinus Torvalds 		end_page_writeback(page);
18611da177e4SLinus Torvalds 		/*
18621da177e4SLinus Torvalds 		 * The page and buffer_heads can be released at any time from
18631da177e4SLinus Torvalds 		 * here on.
18641da177e4SLinus Torvalds 		 */
18651da177e4SLinus Torvalds 		wbc->pages_skipped++;	/* We didn't write this page */
18661da177e4SLinus Torvalds 	}
18671da177e4SLinus Torvalds 	return err;
18681da177e4SLinus Torvalds 
18691da177e4SLinus Torvalds recover:
18701da177e4SLinus Torvalds 	/*
18711da177e4SLinus Torvalds 	 * ENOSPC, or some other error.  We may already have added some
18721da177e4SLinus Torvalds 	 * blocks to the file, so we need to write these out to avoid
18731da177e4SLinus Torvalds 	 * exposing stale data.
18741da177e4SLinus Torvalds 	 * The page is currently locked and not marked for writeback
18751da177e4SLinus Torvalds 	 */
18761da177e4SLinus Torvalds 	bh = head;
18771da177e4SLinus Torvalds 	/* Recovery: lock and submit the mapped buffers */
18781da177e4SLinus Torvalds 	do {
18791da177e4SLinus Torvalds 		if (buffer_mapped(bh) && buffer_dirty(bh)) {
18801da177e4SLinus Torvalds 			lock_buffer(bh);
18811da177e4SLinus Torvalds 			mark_buffer_async_write(bh);
18821da177e4SLinus Torvalds 		} else {
18831da177e4SLinus Torvalds 			/*
18841da177e4SLinus Torvalds 			 * The buffer may have been set dirty during
18851da177e4SLinus Torvalds 			 * attachment to a dirty page.
18861da177e4SLinus Torvalds 			 */
18871da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
18881da177e4SLinus Torvalds 		}
18891da177e4SLinus Torvalds 	} while ((bh = bh->b_this_page) != head);
18901da177e4SLinus Torvalds 	SetPageError(page);
18911da177e4SLinus Torvalds 	BUG_ON(PageWriteback(page));
18921da177e4SLinus Torvalds 	set_page_writeback(page);
18931da177e4SLinus Torvalds 	unlock_page(page);
18941da177e4SLinus Torvalds 	do {
18951da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
18961da177e4SLinus Torvalds 		if (buffer_async_write(bh)) {
18971da177e4SLinus Torvalds 			clear_buffer_dirty(bh);
18981da177e4SLinus Torvalds 			submit_bh(WRITE, bh);
18991da177e4SLinus Torvalds 			nr_underway++;
1900ad576e63SNick Piggin 		}
19011da177e4SLinus Torvalds 		bh = next;
19021da177e4SLinus Torvalds 	} while (bh != head);
19031da177e4SLinus Torvalds 	goto done;
19041da177e4SLinus Torvalds }
19051da177e4SLinus Torvalds 
19061da177e4SLinus Torvalds static int __block_prepare_write(struct inode *inode, struct page *page,
19071da177e4SLinus Torvalds 		unsigned from, unsigned to, get_block_t *get_block)
19081da177e4SLinus Torvalds {
19091da177e4SLinus Torvalds 	unsigned block_start, block_end;
19101da177e4SLinus Torvalds 	sector_t block;
19111da177e4SLinus Torvalds 	int err = 0;
19121da177e4SLinus Torvalds 	unsigned blocksize, bbits;
19131da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
19141da177e4SLinus Torvalds 
19151da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
19161da177e4SLinus Torvalds 	BUG_ON(from > PAGE_CACHE_SIZE);
19171da177e4SLinus Torvalds 	BUG_ON(to > PAGE_CACHE_SIZE);
19181da177e4SLinus Torvalds 	BUG_ON(from > to);
19191da177e4SLinus Torvalds 
19201da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
19211da177e4SLinus Torvalds 	if (!page_has_buffers(page))
19221da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
19231da177e4SLinus Torvalds 	head = page_buffers(page);
19241da177e4SLinus Torvalds 
19251da177e4SLinus Torvalds 	bbits = inode->i_blkbits;
19261da177e4SLinus Torvalds 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
19271da177e4SLinus Torvalds 
19281da177e4SLinus Torvalds 	for(bh = head, block_start = 0; bh != head || !block_start;
19291da177e4SLinus Torvalds 	    block++, block_start=block_end, bh = bh->b_this_page) {
19301da177e4SLinus Torvalds 		block_end = block_start + blocksize;
19311da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
19321da177e4SLinus Torvalds 			if (PageUptodate(page)) {
19331da177e4SLinus Torvalds 				if (!buffer_uptodate(bh))
19341da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
19351da177e4SLinus Torvalds 			}
19361da177e4SLinus Torvalds 			continue;
19371da177e4SLinus Torvalds 		}
19381da177e4SLinus Torvalds 		if (buffer_new(bh))
19391da177e4SLinus Torvalds 			clear_buffer_new(bh);
19401da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
19411da177e4SLinus Torvalds 			err = get_block(inode, block, bh, 1);
19421da177e4SLinus Torvalds 			if (err)
1943f3ddbdc6SNick Piggin 				break;
19441da177e4SLinus Torvalds 			if (buffer_new(bh)) {
19451da177e4SLinus Torvalds 				unmap_underlying_metadata(bh->b_bdev,
19461da177e4SLinus Torvalds 							bh->b_blocknr);
19471da177e4SLinus Torvalds 				if (PageUptodate(page)) {
19481da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
19491da177e4SLinus Torvalds 					continue;
19501da177e4SLinus Torvalds 				}
19511da177e4SLinus Torvalds 				if (block_end > to || block_start < from) {
19521da177e4SLinus Torvalds 					void *kaddr;
19531da177e4SLinus Torvalds 
19541da177e4SLinus Torvalds 					kaddr = kmap_atomic(page, KM_USER0);
19551da177e4SLinus Torvalds 					if (block_end > to)
19561da177e4SLinus Torvalds 						memset(kaddr+to, 0,
19571da177e4SLinus Torvalds 							block_end-to);
19581da177e4SLinus Torvalds 					if (block_start < from)
19591da177e4SLinus Torvalds 						memset(kaddr+block_start,
19601da177e4SLinus Torvalds 							0, from-block_start);
19611da177e4SLinus Torvalds 					flush_dcache_page(page);
19621da177e4SLinus Torvalds 					kunmap_atomic(kaddr, KM_USER0);
19631da177e4SLinus Torvalds 				}
19641da177e4SLinus Torvalds 				continue;
19651da177e4SLinus Torvalds 			}
19661da177e4SLinus Torvalds 		}
19671da177e4SLinus Torvalds 		if (PageUptodate(page)) {
19681da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
19691da177e4SLinus Torvalds 				set_buffer_uptodate(bh);
19701da177e4SLinus Torvalds 			continue;
19711da177e4SLinus Torvalds 		}
19721da177e4SLinus Torvalds 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
19731da177e4SLinus Torvalds 		     (block_start < from || block_end > to)) {
19741da177e4SLinus Torvalds 			ll_rw_block(READ, 1, &bh);
19751da177e4SLinus Torvalds 			*wait_bh++=bh;
19761da177e4SLinus Torvalds 		}
19771da177e4SLinus Torvalds 	}
19781da177e4SLinus Torvalds 	/*
19791da177e4SLinus Torvalds 	 * If we issued read requests - let them complete.
19801da177e4SLinus Torvalds 	 */
19811da177e4SLinus Torvalds 	while(wait_bh > wait) {
19821da177e4SLinus Torvalds 		wait_on_buffer(*--wait_bh);
19831da177e4SLinus Torvalds 		if (!buffer_uptodate(*wait_bh))
1984f3ddbdc6SNick Piggin 			err = -EIO;
19851da177e4SLinus Torvalds 	}
1986152becd2SAnton Altaparmakov 	if (!err) {
1987152becd2SAnton Altaparmakov 		bh = head;
1988152becd2SAnton Altaparmakov 		do {
1989152becd2SAnton Altaparmakov 			if (buffer_new(bh))
1990152becd2SAnton Altaparmakov 				clear_buffer_new(bh);
1991152becd2SAnton Altaparmakov 		} while ((bh = bh->b_this_page) != head);
1992152becd2SAnton Altaparmakov 		return 0;
1993152becd2SAnton Altaparmakov 	}
1994f3ddbdc6SNick Piggin 	/* Error case: */
19951da177e4SLinus Torvalds 	/*
19961da177e4SLinus Torvalds 	 * Zero out any newly allocated blocks to avoid exposing stale
19971da177e4SLinus Torvalds 	 * data.  If BH_New is set, we know that the block was newly
19981da177e4SLinus Torvalds 	 * allocated in the above loop.
19991da177e4SLinus Torvalds 	 */
20001da177e4SLinus Torvalds 	bh = head;
20011da177e4SLinus Torvalds 	block_start = 0;
20021da177e4SLinus Torvalds 	do {
20031da177e4SLinus Torvalds 		block_end = block_start+blocksize;
20041da177e4SLinus Torvalds 		if (block_end <= from)
20051da177e4SLinus Torvalds 			goto next_bh;
20061da177e4SLinus Torvalds 		if (block_start >= to)
20071da177e4SLinus Torvalds 			break;
20081da177e4SLinus Torvalds 		if (buffer_new(bh)) {
20091da177e4SLinus Torvalds 			void *kaddr;
20101da177e4SLinus Torvalds 
20111da177e4SLinus Torvalds 			clear_buffer_new(bh);
20121da177e4SLinus Torvalds 			kaddr = kmap_atomic(page, KM_USER0);
20131da177e4SLinus Torvalds 			memset(kaddr+block_start, 0, bh->b_size);
20141da177e4SLinus Torvalds 			kunmap_atomic(kaddr, KM_USER0);
20151da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
20161da177e4SLinus Torvalds 			mark_buffer_dirty(bh);
20171da177e4SLinus Torvalds 		}
20181da177e4SLinus Torvalds next_bh:
20191da177e4SLinus Torvalds 		block_start = block_end;
20201da177e4SLinus Torvalds 		bh = bh->b_this_page;
20211da177e4SLinus Torvalds 	} while (bh != head);
20221da177e4SLinus Torvalds 	return err;
20231da177e4SLinus Torvalds }
20241da177e4SLinus Torvalds 
20251da177e4SLinus Torvalds static int __block_commit_write(struct inode *inode, struct page *page,
20261da177e4SLinus Torvalds 		unsigned from, unsigned to)
20271da177e4SLinus Torvalds {
20281da177e4SLinus Torvalds 	unsigned block_start, block_end;
20291da177e4SLinus Torvalds 	int partial = 0;
20301da177e4SLinus Torvalds 	unsigned blocksize;
20311da177e4SLinus Torvalds 	struct buffer_head *bh, *head;
20321da177e4SLinus Torvalds 
20331da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
20341da177e4SLinus Torvalds 
20351da177e4SLinus Torvalds 	for(bh = head = page_buffers(page), block_start = 0;
20361da177e4SLinus Torvalds 	    bh != head || !block_start;
20371da177e4SLinus Torvalds 	    block_start=block_end, bh = bh->b_this_page) {
20381da177e4SLinus Torvalds 		block_end = block_start + blocksize;
20391da177e4SLinus Torvalds 		if (block_end <= from || block_start >= to) {
20401da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
20411da177e4SLinus Torvalds 				partial = 1;
20421da177e4SLinus Torvalds 		} else {
20431da177e4SLinus Torvalds 			set_buffer_uptodate(bh);
20441da177e4SLinus Torvalds 			mark_buffer_dirty(bh);
20451da177e4SLinus Torvalds 		}
20461da177e4SLinus Torvalds 	}
20471da177e4SLinus Torvalds 
20481da177e4SLinus Torvalds 	/*
20491da177e4SLinus Torvalds 	 * If this is a partial write which happened to make all buffers
20501da177e4SLinus Torvalds 	 * uptodate then we can optimize away a bogus readpage() for
20511da177e4SLinus Torvalds 	 * the next read(). Here we 'discover' whether the page went
20521da177e4SLinus Torvalds 	 * uptodate as a result of this (potentially partial) write.
20531da177e4SLinus Torvalds 	 */
20541da177e4SLinus Torvalds 	if (!partial)
20551da177e4SLinus Torvalds 		SetPageUptodate(page);
20561da177e4SLinus Torvalds 	return 0;
20571da177e4SLinus Torvalds }
20581da177e4SLinus Torvalds 
20591da177e4SLinus Torvalds /*
20601da177e4SLinus Torvalds  * Generic "read page" function for block devices that have the normal
20611da177e4SLinus Torvalds  * get_block functionality.  This covers most block device filesystems.
20621da177e4SLinus Torvalds  * Reads the page asynchronously --- the unlock_buffer() and
20631da177e4SLinus Torvalds  * set/clear_buffer_uptodate() functions propagate buffer state into the
20641da177e4SLinus Torvalds  * page struct once IO has completed.
20651da177e4SLinus Torvalds  */
20661da177e4SLinus Torvalds int block_read_full_page(struct page *page, get_block_t *get_block)
20671da177e4SLinus Torvalds {
20681da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
20691da177e4SLinus Torvalds 	sector_t iblock, lblock;
20701da177e4SLinus Torvalds 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
20711da177e4SLinus Torvalds 	unsigned int blocksize;
20721da177e4SLinus Torvalds 	int nr, i;
20731da177e4SLinus Torvalds 	int fully_mapped = 1;
20741da177e4SLinus Torvalds 
2075cd7619d6SMatt Mackall 	BUG_ON(!PageLocked(page));
20761da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
20771da177e4SLinus Torvalds 	if (!page_has_buffers(page))
20781da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
20791da177e4SLinus Torvalds 	head = page_buffers(page);
20801da177e4SLinus Torvalds 
20811da177e4SLinus Torvalds 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
20821da177e4SLinus Torvalds 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
20831da177e4SLinus Torvalds 	bh = head;
20841da177e4SLinus Torvalds 	nr = 0;
20851da177e4SLinus Torvalds 	i = 0;
20861da177e4SLinus Torvalds 
20871da177e4SLinus Torvalds 	do {
20881da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
20891da177e4SLinus Torvalds 			continue;
20901da177e4SLinus Torvalds 
20911da177e4SLinus Torvalds 		if (!buffer_mapped(bh)) {
2092c64610baSAndrew Morton 			int err = 0;
2093c64610baSAndrew Morton 
20941da177e4SLinus Torvalds 			fully_mapped = 0;
20951da177e4SLinus Torvalds 			if (iblock < lblock) {
2096c64610baSAndrew Morton 				err = get_block(inode, iblock, bh, 0);
2097c64610baSAndrew Morton 				if (err)
20981da177e4SLinus Torvalds 					SetPageError(page);
20991da177e4SLinus Torvalds 			}
21001da177e4SLinus Torvalds 			if (!buffer_mapped(bh)) {
21011da177e4SLinus Torvalds 				void *kaddr = kmap_atomic(page, KM_USER0);
21021da177e4SLinus Torvalds 				memset(kaddr + i * blocksize, 0, blocksize);
21031da177e4SLinus Torvalds 				flush_dcache_page(page);
21041da177e4SLinus Torvalds 				kunmap_atomic(kaddr, KM_USER0);
2105c64610baSAndrew Morton 				if (!err)
21061da177e4SLinus Torvalds 					set_buffer_uptodate(bh);
21071da177e4SLinus Torvalds 				continue;
21081da177e4SLinus Torvalds 			}
21091da177e4SLinus Torvalds 			/*
21101da177e4SLinus Torvalds 			 * get_block() might have updated the buffer
21111da177e4SLinus Torvalds 			 * synchronously
21121da177e4SLinus Torvalds 			 */
21131da177e4SLinus Torvalds 			if (buffer_uptodate(bh))
21141da177e4SLinus Torvalds 				continue;
21151da177e4SLinus Torvalds 		}
21161da177e4SLinus Torvalds 		arr[nr++] = bh;
21171da177e4SLinus Torvalds 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
21181da177e4SLinus Torvalds 
21191da177e4SLinus Torvalds 	if (fully_mapped)
21201da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
21211da177e4SLinus Torvalds 
21221da177e4SLinus Torvalds 	if (!nr) {
21231da177e4SLinus Torvalds 		/*
21241da177e4SLinus Torvalds 		 * All buffers are uptodate - we can set the page uptodate
21251da177e4SLinus Torvalds 		 * as well. But not if get_block() returned an error.
21261da177e4SLinus Torvalds 		 */
21271da177e4SLinus Torvalds 		if (!PageError(page))
21281da177e4SLinus Torvalds 			SetPageUptodate(page);
21291da177e4SLinus Torvalds 		unlock_page(page);
21301da177e4SLinus Torvalds 		return 0;
21311da177e4SLinus Torvalds 	}
21321da177e4SLinus Torvalds 
21331da177e4SLinus Torvalds 	/* Stage two: lock the buffers */
21341da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
21351da177e4SLinus Torvalds 		bh = arr[i];
21361da177e4SLinus Torvalds 		lock_buffer(bh);
21371da177e4SLinus Torvalds 		mark_buffer_async_read(bh);
21381da177e4SLinus Torvalds 	}
21391da177e4SLinus Torvalds 
21401da177e4SLinus Torvalds 	/*
21411da177e4SLinus Torvalds 	 * Stage 3: start the IO.  Check for uptodateness
21421da177e4SLinus Torvalds 	 * inside the buffer lock in case another process reading
21431da177e4SLinus Torvalds 	 * the underlying blockdev brought it uptodate (the sct fix).
21441da177e4SLinus Torvalds 	 */
21451da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
21461da177e4SLinus Torvalds 		bh = arr[i];
21471da177e4SLinus Torvalds 		if (buffer_uptodate(bh))
21481da177e4SLinus Torvalds 			end_buffer_async_read(bh, 1);
21491da177e4SLinus Torvalds 		else
21501da177e4SLinus Torvalds 			submit_bh(READ, bh);
21511da177e4SLinus Torvalds 	}
21521da177e4SLinus Torvalds 	return 0;
21531da177e4SLinus Torvalds }
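
/*
 * Editor's example, a minimal sketch rather than code from this file: a
 * filesystem whose block mapper has the usual get_block_t shape (the
 * myfs_* names here and in the later sketches are hypothetical) would
 * normally implement ->readpage as a thin wrapper:
 */
int myfs_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create);

static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}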
21541da177e4SLinus Torvalds 
21551da177e4SLinus Torvalds /* Utility function for filesystems that need to do work on expanding
21561da177e4SLinus Torvalds  * truncates.  Uses prepare/commit_write to allow the filesystem to
21571da177e4SLinus Torvalds  * deal with the hole.
21581da177e4SLinus Torvalds  */
215905eb0b51SOGAWA Hirofumi static int __generic_cont_expand(struct inode *inode, loff_t size,
216005eb0b51SOGAWA Hirofumi 				 pgoff_t index, unsigned int offset)
21611da177e4SLinus Torvalds {
21621da177e4SLinus Torvalds 	struct address_space *mapping = inode->i_mapping;
21631da177e4SLinus Torvalds 	struct page *page;
216405eb0b51SOGAWA Hirofumi 	unsigned long limit;
21651da177e4SLinus Torvalds 	int err;
21661da177e4SLinus Torvalds 
21671da177e4SLinus Torvalds 	err = -EFBIG;
21681da177e4SLinus Torvalds         limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
21691da177e4SLinus Torvalds 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
21701da177e4SLinus Torvalds 		send_sig(SIGXFSZ, current, 0);
21711da177e4SLinus Torvalds 		goto out;
21721da177e4SLinus Torvalds 	}
21731da177e4SLinus Torvalds 	if (size > inode->i_sb->s_maxbytes)
21741da177e4SLinus Torvalds 		goto out;
21751da177e4SLinus Torvalds 
217605eb0b51SOGAWA Hirofumi 	err = -ENOMEM;
217705eb0b51SOGAWA Hirofumi 	page = grab_cache_page(mapping, index);
217805eb0b51SOGAWA Hirofumi 	if (!page)
217905eb0b51SOGAWA Hirofumi 		goto out;
218005eb0b51SOGAWA Hirofumi 	err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
218105eb0b51SOGAWA Hirofumi 	if (err) {
218205eb0b51SOGAWA Hirofumi 		/*
218305eb0b51SOGAWA Hirofumi 		 * ->prepare_write() may have instantiated a few blocks
218405eb0b51SOGAWA Hirofumi 		 * outside i_size.  Trim these off again.
218505eb0b51SOGAWA Hirofumi 		 */
218605eb0b51SOGAWA Hirofumi 		unlock_page(page);
218705eb0b51SOGAWA Hirofumi 		page_cache_release(page);
218805eb0b51SOGAWA Hirofumi 		vmtruncate(inode, inode->i_size);
218905eb0b51SOGAWA Hirofumi 		goto out;
219005eb0b51SOGAWA Hirofumi 	}
219105eb0b51SOGAWA Hirofumi 
219205eb0b51SOGAWA Hirofumi 	err = mapping->a_ops->commit_write(NULL, page, offset, offset);
219305eb0b51SOGAWA Hirofumi 
219405eb0b51SOGAWA Hirofumi 	unlock_page(page);
219505eb0b51SOGAWA Hirofumi 	page_cache_release(page);
219605eb0b51SOGAWA Hirofumi 	if (err > 0)
219705eb0b51SOGAWA Hirofumi 		err = 0;
219805eb0b51SOGAWA Hirofumi out:
219905eb0b51SOGAWA Hirofumi 	return err;
220005eb0b51SOGAWA Hirofumi }
220105eb0b51SOGAWA Hirofumi 
220205eb0b51SOGAWA Hirofumi int generic_cont_expand(struct inode *inode, loff_t size)
220305eb0b51SOGAWA Hirofumi {
220405eb0b51SOGAWA Hirofumi 	pgoff_t index;
220505eb0b51SOGAWA Hirofumi 	unsigned int offset;
220605eb0b51SOGAWA Hirofumi 
22071da177e4SLinus Torvalds 	offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
22081da177e4SLinus Torvalds 
22091da177e4SLinus Torvalds 	/* Ugh.  In prepare/commit_write, if from==to==start of block, we
22101da177e4SLinus Torvalds 	 * skip the prepare.  Make sure we never send an offset for the start
22111da177e4SLinus Torvalds 	 * of a block.
22121da177e4SLinus Torvalds 	 */
22131da177e4SLinus Torvalds 	if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
221405eb0b51SOGAWA Hirofumi 		/* caller must handle this extra byte. */
22151da177e4SLinus Torvalds 		offset++;
22161da177e4SLinus Torvalds 	}
22171da177e4SLinus Torvalds 	index = size >> PAGE_CACHE_SHIFT;
221805eb0b51SOGAWA Hirofumi 
221905eb0b51SOGAWA Hirofumi 	return __generic_cont_expand(inode, size, index, offset);
22201da177e4SLinus Torvalds }
222105eb0b51SOGAWA Hirofumi 
222205eb0b51SOGAWA Hirofumi int generic_cont_expand_simple(struct inode *inode, loff_t size)
222305eb0b51SOGAWA Hirofumi {
222405eb0b51SOGAWA Hirofumi 	loff_t pos = size - 1;
222505eb0b51SOGAWA Hirofumi 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
222605eb0b51SOGAWA Hirofumi 	unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
222705eb0b51SOGAWA Hirofumi 
222805eb0b51SOGAWA Hirofumi 	/* prepare/commit_write can handle this even if from==to==start of block. */
222905eb0b51SOGAWA Hirofumi 	return __generic_cont_expand(inode, size, index, offset);
22301da177e4SLinus Torvalds }
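
/*
 * Editor's example (a sketch; myfs_setattr is hypothetical): a
 * filesystem that cannot represent holes would expand the file from its
 * ->setattr when ATTR_SIZE grows it, so the gap up to the new EOF gets
 * instantiated and zeroed.  generic_cont_expand_simple() is used here
 * because it avoids the extra-byte quirk noted above.
 */
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err = 0;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size)
		err = generic_cont_expand_simple(inode, attr->ia_size);
	if (!err)
		err = inode_setattr(inode, attr);
	return err;
}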
22311da177e4SLinus Torvalds 
22321da177e4SLinus Torvalds /*
22331da177e4SLinus Torvalds  * For moronic filesystems that do not allow holes in files.
22341da177e4SLinus Torvalds  * We may have to extend the file.
22351da177e4SLinus Torvalds  */
22361da177e4SLinus Torvalds 
22371da177e4SLinus Torvalds int cont_prepare_write(struct page *page, unsigned offset,
22381da177e4SLinus Torvalds 		unsigned to, get_block_t *get_block, loff_t *bytes)
22391da177e4SLinus Torvalds {
22401da177e4SLinus Torvalds 	struct address_space *mapping = page->mapping;
22411da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
22421da177e4SLinus Torvalds 	struct page *new_page;
22431da177e4SLinus Torvalds 	pgoff_t pgpos;
22441da177e4SLinus Torvalds 	long status;
22451da177e4SLinus Torvalds 	unsigned zerofrom;
22461da177e4SLinus Torvalds 	unsigned blocksize = 1 << inode->i_blkbits;
22471da177e4SLinus Torvalds 	void *kaddr;
22481da177e4SLinus Torvalds 
22491da177e4SLinus Torvalds 	while (page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
22501da177e4SLinus Torvalds 		status = -ENOMEM;
22511da177e4SLinus Torvalds 		new_page = grab_cache_page(mapping, pgpos);
22521da177e4SLinus Torvalds 		if (!new_page)
22531da177e4SLinus Torvalds 			goto out;
22541da177e4SLinus Torvalds 		/* we might sleep */
22551da177e4SLinus Torvalds 		if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
22561da177e4SLinus Torvalds 			unlock_page(new_page);
22571da177e4SLinus Torvalds 			page_cache_release(new_page);
22581da177e4SLinus Torvalds 			continue;
22591da177e4SLinus Torvalds 		}
22601da177e4SLinus Torvalds 		zerofrom = *bytes & ~PAGE_CACHE_MASK;
22611da177e4SLinus Torvalds 		if (zerofrom & (blocksize-1)) {
22621da177e4SLinus Torvalds 			*bytes |= (blocksize-1);
22631da177e4SLinus Torvalds 			(*bytes)++;
22641da177e4SLinus Torvalds 		}
22651da177e4SLinus Torvalds 		status = __block_prepare_write(inode, new_page, zerofrom,
22661da177e4SLinus Torvalds 						PAGE_CACHE_SIZE, get_block);
22671da177e4SLinus Torvalds 		if (status)
22681da177e4SLinus Torvalds 			goto out_unmap;
22691da177e4SLinus Torvalds 		kaddr = kmap_atomic(new_page, KM_USER0);
22701da177e4SLinus Torvalds 		memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
22711da177e4SLinus Torvalds 		flush_dcache_page(new_page);
22721da177e4SLinus Torvalds 		kunmap_atomic(kaddr, KM_USER0);
22731da177e4SLinus Torvalds 		generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
22741da177e4SLinus Torvalds 		unlock_page(new_page);
22751da177e4SLinus Torvalds 		page_cache_release(new_page);
22761da177e4SLinus Torvalds 	}
22771da177e4SLinus Torvalds 
22781da177e4SLinus Torvalds 	if (page->index < pgpos) {
22791da177e4SLinus Torvalds 		/* completely inside the area */
22801da177e4SLinus Torvalds 		zerofrom = offset;
22811da177e4SLinus Torvalds 	} else {
22821da177e4SLinus Torvalds 		/* page covers the boundary, find the boundary offset */
22831da177e4SLinus Torvalds 		zerofrom = *bytes & ~PAGE_CACHE_MASK;
22841da177e4SLinus Torvalds 
22851da177e4SLinus Torvalds 		/* if we are expanding the file, the last block will be filled */
22861da177e4SLinus Torvalds 		if (to > zerofrom && (zerofrom & (blocksize-1))) {
22871da177e4SLinus Torvalds 			*bytes |= (blocksize-1);
22881da177e4SLinus Torvalds 			(*bytes)++;
22891da177e4SLinus Torvalds 		}
22901da177e4SLinus Torvalds 
22911da177e4SLinus Torvalds 		/* starting below the boundary? Nothing to zero out */
22921da177e4SLinus Torvalds 		if (offset <= zerofrom)
22931da177e4SLinus Torvalds 			zerofrom = offset;
22941da177e4SLinus Torvalds 	}
22951da177e4SLinus Torvalds 	status = __block_prepare_write(inode, page, zerofrom, to, get_block);
22961da177e4SLinus Torvalds 	if (status)
22971da177e4SLinus Torvalds 		goto out1;
22981da177e4SLinus Torvalds 	if (zerofrom < offset) {
22991da177e4SLinus Torvalds 		kaddr = kmap_atomic(page, KM_USER0);
23001da177e4SLinus Torvalds 		memset(kaddr+zerofrom, 0, offset-zerofrom);
23011da177e4SLinus Torvalds 		flush_dcache_page(page);
23021da177e4SLinus Torvalds 		kunmap_atomic(kaddr, KM_USER0);
23031da177e4SLinus Torvalds 		__block_commit_write(inode, page, zerofrom, offset);
23041da177e4SLinus Torvalds 	}
23051da177e4SLinus Torvalds 	return 0;
23061da177e4SLinus Torvalds out1:
23071da177e4SLinus Torvalds 	ClearPageUptodate(page);
23081da177e4SLinus Torvalds 	return status;
23091da177e4SLinus Torvalds 
23101da177e4SLinus Torvalds out_unmap:
23111da177e4SLinus Torvalds 	ClearPageUptodate(new_page);
23121da177e4SLinus Torvalds 	unlock_page(new_page);
23131da177e4SLinus Torvalds 	page_cache_release(new_page);
23141da177e4SLinus Torvalds out:
23151da177e4SLinus Torvalds 	return status;
23161da177e4SLinus Torvalds }
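
/*
 * Editor's example, a minimal sketch: a hole-less filesystem points its
 * ->prepare_write at cont_prepare_write(), passing a pointer to its own
 * "bytes instantiated so far" marker.  MYFS_I()->mmu_private stands in
 * for whatever per-inode field the filesystem keeps for that purpose.
 */
static int myfs_prepare_write(struct file *file, struct page *page,
			unsigned from, unsigned to)
{
	return cont_prepare_write(page, from, to, myfs_get_block,
				&MYFS_I(page->mapping->host)->mmu_private);
}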
23171da177e4SLinus Torvalds 
23181da177e4SLinus Torvalds int block_prepare_write(struct page *page, unsigned from, unsigned to,
23191da177e4SLinus Torvalds 			get_block_t *get_block)
23201da177e4SLinus Torvalds {
23211da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
23221da177e4SLinus Torvalds 	int err = __block_prepare_write(inode, page, from, to, get_block);
23231da177e4SLinus Torvalds 	if (err)
23241da177e4SLinus Torvalds 		ClearPageUptodate(page);
23251da177e4SLinus Torvalds 	return err;
23261da177e4SLinus Torvalds }
23271da177e4SLinus Torvalds 
23281da177e4SLinus Torvalds int block_commit_write(struct page *page, unsigned from, unsigned to)
23291da177e4SLinus Torvalds {
23301da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
23311da177e4SLinus Torvalds 	__block_commit_write(inode, page, from, to);
23321da177e4SLinus Torvalds 	return 0;
23331da177e4SLinus Torvalds }
23341da177e4SLinus Torvalds 
23351da177e4SLinus Torvalds int generic_commit_write(struct file *file, struct page *page,
23361da177e4SLinus Torvalds 		unsigned from, unsigned to)
23371da177e4SLinus Torvalds {
23381da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
23391da177e4SLinus Torvalds 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
23401da177e4SLinus Torvalds 	__block_commit_write(inode, page, from, to);
23411da177e4SLinus Torvalds 	/*
23421da177e4SLinus Torvalds 	 * No need to use i_size_read() here, the i_size
23431b1dcc1bSJes Sorensen 	 * cannot change under us because we hold i_mutex.
23441da177e4SLinus Torvalds 	 */
23451da177e4SLinus Torvalds 	if (pos > inode->i_size) {
23461da177e4SLinus Torvalds 		i_size_write(inode, pos);
23471da177e4SLinus Torvalds 		mark_inode_dirty(inode);
23481da177e4SLinus Torvalds 	}
23491da177e4SLinus Torvalds 	return 0;
23501da177e4SLinus Torvalds }
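
/*
 * Editor's example: how the helpers in this file typically compose.  A
 * simple buffer-backed filesystem can often use generic_commit_write()
 * and block_sync_page() directly, supplying its own get_block only
 * through thin wrappers.  All myfs_* names are hypothetical;
 * myfs_writepage and myfs_bmap are sketched further down, after
 * block_write_full_page() and generic_block_bmap().
 */
static struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= myfs_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= myfs_bmap,
};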
23511da177e4SLinus Torvalds 
23521da177e4SLinus Torvalds 
23531da177e4SLinus Torvalds /*
23541da177e4SLinus Torvalds  * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
23551da177e4SLinus Torvalds  * immediately, while under the page lock.  So it needs a special end_io
23561da177e4SLinus Torvalds  * handler which does not touch the bh after unlocking it.
23571da177e4SLinus Torvalds  *
23581da177e4SLinus Torvalds  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
23591da177e4SLinus Torvalds  * a race there is benign: unlock_buffer() only uses the bh's address for
23601da177e4SLinus Torvalds  * hashing after unlocking the buffer, so it doesn't actually touch the bh
23611da177e4SLinus Torvalds  * itself.
23621da177e4SLinus Torvalds  */
23631da177e4SLinus Torvalds static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
23641da177e4SLinus Torvalds {
23651da177e4SLinus Torvalds 	if (uptodate) {
23661da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
23671da177e4SLinus Torvalds 	} else {
23681da177e4SLinus Torvalds 		/* This happens due to failed READA attempts. */
23691da177e4SLinus Torvalds 		clear_buffer_uptodate(bh);
23701da177e4SLinus Torvalds 	}
23711da177e4SLinus Torvalds 	unlock_buffer(bh);
23721da177e4SLinus Torvalds }
23731da177e4SLinus Torvalds 
23741da177e4SLinus Torvalds /*
23751da177e4SLinus Torvalds  * On entry, no part of the page is uptodate.
23761da177e4SLinus Torvalds  * On exit, the page is fully uptodate in the areas outside (from, to).
23771da177e4SLinus Torvalds  */
23781da177e4SLinus Torvalds int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
23791da177e4SLinus Torvalds 			get_block_t *get_block)
23801da177e4SLinus Torvalds {
23811da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
23821da177e4SLinus Torvalds 	const unsigned blkbits = inode->i_blkbits;
23831da177e4SLinus Torvalds 	const unsigned blocksize = 1 << blkbits;
23841da177e4SLinus Torvalds 	struct buffer_head map_bh;
23851da177e4SLinus Torvalds 	struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
23861da177e4SLinus Torvalds 	unsigned block_in_page;
23871da177e4SLinus Torvalds 	unsigned block_start;
23881da177e4SLinus Torvalds 	sector_t block_in_file;
23891da177e4SLinus Torvalds 	char *kaddr;
23901da177e4SLinus Torvalds 	int nr_reads = 0;
23911da177e4SLinus Torvalds 	int i;
23921da177e4SLinus Torvalds 	int ret = 0;
23931da177e4SLinus Torvalds 	int is_mapped_to_disk = 1;
23941da177e4SLinus Torvalds 	int dirtied_it = 0;
23951da177e4SLinus Torvalds 
23961da177e4SLinus Torvalds 	if (PageMappedToDisk(page))
23971da177e4SLinus Torvalds 		return 0;
23981da177e4SLinus Torvalds 
23991da177e4SLinus Torvalds 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
24001da177e4SLinus Torvalds 	map_bh.b_page = page;
24011da177e4SLinus Torvalds 
24021da177e4SLinus Torvalds 	/*
24031da177e4SLinus Torvalds 	 * We loop across all blocks in the page, whether or not they are
24041da177e4SLinus Torvalds 	 * part of the affected region.  This is so we can discover if the
24051da177e4SLinus Torvalds 	 * page is fully mapped-to-disk.
24061da177e4SLinus Torvalds 	 */
24071da177e4SLinus Torvalds 	for (block_start = 0, block_in_page = 0;
24081da177e4SLinus Torvalds 		  block_start < PAGE_CACHE_SIZE;
24091da177e4SLinus Torvalds 		  block_in_page++, block_start += blocksize) {
24101da177e4SLinus Torvalds 		unsigned block_end = block_start + blocksize;
24111da177e4SLinus Torvalds 		int create;
24121da177e4SLinus Torvalds 
24131da177e4SLinus Torvalds 		map_bh.b_state = 0;
24141da177e4SLinus Torvalds 		create = 1;
24151da177e4SLinus Torvalds 		if (block_start >= to)
24161da177e4SLinus Torvalds 			create = 0;
24171da177e4SLinus Torvalds 		ret = get_block(inode, block_in_file + block_in_page,
24181da177e4SLinus Torvalds 					&map_bh, create);
24191da177e4SLinus Torvalds 		if (ret)
24201da177e4SLinus Torvalds 			goto failed;
24211da177e4SLinus Torvalds 		if (!buffer_mapped(&map_bh))
24221da177e4SLinus Torvalds 			is_mapped_to_disk = 0;
24231da177e4SLinus Torvalds 		if (buffer_new(&map_bh))
24241da177e4SLinus Torvalds 			unmap_underlying_metadata(map_bh.b_bdev,
24251da177e4SLinus Torvalds 							map_bh.b_blocknr);
24261da177e4SLinus Torvalds 		if (PageUptodate(page))
24271da177e4SLinus Torvalds 			continue;
24281da177e4SLinus Torvalds 		if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
24291da177e4SLinus Torvalds 			kaddr = kmap_atomic(page, KM_USER0);
24301da177e4SLinus Torvalds 			if (block_start < from) {
24311da177e4SLinus Torvalds 				memset(kaddr+block_start, 0, from-block_start);
24321da177e4SLinus Torvalds 				dirtied_it = 1;
24331da177e4SLinus Torvalds 			}
24341da177e4SLinus Torvalds 			if (block_end > to) {
24351da177e4SLinus Torvalds 				memset(kaddr + to, 0, block_end - to);
24361da177e4SLinus Torvalds 				dirtied_it = 1;
24371da177e4SLinus Torvalds 			}
24381da177e4SLinus Torvalds 			flush_dcache_page(page);
24391da177e4SLinus Torvalds 			kunmap_atomic(kaddr, KM_USER0);
24401da177e4SLinus Torvalds 			continue;
24411da177e4SLinus Torvalds 		}
24421da177e4SLinus Torvalds 		if (buffer_uptodate(&map_bh))
24431da177e4SLinus Torvalds 			continue;	/* reiserfs does this */
24441da177e4SLinus Torvalds 		if (block_start < from || block_end > to) {
24451da177e4SLinus Torvalds 			struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
24461da177e4SLinus Torvalds 
24471da177e4SLinus Torvalds 			if (!bh) {
24481da177e4SLinus Torvalds 				ret = -ENOMEM;
24491da177e4SLinus Torvalds 				goto failed;
24501da177e4SLinus Torvalds 			}
24511da177e4SLinus Torvalds 			bh->b_state = map_bh.b_state;
24521da177e4SLinus Torvalds 			atomic_set(&bh->b_count, 0);
24531da177e4SLinus Torvalds 			bh->b_this_page = NULL;
24541da177e4SLinus Torvalds 			bh->b_page = page;
24551da177e4SLinus Torvalds 			bh->b_blocknr = map_bh.b_blocknr;
24561da177e4SLinus Torvalds 			bh->b_size = blocksize;
24571da177e4SLinus Torvalds 			bh->b_data = (char *)(long)block_start;
24581da177e4SLinus Torvalds 			bh->b_bdev = map_bh.b_bdev;
24591da177e4SLinus Torvalds 			bh->b_private = NULL;
24601da177e4SLinus Torvalds 			read_bh[nr_reads++] = bh;
24611da177e4SLinus Torvalds 		}
24621da177e4SLinus Torvalds 	}
24631da177e4SLinus Torvalds 
24641da177e4SLinus Torvalds 	if (nr_reads) {
24651da177e4SLinus Torvalds 		struct buffer_head *bh;
24661da177e4SLinus Torvalds 
24671da177e4SLinus Torvalds 		/*
24681da177e4SLinus Torvalds 		 * The page is locked, so these buffers are protected from
24691da177e4SLinus Torvalds 		 * any VM or truncate activity.  Hence we don't need to care
24701da177e4SLinus Torvalds 		 * for the buffer_head refcounts.
24711da177e4SLinus Torvalds 		 */
24721da177e4SLinus Torvalds 		for (i = 0; i < nr_reads; i++) {
24731da177e4SLinus Torvalds 			bh = read_bh[i];
24741da177e4SLinus Torvalds 			lock_buffer(bh);
24751da177e4SLinus Torvalds 			bh->b_end_io = end_buffer_read_nobh;
24761da177e4SLinus Torvalds 			submit_bh(READ, bh);
24771da177e4SLinus Torvalds 		}
24781da177e4SLinus Torvalds 		for (i = 0; i < nr_reads; i++) {
24791da177e4SLinus Torvalds 			bh = read_bh[i];
24801da177e4SLinus Torvalds 			wait_on_buffer(bh);
24811da177e4SLinus Torvalds 			if (!buffer_uptodate(bh))
24821da177e4SLinus Torvalds 				ret = -EIO;
24831da177e4SLinus Torvalds 			free_buffer_head(bh);
24841da177e4SLinus Torvalds 			read_bh[i] = NULL;
24851da177e4SLinus Torvalds 		}
24861da177e4SLinus Torvalds 		if (ret)
24871da177e4SLinus Torvalds 			goto failed;
24881da177e4SLinus Torvalds 	}
24891da177e4SLinus Torvalds 
24901da177e4SLinus Torvalds 	if (is_mapped_to_disk)
24911da177e4SLinus Torvalds 		SetPageMappedToDisk(page);
24921da177e4SLinus Torvalds 	SetPageUptodate(page);
24931da177e4SLinus Torvalds 
24941da177e4SLinus Torvalds 	/*
24951da177e4SLinus Torvalds 	 * Setting the page dirty here isn't necessary for the prepare_write
24961da177e4SLinus Torvalds 	 * function - commit_write will do that.  But if/when this function is
24971da177e4SLinus Torvalds 	 * used within the pagefault handler to ensure that all mmapped pages
24981da177e4SLinus Torvalds 	 * have backing space in the filesystem, we will need to dirty the page
24991da177e4SLinus Torvalds 	 * if its contents were altered.
25001da177e4SLinus Torvalds 	 */
25011da177e4SLinus Torvalds 	if (dirtied_it)
25021da177e4SLinus Torvalds 		set_page_dirty(page);
25031da177e4SLinus Torvalds 
25041da177e4SLinus Torvalds 	return 0;
25051da177e4SLinus Torvalds 
25061da177e4SLinus Torvalds failed:
25071da177e4SLinus Torvalds 	for (i = 0; i < nr_reads; i++) {
25081da177e4SLinus Torvalds 		if (read_bh[i])
25091da177e4SLinus Torvalds 			free_buffer_head(read_bh[i]);
25101da177e4SLinus Torvalds 	}
25111da177e4SLinus Torvalds 
25121da177e4SLinus Torvalds 	/*
25131da177e4SLinus Torvalds 	 * Error recovery is pretty slack.  Clear the page and mark it dirty
25141da177e4SLinus Torvalds 	 * so we'll later zero out any blocks which _were_ allocated.
25151da177e4SLinus Torvalds 	 */
25161da177e4SLinus Torvalds 	kaddr = kmap_atomic(page, KM_USER0);
25171da177e4SLinus Torvalds 	memset(kaddr, 0, PAGE_CACHE_SIZE);
25181da177e4SLinus Torvalds 	kunmap_atomic(kaddr, KM_USER0);
25191da177e4SLinus Torvalds 	SetPageUptodate(page);
25201da177e4SLinus Torvalds 	set_page_dirty(page);
25211da177e4SLinus Torvalds 	return ret;
25221da177e4SLinus Torvalds }
25231da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_prepare_write);
25241da177e4SLinus Torvalds 
25251da177e4SLinus Torvalds int nobh_commit_write(struct file *file, struct page *page,
25261da177e4SLinus Torvalds 		unsigned from, unsigned to)
25271da177e4SLinus Torvalds {
25281da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
25291da177e4SLinus Torvalds 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
25301da177e4SLinus Torvalds 
25311da177e4SLinus Torvalds 	set_page_dirty(page);
25321da177e4SLinus Torvalds 	if (pos > inode->i_size) {
25331da177e4SLinus Torvalds 		i_size_write(inode, pos);
25341da177e4SLinus Torvalds 		mark_inode_dirty(inode);
25351da177e4SLinus Torvalds 	}
25361da177e4SLinus Torvalds 	return 0;
25371da177e4SLinus Torvalds }
25381da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_commit_write);
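
/*
 * Editor's example (sketch): wiring the nobh variants into an
 * address_space.  The filesystem wraps nobh_prepare_write() to supply
 * its get_block, while nobh_commit_write() can be plugged in directly:
 *
 *	.prepare_write	= myfs_nobh_prepare_write,
 *	.commit_write	= nobh_commit_write,
 */
static int myfs_nobh_prepare_write(struct file *file, struct page *page,
			unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, myfs_get_block);
}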
25391da177e4SLinus Torvalds 
25401da177e4SLinus Torvalds /*
25411da177e4SLinus Torvalds  * nobh_writepage() - based on block_write_full_page() except
25421da177e4SLinus Torvalds  * that it tries to operate without attaching bufferheads to
25431da177e4SLinus Torvalds  * the page.
25441da177e4SLinus Torvalds  */
25451da177e4SLinus Torvalds int nobh_writepage(struct page *page, get_block_t *get_block,
25461da177e4SLinus Torvalds 			struct writeback_control *wbc)
25471da177e4SLinus Torvalds {
25481da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
25491da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
25501da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
25511da177e4SLinus Torvalds 	unsigned offset;
25521da177e4SLinus Torvalds 	void *kaddr;
25531da177e4SLinus Torvalds 	int ret;
25541da177e4SLinus Torvalds 
25551da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
25561da177e4SLinus Torvalds 	if (page->index < end_index)
25571da177e4SLinus Torvalds 		goto out;
25581da177e4SLinus Torvalds 
25591da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
25601da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
25611da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
25621da177e4SLinus Torvalds 		/*
25631da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
25641da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
25651da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
25661da177e4SLinus Torvalds 		 */
25671da177e4SLinus Torvalds #if 0
25681da177e4SLinus Torvalds 		/* Not really sure about this - do we need this? */
25691da177e4SLinus Torvalds 		if (page->mapping->a_ops->invalidatepage)
25701da177e4SLinus Torvalds 			page->mapping->a_ops->invalidatepage(page, offset);
25711da177e4SLinus Torvalds #endif
25721da177e4SLinus Torvalds 		unlock_page(page);
25731da177e4SLinus Torvalds 		return 0; /* don't care */
25741da177e4SLinus Torvalds 	}
25751da177e4SLinus Torvalds 
25761da177e4SLinus Torvalds 	/*
25771da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
25781da177e4SLinus Torvalds 	 * writepage invocation because it may be mmapped.  "A file is mapped
25791da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
25801da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
25811da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
25821da177e4SLinus Torvalds 	 */
25831da177e4SLinus Torvalds 	kaddr = kmap_atomic(page, KM_USER0);
25841da177e4SLinus Torvalds 	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
25851da177e4SLinus Torvalds 	flush_dcache_page(page);
25861da177e4SLinus Torvalds 	kunmap_atomic(kaddr, KM_USER0);
25871da177e4SLinus Torvalds out:
25881da177e4SLinus Torvalds 	ret = mpage_writepage(page, get_block, wbc);
25891da177e4SLinus Torvalds 	if (ret == -EAGAIN)
25901da177e4SLinus Torvalds 		ret = __block_write_full_page(inode, page, get_block, wbc);
25911da177e4SLinus Torvalds 	return ret;
25921da177e4SLinus Torvalds }
25931da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_writepage);
25941da177e4SLinus Torvalds 
25951da177e4SLinus Torvalds /*
25961da177e4SLinus Torvalds  * This function assumes that ->prepare_write() uses nobh_prepare_write().
25971da177e4SLinus Torvalds  */
25981da177e4SLinus Torvalds int nobh_truncate_page(struct address_space *mapping, loff_t from)
25991da177e4SLinus Torvalds {
26001da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
26011da177e4SLinus Torvalds 	unsigned blocksize = 1 << inode->i_blkbits;
26021da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
26031da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
26041da177e4SLinus Torvalds 	unsigned to;
26051da177e4SLinus Torvalds 	struct page *page;
26061da177e4SLinus Torvalds 	struct address_space_operations *a_ops = mapping->a_ops;
26071da177e4SLinus Torvalds 	char *kaddr;
26081da177e4SLinus Torvalds 	int ret = 0;
26091da177e4SLinus Torvalds 
26101da177e4SLinus Torvalds 	if ((offset & (blocksize - 1)) == 0)
26111da177e4SLinus Torvalds 		goto out;
26121da177e4SLinus Torvalds 
26131da177e4SLinus Torvalds 	ret = -ENOMEM;
26141da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
26151da177e4SLinus Torvalds 	if (!page)
26161da177e4SLinus Torvalds 		goto out;
26171da177e4SLinus Torvalds 
26181da177e4SLinus Torvalds 	to = (offset + blocksize) & ~(blocksize - 1);
26191da177e4SLinus Torvalds 	ret = a_ops->prepare_write(NULL, page, offset, to);
26201da177e4SLinus Torvalds 	if (ret == 0) {
26211da177e4SLinus Torvalds 		kaddr = kmap_atomic(page, KM_USER0);
26221da177e4SLinus Torvalds 		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
26231da177e4SLinus Torvalds 		flush_dcache_page(page);
26241da177e4SLinus Torvalds 		kunmap_atomic(kaddr, KM_USER0);
26251da177e4SLinus Torvalds 		set_page_dirty(page);
26261da177e4SLinus Torvalds 	}
26271da177e4SLinus Torvalds 	unlock_page(page);
26281da177e4SLinus Torvalds 	page_cache_release(page);
26291da177e4SLinus Torvalds out:
26301da177e4SLinus Torvalds 	return ret;
26311da177e4SLinus Torvalds }
26321da177e4SLinus Torvalds EXPORT_SYMBOL(nobh_truncate_page);
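
/*
 * Editor's example (sketch): a nobh filesystem's truncate path zeroes
 * the partial last block like this before freeing blocks; the return
 * value is ignored here for brevity, as some callers do.
 */
static void myfs_nobh_truncate(struct inode *inode)
{
	nobh_truncate_page(inode->i_mapping, inode->i_size);
	/* ... the filesystem's own block deallocation follows ... */
}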
26331da177e4SLinus Torvalds 
26341da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping,
26351da177e4SLinus Torvalds 			loff_t from, get_block_t *get_block)
26361da177e4SLinus Torvalds {
26371da177e4SLinus Torvalds 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
26381da177e4SLinus Torvalds 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
26391da177e4SLinus Torvalds 	unsigned blocksize;
264054b21a79SAndrew Morton 	sector_t iblock;
26411da177e4SLinus Torvalds 	unsigned length, pos;
26421da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
26431da177e4SLinus Torvalds 	struct page *page;
26441da177e4SLinus Torvalds 	struct buffer_head *bh;
26451da177e4SLinus Torvalds 	void *kaddr;
26461da177e4SLinus Torvalds 	int err;
26471da177e4SLinus Torvalds 
26481da177e4SLinus Torvalds 	blocksize = 1 << inode->i_blkbits;
26491da177e4SLinus Torvalds 	length = offset & (blocksize - 1);
26501da177e4SLinus Torvalds 
26511da177e4SLinus Torvalds 	/* Block boundary? Nothing to do */
26521da177e4SLinus Torvalds 	if (!length)
26531da177e4SLinus Torvalds 		return 0;
26541da177e4SLinus Torvalds 
26551da177e4SLinus Torvalds 	length = blocksize - length;
265654b21a79SAndrew Morton 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
26571da177e4SLinus Torvalds 
26581da177e4SLinus Torvalds 	page = grab_cache_page(mapping, index);
26591da177e4SLinus Torvalds 	err = -ENOMEM;
26601da177e4SLinus Torvalds 	if (!page)
26611da177e4SLinus Torvalds 		goto out;
26621da177e4SLinus Torvalds 
26631da177e4SLinus Torvalds 	if (!page_has_buffers(page))
26641da177e4SLinus Torvalds 		create_empty_buffers(page, blocksize, 0);
26651da177e4SLinus Torvalds 
26661da177e4SLinus Torvalds 	/* Find the buffer that contains "offset" */
26671da177e4SLinus Torvalds 	bh = page_buffers(page);
26681da177e4SLinus Torvalds 	pos = blocksize;
26691da177e4SLinus Torvalds 	while (offset >= pos) {
26701da177e4SLinus Torvalds 		bh = bh->b_this_page;
26711da177e4SLinus Torvalds 		iblock++;
26721da177e4SLinus Torvalds 		pos += blocksize;
26731da177e4SLinus Torvalds 	}
26741da177e4SLinus Torvalds 
26751da177e4SLinus Torvalds 	err = 0;
26761da177e4SLinus Torvalds 	if (!buffer_mapped(bh)) {
26771da177e4SLinus Torvalds 		err = get_block(inode, iblock, bh, 0);
26781da177e4SLinus Torvalds 		if (err)
26791da177e4SLinus Torvalds 			goto unlock;
26801da177e4SLinus Torvalds 		/* unmapped? It's a hole - nothing to do */
26811da177e4SLinus Torvalds 		if (!buffer_mapped(bh))
26821da177e4SLinus Torvalds 			goto unlock;
26831da177e4SLinus Torvalds 	}
26841da177e4SLinus Torvalds 
26851da177e4SLinus Torvalds 	/* Ok, it's mapped. Make sure it's up-to-date */
26861da177e4SLinus Torvalds 	if (PageUptodate(page))
26871da177e4SLinus Torvalds 		set_buffer_uptodate(bh);
26881da177e4SLinus Torvalds 
26891da177e4SLinus Torvalds 	if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
26901da177e4SLinus Torvalds 		err = -EIO;
26911da177e4SLinus Torvalds 		ll_rw_block(READ, 1, &bh);
26921da177e4SLinus Torvalds 		wait_on_buffer(bh);
26931da177e4SLinus Torvalds 		/* Uhhuh. Read error. Complain and punt. */
26941da177e4SLinus Torvalds 		if (!buffer_uptodate(bh))
26951da177e4SLinus Torvalds 			goto unlock;
26961da177e4SLinus Torvalds 	}
26971da177e4SLinus Torvalds 
26981da177e4SLinus Torvalds 	kaddr = kmap_atomic(page, KM_USER0);
26991da177e4SLinus Torvalds 	memset(kaddr + offset, 0, length);
27001da177e4SLinus Torvalds 	flush_dcache_page(page);
27011da177e4SLinus Torvalds 	kunmap_atomic(kaddr, KM_USER0);
27021da177e4SLinus Torvalds 
27031da177e4SLinus Torvalds 	mark_buffer_dirty(bh);
27041da177e4SLinus Torvalds 	err = 0;
27051da177e4SLinus Torvalds 
27061da177e4SLinus Torvalds unlock:
27071da177e4SLinus Torvalds 	unlock_page(page);
27081da177e4SLinus Torvalds 	page_cache_release(page);
27091da177e4SLinus Torvalds out:
27101da177e4SLinus Torvalds 	return err;
27111da177e4SLinus Torvalds }
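
/*
 * Editor's example, a sketch: the buffer-head based equivalent in a
 * filesystem's truncate path, with the hypothetical myfs_get_block
 * again supplying the block mapping.
 */
static int myfs_truncate_page(struct inode *inode)
{
	return block_truncate_page(inode->i_mapping, inode->i_size,
					myfs_get_block);
}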
27121da177e4SLinus Torvalds 
27131da177e4SLinus Torvalds /*
27141da177e4SLinus Torvalds  * The generic ->writepage function for buffer-backed address_spaces
27151da177e4SLinus Torvalds  */
27161da177e4SLinus Torvalds int block_write_full_page(struct page *page, get_block_t *get_block,
27171da177e4SLinus Torvalds 			struct writeback_control *wbc)
27181da177e4SLinus Torvalds {
27191da177e4SLinus Torvalds 	struct inode * const inode = page->mapping->host;
27201da177e4SLinus Torvalds 	loff_t i_size = i_size_read(inode);
27211da177e4SLinus Torvalds 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
27221da177e4SLinus Torvalds 	unsigned offset;
27231da177e4SLinus Torvalds 	void *kaddr;
27241da177e4SLinus Torvalds 
27251da177e4SLinus Torvalds 	/* Is the page fully inside i_size? */
27261da177e4SLinus Torvalds 	if (page->index < end_index)
27271da177e4SLinus Torvalds 		return __block_write_full_page(inode, page, get_block, wbc);
27281da177e4SLinus Torvalds 
27291da177e4SLinus Torvalds 	/* Is the page fully outside i_size? (truncate in progress) */
27301da177e4SLinus Torvalds 	offset = i_size & (PAGE_CACHE_SIZE-1);
27311da177e4SLinus Torvalds 	if (page->index >= end_index+1 || !offset) {
27321da177e4SLinus Torvalds 		/*
27331da177e4SLinus Torvalds 		 * The page may have dirty, unmapped buffers.  For example,
27341da177e4SLinus Torvalds 		 * they may have been added in ext3_writepage().  Make them
27351da177e4SLinus Torvalds 		 * freeable here, so the page does not leak.
27361da177e4SLinus Torvalds 		 */
2737aaa4059bSJan Kara 		do_invalidatepage(page, 0);
27381da177e4SLinus Torvalds 		unlock_page(page);
27391da177e4SLinus Torvalds 		return 0; /* don't care */
27401da177e4SLinus Torvalds 	}
27411da177e4SLinus Torvalds 
27421da177e4SLinus Torvalds 	/*
27431da177e4SLinus Torvalds 	 * The page straddles i_size.  It must be zeroed out on each and every
27441da177e4SLinus Torvalds 	 * writepage invocation because it may be mmapped.  "A file is mapped
27451da177e4SLinus Torvalds 	 * in multiples of the page size.  For a file that is not a multiple of
27461da177e4SLinus Torvalds 	 * the  page size, the remaining memory is zeroed when mapped, and
27471da177e4SLinus Torvalds 	 * writes to that region are not written out to the file."
27481da177e4SLinus Torvalds 	 */
27491da177e4SLinus Torvalds 	kaddr = kmap_atomic(page, KM_USER0);
27501da177e4SLinus Torvalds 	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
27511da177e4SLinus Torvalds 	flush_dcache_page(page);
27521da177e4SLinus Torvalds 	kunmap_atomic(kaddr, KM_USER0);
27531da177e4SLinus Torvalds 	return __block_write_full_page(inode, page, get_block, wbc);
27541da177e4SLinus Torvalds }
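
/*
 * Editor's example (sketch): the ->writepage wrapper referenced in the
 * myfs_aops sketch above.  block_write_full_page() already handles the
 * i_size boundary cases, so the wrapper stays trivial.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}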
27551da177e4SLinus Torvalds 
27561da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
27571da177e4SLinus Torvalds 			    get_block_t *get_block)
27581da177e4SLinus Torvalds {
27591da177e4SLinus Torvalds 	struct buffer_head tmp;
27601da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
27611da177e4SLinus Torvalds 	tmp.b_state = 0;
27621da177e4SLinus Torvalds 	tmp.b_blocknr = 0;
27631da177e4SLinus Torvalds 	get_block(inode, block, &tmp, 0);
27641da177e4SLinus Torvalds 	return tmp.b_blocknr;
27651da177e4SLinus Torvalds }
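
/*
 * Editor's example (sketch): the matching ->bmap wrapper.  Note that
 * generic_block_bmap() reports a hole (block 0) if get_block() fails or
 * finds nothing, since tmp.b_blocknr starts out as zero.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}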
27661da177e4SLinus Torvalds 
27671da177e4SLinus Torvalds static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
27681da177e4SLinus Torvalds {
27691da177e4SLinus Torvalds 	struct buffer_head *bh = bio->bi_private;
27701da177e4SLinus Torvalds 
27711da177e4SLinus Torvalds 	if (bio->bi_size)
27721da177e4SLinus Torvalds 		return 1;
27731da177e4SLinus Torvalds 
27741da177e4SLinus Torvalds 	if (err == -EOPNOTSUPP) {
27751da177e4SLinus Torvalds 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
27761da177e4SLinus Torvalds 		set_bit(BH_Eopnotsupp, &bh->b_state);
27771da177e4SLinus Torvalds 	}
27781da177e4SLinus Torvalds 
27791da177e4SLinus Torvalds 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
27801da177e4SLinus Torvalds 	bio_put(bio);
27811da177e4SLinus Torvalds 	return 0;
27821da177e4SLinus Torvalds }
27831da177e4SLinus Torvalds 
27841da177e4SLinus Torvalds int submit_bh(int rw, struct buffer_head * bh)
27851da177e4SLinus Torvalds {
27861da177e4SLinus Torvalds 	struct bio *bio;
27871da177e4SLinus Torvalds 	int ret = 0;
27881da177e4SLinus Torvalds 
27891da177e4SLinus Torvalds 	BUG_ON(!buffer_locked(bh));
27901da177e4SLinus Torvalds 	BUG_ON(!buffer_mapped(bh));
27911da177e4SLinus Torvalds 	BUG_ON(!bh->b_end_io);
27921da177e4SLinus Torvalds 
27931da177e4SLinus Torvalds 	if (buffer_ordered(bh) && (rw == WRITE))
27941da177e4SLinus Torvalds 		rw = WRITE_BARRIER;
27951da177e4SLinus Torvalds 
27961da177e4SLinus Torvalds 	/*
27971da177e4SLinus Torvalds 	 * Only clear out a write error when rewriting; should this
27981da177e4SLinus Torvalds 	 * include WRITE_SYNC as well?
27991da177e4SLinus Torvalds 	 */
28001da177e4SLinus Torvalds 	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
28011da177e4SLinus Torvalds 		clear_buffer_write_io_error(bh);
28021da177e4SLinus Torvalds 
28031da177e4SLinus Torvalds 	/*
28041da177e4SLinus Torvalds 	 * from here on down, it's all bio -- do the initial mapping,
28051da177e4SLinus Torvalds 	 * submit_bio -> generic_make_request may further map this bio around
28061da177e4SLinus Torvalds 	 */
28071da177e4SLinus Torvalds 	bio = bio_alloc(GFP_NOIO, 1);
28081da177e4SLinus Torvalds 
28091da177e4SLinus Torvalds 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
28101da177e4SLinus Torvalds 	bio->bi_bdev = bh->b_bdev;
28111da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_page = bh->b_page;
28121da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_len = bh->b_size;
28131da177e4SLinus Torvalds 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
28141da177e4SLinus Torvalds 
28151da177e4SLinus Torvalds 	bio->bi_vcnt = 1;
28161da177e4SLinus Torvalds 	bio->bi_idx = 0;
28171da177e4SLinus Torvalds 	bio->bi_size = bh->b_size;
28181da177e4SLinus Torvalds 
28191da177e4SLinus Torvalds 	bio->bi_end_io = end_bio_bh_io_sync;
28201da177e4SLinus Torvalds 	bio->bi_private = bh;
28211da177e4SLinus Torvalds 
28221da177e4SLinus Torvalds 	bio_get(bio);
28231da177e4SLinus Torvalds 	submit_bio(rw, bio);
28241da177e4SLinus Torvalds 
28251da177e4SLinus Torvalds 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
28261da177e4SLinus Torvalds 		ret = -EOPNOTSUPP;
28271da177e4SLinus Torvalds 
28281da177e4SLinus Torvalds 	bio_put(bio);
28291da177e4SLinus Torvalds 	return ret;
28301da177e4SLinus Torvalds }
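
/*
 * Editor's example, a sketch of synchronous use: read one mapped,
 * referenced buffer and wait for it.  The extra get_bh() balances the
 * put_bh() done by end_buffer_read_sync(), the same pattern
 * ll_rw_block() uses below.
 */
static int myfs_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}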
28311da177e4SLinus Torvalds 
28321da177e4SLinus Torvalds /**
28331da177e4SLinus Torvalds  * ll_rw_block: low-level access to block devices (DEPRECATED)
2834a7662236SJan Kara  * @rw: whether to %READ, %WRITE or %SWRITE, or maybe %READA (readahead)
28351da177e4SLinus Torvalds  * @nr: number of &struct buffer_heads in the array
28361da177e4SLinus Torvalds  * @bhs: array of pointers to &struct buffer_head
28371da177e4SLinus Torvalds  *
2838a7662236SJan Kara  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2839a7662236SJan Kara  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2840a7662236SJan Kara  * option, %SWRITE, is like %WRITE except that we make sure the *current* data
2841a7662236SJan Kara  * in the buffers is sent to disk. The fourth option, %READA, is described in
2842a7662236SJan Kara  * the documentation for generic_make_request(), which ll_rw_block() calls.
28431da177e4SLinus Torvalds  *
28441da177e4SLinus Torvalds  * This function drops any buffer that it cannot get a lock on (with the
2845a7662236SJan Kara  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2846a7662236SJan Kara  * clean when doing a write request, and any buffer that appears to be
2847a7662236SJan Kara  * up-to-date when doing a read request.  Further, it marks as clean buffers that
2848a7662236SJan Kara  * are processed for writing (the buffer cache won't assume that they are
2849a7662236SJan Kara  * actually clean until the buffer gets unlocked).
28501da177e4SLinus Torvalds  *
28511da177e4SLinus Torvalds  * ll_rw_block sets b_end_io to a simple completion handler that marks
28521da177e4SLinus Torvalds  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
28531da177e4SLinus Torvalds  * any waiters.
28541da177e4SLinus Torvalds  *
28551da177e4SLinus Torvalds  * All of the buffers must be for the same device, and must also be a
28561da177e4SLinus Torvalds  * multiple of the current approved size for the device.
28571da177e4SLinus Torvalds  */
28581da177e4SLinus Torvalds void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
28591da177e4SLinus Torvalds {
28601da177e4SLinus Torvalds 	int i;
28611da177e4SLinus Torvalds 
28621da177e4SLinus Torvalds 	for (i = 0; i < nr; i++) {
28631da177e4SLinus Torvalds 		struct buffer_head *bh = bhs[i];
28641da177e4SLinus Torvalds 
2865a7662236SJan Kara 		if (rw == SWRITE)
2866a7662236SJan Kara 			lock_buffer(bh);
2867a7662236SJan Kara 		else if (test_set_buffer_locked(bh))
28681da177e4SLinus Torvalds 			continue;
28691da177e4SLinus Torvalds 
2870a7662236SJan Kara 		if (rw == WRITE || rw == SWRITE) {
28711da177e4SLinus Torvalds 			if (test_clear_buffer_dirty(bh)) {
287276c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_write_sync;
2873*e60e5c50SOGAWA Hirofumi 				get_bh(bh);
28741da177e4SLinus Torvalds 				submit_bh(WRITE, bh);
28751da177e4SLinus Torvalds 				continue;
28761da177e4SLinus Torvalds 			}
28771da177e4SLinus Torvalds 		} else {
28781da177e4SLinus Torvalds 			if (!buffer_uptodate(bh)) {
287976c3073aSakpm@osdl.org 				bh->b_end_io = end_buffer_read_sync;
2880*e60e5c50SOGAWA Hirofumi 				get_bh(bh);
28811da177e4SLinus Torvalds 				submit_bh(rw, bh);
28821da177e4SLinus Torvalds 				continue;
28831da177e4SLinus Torvalds 			}
28841da177e4SLinus Torvalds 		}
28851da177e4SLinus Torvalds 		unlock_buffer(bh);
28861da177e4SLinus Torvalds 	}
28871da177e4SLinus Torvalds }
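
/*
 * Editor's example (sketch): the usual caller pattern is to kick off a
 * batch of reads with ll_rw_block() and then wait on each buffer,
 * re-checking uptodateness afterwards because the I/O is asynchronous.
 */
static int myfs_read_buffers(struct buffer_head *bhs[], int nr)
{
	int i, err = 0;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}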
28881da177e4SLinus Torvalds 
28891da177e4SLinus Torvalds /*
28901da177e4SLinus Torvalds  * For a data-integrity writeout, we need to wait upon any in-progress I/O
28911da177e4SLinus Torvalds  * and then start new I/O and wait upon that in turn.  The caller must have a ref on
28921da177e4SLinus Torvalds  * the buffer_head.
28931da177e4SLinus Torvalds  */
28941da177e4SLinus Torvalds int sync_dirty_buffer(struct buffer_head *bh)
28951da177e4SLinus Torvalds {
28961da177e4SLinus Torvalds 	int ret = 0;
28971da177e4SLinus Torvalds 
28981da177e4SLinus Torvalds 	WARN_ON(atomic_read(&bh->b_count) < 1);
28991da177e4SLinus Torvalds 	lock_buffer(bh);
29001da177e4SLinus Torvalds 	if (test_clear_buffer_dirty(bh)) {
29011da177e4SLinus Torvalds 		get_bh(bh);
29021da177e4SLinus Torvalds 		bh->b_end_io = end_buffer_write_sync;
29031da177e4SLinus Torvalds 		ret = submit_bh(WRITE, bh);
29041da177e4SLinus Torvalds 		wait_on_buffer(bh);
29051da177e4SLinus Torvalds 		if (buffer_eopnotsupp(bh)) {
29061da177e4SLinus Torvalds 			clear_buffer_eopnotsupp(bh);
29071da177e4SLinus Torvalds 			ret = -EOPNOTSUPP;
29081da177e4SLinus Torvalds 		}
29091da177e4SLinus Torvalds 		if (!ret && !buffer_uptodate(bh))
29101da177e4SLinus Torvalds 			ret = -EIO;
29111da177e4SLinus Torvalds 	} else {
29121da177e4SLinus Torvalds 		unlock_buffer(bh);
29131da177e4SLinus Torvalds 	}
29141da177e4SLinus Torvalds 	return ret;
29151da177e4SLinus Torvalds }
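
/*
 * Editor's example, a sketch: a metadata update that must be on disk
 * before the caller proceeds.  sb_bread() provides the buffer reference
 * that sync_dirty_buffer() requires.
 */
static int myfs_update_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_bread(sb, block);
	int err;

	if (!bh)
		return -EIO;
	/* ... modify bh->b_data here ... */
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);
	brelse(bh);
	return err;
}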
29161da177e4SLinus Torvalds 
29171da177e4SLinus Torvalds /*
29181da177e4SLinus Torvalds  * try_to_free_buffers() checks if all the buffers on this particular page
29191da177e4SLinus Torvalds  * are unused, and releases them if so.
29201da177e4SLinus Torvalds  *
29211da177e4SLinus Torvalds  * Exclusion against try_to_free_buffers may be obtained by either
29221da177e4SLinus Torvalds  * locking the page or by holding its mapping's private_lock.
29231da177e4SLinus Torvalds  *
29241da177e4SLinus Torvalds  * If the page is dirty but all the buffers are clean then we need to
29251da177e4SLinus Torvalds  * be sure to mark the page clean as well.  This is because the page
29261da177e4SLinus Torvalds  * may be against a block device, and a later reattachment of buffers
29271da177e4SLinus Torvalds  * to a dirty page will set *all* buffers dirty, which would corrupt
29281da177e4SLinus Torvalds  * filesystem data on the same device.
29291da177e4SLinus Torvalds  *
29301da177e4SLinus Torvalds  * The same applies to regular filesystem pages: if all the buffers are
29311da177e4SLinus Torvalds  * clean then we set the page clean and proceed.  To do that, we require
29321da177e4SLinus Torvalds  * total exclusion from __set_page_dirty_buffers().  That is obtained with
29331da177e4SLinus Torvalds  * private_lock.
29341da177e4SLinus Torvalds  *
29351da177e4SLinus Torvalds  * try_to_free_buffers() is non-blocking.
29361da177e4SLinus Torvalds  */
29371da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh)
29381da177e4SLinus Torvalds {
29391da177e4SLinus Torvalds 	return atomic_read(&bh->b_count) |
29401da177e4SLinus Torvalds 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
29411da177e4SLinus Torvalds }
29421da177e4SLinus Torvalds 
29431da177e4SLinus Torvalds static int
29441da177e4SLinus Torvalds drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
29451da177e4SLinus Torvalds {
29461da177e4SLinus Torvalds 	struct buffer_head *head = page_buffers(page);
29471da177e4SLinus Torvalds 	struct buffer_head *bh;
29481da177e4SLinus Torvalds 
29491da177e4SLinus Torvalds 	bh = head;
29501da177e4SLinus Torvalds 	do {
2951de7d5a3bSakpm@osdl.org 		if (buffer_write_io_error(bh) && page->mapping)
29521da177e4SLinus Torvalds 			set_bit(AS_EIO, &page->mapping->flags);
29531da177e4SLinus Torvalds 		if (buffer_busy(bh))
29541da177e4SLinus Torvalds 			goto failed;
29551da177e4SLinus Torvalds 		bh = bh->b_this_page;
29561da177e4SLinus Torvalds 	} while (bh != head);
29571da177e4SLinus Torvalds 
29581da177e4SLinus Torvalds 	do {
29591da177e4SLinus Torvalds 		struct buffer_head *next = bh->b_this_page;
29601da177e4SLinus Torvalds 
29611da177e4SLinus Torvalds 		if (!list_empty(&bh->b_assoc_buffers))
29621da177e4SLinus Torvalds 			__remove_assoc_queue(bh);
29631da177e4SLinus Torvalds 		bh = next;
29641da177e4SLinus Torvalds 	} while (bh != head);
29651da177e4SLinus Torvalds 	*buffers_to_free = head;
29661da177e4SLinus Torvalds 	__clear_page_buffers(page);
29671da177e4SLinus Torvalds 	return 1;
29681da177e4SLinus Torvalds failed:
29691da177e4SLinus Torvalds 	return 0;
29701da177e4SLinus Torvalds }
29711da177e4SLinus Torvalds 
29721da177e4SLinus Torvalds int try_to_free_buffers(struct page *page)
29731da177e4SLinus Torvalds {
29741da177e4SLinus Torvalds 	struct address_space * const mapping = page->mapping;
29751da177e4SLinus Torvalds 	struct buffer_head *buffers_to_free = NULL;
29761da177e4SLinus Torvalds 	int ret = 0;
29771da177e4SLinus Torvalds 
29781da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
29791da177e4SLinus Torvalds 	if (PageWriteback(page))
29801da177e4SLinus Torvalds 		return 0;
29811da177e4SLinus Torvalds 
29821da177e4SLinus Torvalds 	if (mapping == NULL) {		/* can this still happen? */
29831da177e4SLinus Torvalds 		ret = drop_buffers(page, &buffers_to_free);
29841da177e4SLinus Torvalds 		goto out;
29851da177e4SLinus Torvalds 	}
29861da177e4SLinus Torvalds 
29871da177e4SLinus Torvalds 	spin_lock(&mapping->private_lock);
29881da177e4SLinus Torvalds 	ret = drop_buffers(page, &buffers_to_free);
29891da177e4SLinus Torvalds 	if (ret) {
29901da177e4SLinus Torvalds 		/*
29911da177e4SLinus Torvalds 		 * If the filesystem writes its buffers by hand (eg ext3)
29921da177e4SLinus Torvalds 		 * then we can have clean buffers against a dirty page.  We
29931da177e4SLinus Torvalds 		 * clean the page here; otherwise later reattachment of buffers
29941da177e4SLinus Torvalds 		 * could encounter a non-uptodate page, which is unresolvable.
29951da177e4SLinus Torvalds 		 * This only applies in the rare case where try_to_free_buffers
29961da177e4SLinus Torvalds 		 * succeeds but the page is not freed.
29971da177e4SLinus Torvalds 		 */
29981da177e4SLinus Torvalds 		clear_page_dirty(page);
29991da177e4SLinus Torvalds 	}
30001da177e4SLinus Torvalds 	spin_unlock(&mapping->private_lock);
30011da177e4SLinus Torvalds out:
30021da177e4SLinus Torvalds 	if (buffers_to_free) {
30031da177e4SLinus Torvalds 		struct buffer_head *bh = buffers_to_free;
30041da177e4SLinus Torvalds 
30051da177e4SLinus Torvalds 		do {
30061da177e4SLinus Torvalds 			struct buffer_head *next = bh->b_this_page;
30071da177e4SLinus Torvalds 			free_buffer_head(bh);
30081da177e4SLinus Torvalds 			bh = next;
30091da177e4SLinus Torvalds 		} while (bh != buffers_to_free);
30101da177e4SLinus Torvalds 	}
30111da177e4SLinus Torvalds 	return ret;
30121da177e4SLinus Torvalds }
30131da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers);
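
/*
 * Editor's example (sketch): filesystems with no special buffer
 * lifetime rules can point ->releasepage straight at
 * try_to_free_buffers(); the gfp_mask does not matter here because the
 * call never blocks.
 */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	return try_to_free_buffers(page);
}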
30141da177e4SLinus Torvalds 
30151da177e4SLinus Torvalds int block_sync_page(struct page *page)
30161da177e4SLinus Torvalds {
30171da177e4SLinus Torvalds 	struct address_space *mapping;
30181da177e4SLinus Torvalds 
30191da177e4SLinus Torvalds 	smp_mb();
30201da177e4SLinus Torvalds 	mapping = page_mapping(page);
30211da177e4SLinus Torvalds 	if (mapping)
30221da177e4SLinus Torvalds 		blk_run_backing_dev(mapping->backing_dev_info, page);
30231da177e4SLinus Torvalds 	return 0;
30241da177e4SLinus Torvalds }
30251da177e4SLinus Torvalds 
30261da177e4SLinus Torvalds /*
30271da177e4SLinus Torvalds  * There are no bdflush tunables left.  But distributions are
30281da177e4SLinus Torvalds  * still running obsolete flush daemons, so we terminate them here.
30291da177e4SLinus Torvalds  *
30301da177e4SLinus Torvalds  * Use of bdflush() is deprecated and will be removed in a future kernel.
30311da177e4SLinus Torvalds  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
30321da177e4SLinus Torvalds  */
30331da177e4SLinus Torvalds asmlinkage long sys_bdflush(int func, long data)
30341da177e4SLinus Torvalds {
30351da177e4SLinus Torvalds 	static int msg_count;
30361da177e4SLinus Torvalds 
30371da177e4SLinus Torvalds 	if (!capable(CAP_SYS_ADMIN))
30381da177e4SLinus Torvalds 		return -EPERM;
30391da177e4SLinus Torvalds 
30401da177e4SLinus Torvalds 	if (msg_count < 5) {
30411da177e4SLinus Torvalds 		msg_count++;
30421da177e4SLinus Torvalds 		printk(KERN_INFO
30431da177e4SLinus Torvalds 			"warning: process `%s' used the obsolete bdflush"
30441da177e4SLinus Torvalds 			" system call\n", current->comm);
30451da177e4SLinus Torvalds 		printk(KERN_INFO "Fix your initscripts?\n");
30461da177e4SLinus Torvalds 	}
30471da177e4SLinus Torvalds 
30481da177e4SLinus Torvalds 	if (func == 1)
30491da177e4SLinus Torvalds 		do_exit(0);
30501da177e4SLinus Torvalds 	return 0;
30511da177e4SLinus Torvalds }
30521da177e4SLinus Torvalds 
30531da177e4SLinus Torvalds /*
3054e965f963SChristoph Lameter  * Migration function for pages with buffers. This function can only be used
3055e965f963SChristoph Lameter  * if the underlying filesystem guarantees that no other references to "page"
3056e965f963SChristoph Lameter  * exist.
3057e965f963SChristoph Lameter  */
3058e965f963SChristoph Lameter #ifdef CONFIG_MIGRATION
3059e965f963SChristoph Lameter int buffer_migrate_page(struct page *newpage, struct page *page)
3060e965f963SChristoph Lameter {
3061e965f963SChristoph Lameter 	struct address_space *mapping = page->mapping;
3062e965f963SChristoph Lameter 	struct buffer_head *bh, *head;
3063e965f963SChristoph Lameter 
3064e965f963SChristoph Lameter 	if (!mapping)
3065e965f963SChristoph Lameter 		return -EAGAIN;
3066e965f963SChristoph Lameter 
3067e965f963SChristoph Lameter 	if (!page_has_buffers(page))
3068e965f963SChristoph Lameter 		return migrate_page(newpage, page);
3069e965f963SChristoph Lameter 
3070e965f963SChristoph Lameter 	head = page_buffers(page);
3071e965f963SChristoph Lameter 
3072e965f963SChristoph Lameter 	if (migrate_page_remove_references(newpage, page, 3))
3073e965f963SChristoph Lameter 		return -EAGAIN;
3074e965f963SChristoph Lameter 
3075e965f963SChristoph Lameter 	bh = head;
3076e965f963SChristoph Lameter 	do {
3077e965f963SChristoph Lameter 		get_bh(bh);
3078e965f963SChristoph Lameter 		lock_buffer(bh);
3079e965f963SChristoph Lameter 		bh = bh->b_this_page;
3080e965f963SChristoph Lameter 
3081e965f963SChristoph Lameter 	} while (bh != head);
3082e965f963SChristoph Lameter 
3083e965f963SChristoph Lameter 	ClearPagePrivate(page);
3084e965f963SChristoph Lameter 	set_page_private(newpage, page_private(page));
3085e965f963SChristoph Lameter 	set_page_private(page, 0);
3086e965f963SChristoph Lameter 	put_page(page);
3087e965f963SChristoph Lameter 	get_page(newpage);
3088e965f963SChristoph Lameter 
3089e965f963SChristoph Lameter 	bh = head;
3090e965f963SChristoph Lameter 	do {
3091e965f963SChristoph Lameter 		set_bh_page(bh, newpage, bh_offset(bh));
3092e965f963SChristoph Lameter 		bh = bh->b_this_page;
3093e965f963SChristoph Lameter 
3094e965f963SChristoph Lameter 	} while (bh != head);
3095e965f963SChristoph Lameter 
3096e965f963SChristoph Lameter 	SetPagePrivate(newpage);
3097e965f963SChristoph Lameter 
3098e965f963SChristoph Lameter 	migrate_page_copy(newpage, page);
3099e965f963SChristoph Lameter 
3100e965f963SChristoph Lameter 	bh = head;
3101e965f963SChristoph Lameter 	do {
3102e965f963SChristoph Lameter 		unlock_buffer(bh);
3103e965f963SChristoph Lameter  		put_bh(bh);
3104e965f963SChristoph Lameter 		bh = bh->b_this_page;
3105e965f963SChristoph Lameter 
3106e965f963SChristoph Lameter 	} while (bh != head);
3107e965f963SChristoph Lameter 
3108e965f963SChristoph Lameter 	return 0;
3109e965f963SChristoph Lameter }
3110e965f963SChristoph Lameter EXPORT_SYMBOL(buffer_migrate_page);
3111e965f963SChristoph Lameter #endif
3112e965f963SChristoph Lameter 
3113e965f963SChristoph Lameter /*
31141da177e4SLinus Torvalds  * Buffer-head allocation
31151da177e4SLinus Torvalds  */
31161da177e4SLinus Torvalds static kmem_cache_t *bh_cachep;
31171da177e4SLinus Torvalds 
31181da177e4SLinus Torvalds /*
31191da177e4SLinus Torvalds  * Once the number of bh's in the machine exceeds this level, we start
31201da177e4SLinus Torvalds  * stripping them in writeback.
31211da177e4SLinus Torvalds  */
31221da177e4SLinus Torvalds static int max_buffer_heads;
31231da177e4SLinus Torvalds 
31241da177e4SLinus Torvalds int buffer_heads_over_limit;
31251da177e4SLinus Torvalds 
31261da177e4SLinus Torvalds struct bh_accounting {
31271da177e4SLinus Torvalds 	int nr;			/* Number of live bh's */
31281da177e4SLinus Torvalds 	int ratelimit;		/* Limit cacheline bouncing */
31291da177e4SLinus Torvalds };
31301da177e4SLinus Torvalds 
31311da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
31321da177e4SLinus Torvalds 
31331da177e4SLinus Torvalds static void recalc_bh_state(void)
31341da177e4SLinus Torvalds {
31351da177e4SLinus Torvalds 	int i;
31361da177e4SLinus Torvalds 	int tot = 0;
31371da177e4SLinus Torvalds 
31381da177e4SLinus Torvalds 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
31391da177e4SLinus Torvalds 		return;
31401da177e4SLinus Torvalds 	__get_cpu_var(bh_accounting).ratelimit = 0;
31411da177e4SLinus Torvalds 	for_each_cpu(i)
31421da177e4SLinus Torvalds 		tot += per_cpu(bh_accounting, i).nr;
31431da177e4SLinus Torvalds 	buffer_heads_over_limit = (tot > max_buffer_heads);
31441da177e4SLinus Torvalds }
31451da177e4SLinus Torvalds 
3146dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
31471da177e4SLinus Torvalds {
31481da177e4SLinus Torvalds 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
31491da177e4SLinus Torvalds 	if (ret) {
3150736c7b80SCoywolf Qi Hunt 		get_cpu_var(bh_accounting).nr++;
31511da177e4SLinus Torvalds 		recalc_bh_state();
3152736c7b80SCoywolf Qi Hunt 		put_cpu_var(bh_accounting);
31531da177e4SLinus Torvalds 	}
31541da177e4SLinus Torvalds 	return ret;
31551da177e4SLinus Torvalds }
31561da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head);
31571da177e4SLinus Torvalds 
31581da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh)
31591da177e4SLinus Torvalds {
31601da177e4SLinus Torvalds 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
31611da177e4SLinus Torvalds 	kmem_cache_free(bh_cachep, bh);
3162736c7b80SCoywolf Qi Hunt 	get_cpu_var(bh_accounting).nr--;
31631da177e4SLinus Torvalds 	recalc_bh_state();
3164736c7b80SCoywolf Qi Hunt 	put_cpu_var(bh_accounting);
31651da177e4SLinus Torvalds }
31661da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head);
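/*
 * Usage sketch: within this file, alloc_page_buffers() obtains heads
 * with alloc_buffer_head(GFP_NOFS) and surplus heads are released
 * through free_buffer_head().  Condensed and illustrative:
 *
 *	struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
 *	if (bh) {
 *		bh->b_size = size;
 *		...
 *		free_buffer_head(bh);	(once no longer referenced)
 *	}
 */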
31671da177e4SLinus Torvalds 
31681da177e4SLinus Torvalds static void
31691da177e4SLinus Torvalds init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
31701da177e4SLinus Torvalds {
31711da177e4SLinus Torvalds 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
31721da177e4SLinus Torvalds 			    SLAB_CTOR_CONSTRUCTOR) {
31731da177e4SLinus Torvalds 		struct buffer_head * bh = (struct buffer_head *)data;
31741da177e4SLinus Torvalds 
31751da177e4SLinus Torvalds 		memset(bh, 0, sizeof(*bh));
31761da177e4SLinus Torvalds 		INIT_LIST_HEAD(&bh->b_assoc_buffers);
31771da177e4SLinus Torvalds 	}
31781da177e4SLinus Torvalds }
31791da177e4SLinus Torvalds 
31801da177e4SLinus Torvalds #ifdef CONFIG_HOTPLUG_CPU
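/*
 * When a CPU goes away, release the buffer references parked in that
 * CPU's bh_lrus so the buffers (and their pages) are not pinned
 * forever.
 */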
31811da177e4SLinus Torvalds static void buffer_exit_cpu(int cpu)
31821da177e4SLinus Torvalds {
31831da177e4SLinus Torvalds 	int i;
31841da177e4SLinus Torvalds 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
31851da177e4SLinus Torvalds 
31861da177e4SLinus Torvalds 	for (i = 0; i < BH_LRU_SIZE; i++) {
31871da177e4SLinus Torvalds 		brelse(b->bhs[i]);
31881da177e4SLinus Torvalds 		b->bhs[i] = NULL;
31891da177e4SLinus Torvalds 	}
31901da177e4SLinus Torvalds }
31911da177e4SLinus Torvalds 
31921da177e4SLinus Torvalds static int buffer_cpu_notify(struct notifier_block *self,
31931da177e4SLinus Torvalds 			      unsigned long action, void *hcpu)
31941da177e4SLinus Torvalds {
31951da177e4SLinus Torvalds 	if (action == CPU_DEAD)
31961da177e4SLinus Torvalds 		buffer_exit_cpu((unsigned long)hcpu);
31971da177e4SLinus Torvalds 	return NOTIFY_OK;
31981da177e4SLinus Torvalds }
31991da177e4SLinus Torvalds #endif /* CONFIG_HOTPLUG_CPU */
32001da177e4SLinus Torvalds 
32011da177e4SLinus Torvalds void __init buffer_init(void)
32021da177e4SLinus Torvalds {
32031da177e4SLinus Torvalds 	int nrpages;
32041da177e4SLinus Torvalds 
32051da177e4SLinus Torvalds 	bh_cachep = kmem_cache_create("buffer_head",
32061da177e4SLinus Torvalds 			sizeof(struct buffer_head), 0,
3207e422fd2cSAndrea Arcangeli 			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
32081da177e4SLinus Torvalds 
32091da177e4SLinus Torvalds 	/*
32101da177e4SLinus Torvalds 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
32111da177e4SLinus Torvalds 	 */
32121da177e4SLinus Torvalds 	nrpages = (nr_free_buffer_pages() * 10) / 100;
32131da177e4SLinus Torvalds 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
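	/*
	 * Worked example with illustrative figures: on 4KiB pages with
	 * a buffer_head of roughly 100 bytes, a page holds about 40
	 * heads.  A machine whose ZONE_NORMAL yields ~260000 free
	 * buffer pages gets nrpages = 26000, i.e. max_buffer_heads of
	 * roughly one million.
	 */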
32141da177e4SLinus Torvalds 	hotcpu_notifier(buffer_cpu_notify, 0);
32151da177e4SLinus Torvalds }
32161da177e4SLinus Torvalds 
32171da177e4SLinus Torvalds EXPORT_SYMBOL(__bforget);
32181da177e4SLinus Torvalds EXPORT_SYMBOL(__brelse);
32191da177e4SLinus Torvalds EXPORT_SYMBOL(__wait_on_buffer);
32201da177e4SLinus Torvalds EXPORT_SYMBOL(block_commit_write);
32211da177e4SLinus Torvalds EXPORT_SYMBOL(block_prepare_write);
32221da177e4SLinus Torvalds EXPORT_SYMBOL(block_read_full_page);
32231da177e4SLinus Torvalds EXPORT_SYMBOL(block_sync_page);
32241da177e4SLinus Torvalds EXPORT_SYMBOL(block_truncate_page);
32251da177e4SLinus Torvalds EXPORT_SYMBOL(block_write_full_page);
32261da177e4SLinus Torvalds EXPORT_SYMBOL(cont_prepare_write);
32271da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_async_write);
32281da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_read_sync);
32291da177e4SLinus Torvalds EXPORT_SYMBOL(end_buffer_write_sync);
32301da177e4SLinus Torvalds EXPORT_SYMBOL(file_fsync);
32311da177e4SLinus Torvalds EXPORT_SYMBOL(fsync_bdev);
32321da177e4SLinus Torvalds EXPORT_SYMBOL(generic_block_bmap);
32331da177e4SLinus Torvalds EXPORT_SYMBOL(generic_commit_write);
32341da177e4SLinus Torvalds EXPORT_SYMBOL(generic_cont_expand);
323505eb0b51SOGAWA Hirofumi EXPORT_SYMBOL(generic_cont_expand_simple);
32361da177e4SLinus Torvalds EXPORT_SYMBOL(init_buffer);
32371da177e4SLinus Torvalds EXPORT_SYMBOL(invalidate_bdev);
32381da177e4SLinus Torvalds EXPORT_SYMBOL(ll_rw_block);
32391da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_dirty);
32401da177e4SLinus Torvalds EXPORT_SYMBOL(submit_bh);
32411da177e4SLinus Torvalds EXPORT_SYMBOL(sync_dirty_buffer);
32421da177e4SLinus Torvalds EXPORT_SYMBOL(unlock_buffer);
3243