xref: /linux/fs/buffer.c (revision d8327c784b51b57dac2c26cfad87dce0d68dfd98)
1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6 
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20 
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/syscalls.h>
24 #include <linux/fs.h>
25 #include <linux/mm.h>
26 #include <linux/percpu.h>
27 #include <linux/slab.h>
28 #include <linux/smp_lock.h>
29 #include <linux/capability.h>
30 #include <linux/blkdev.h>
31 #include <linux/file.h>
32 #include <linux/quotaops.h>
33 #include <linux/highmem.h>
34 #include <linux/module.h>
35 #include <linux/writeback.h>
36 #include <linux/hash.h>
37 #include <linux/suspend.h>
38 #include <linux/buffer_head.h>
39 #include <linux/bio.h>
40 #include <linux/notifier.h>
41 #include <linux/cpu.h>
42 #include <linux/bitops.h>
43 #include <linux/mpage.h>
44 #include <linux/bit_spinlock.h>
45 
46 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47 static void invalidate_bh_lrus(void);
48 
49 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
50 
51 inline void
52 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
53 {
54 	bh->b_end_io = handler;
55 	bh->b_private = private;
56 }
57 
58 static int sync_buffer(void *word)
59 {
60 	struct block_device *bd;
61 	struct buffer_head *bh
62 		= container_of(word, struct buffer_head, b_state);
63 
64 	smp_mb();
65 	bd = bh->b_bdev;
66 	if (bd)
67 		blk_run_address_space(bd->bd_inode->i_mapping);
68 	io_schedule();
69 	return 0;
70 }
71 
72 void fastcall __lock_buffer(struct buffer_head *bh)
73 {
74 	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
75 							TASK_UNINTERRUPTIBLE);
76 }
77 EXPORT_SYMBOL(__lock_buffer);
78 
79 void fastcall unlock_buffer(struct buffer_head *bh)
80 {
81 	clear_buffer_locked(bh);
82 	smp_mb__after_clear_bit();
83 	wake_up_bit(&bh->b_state, BH_Lock);
84 }
85 
86 /*
87  * Block until a buffer comes unlocked.  This doesn't stop it
88  * from becoming locked again - you have to lock it yourself
89  * if you want to preserve its state.
90  */
91 void __wait_on_buffer(struct buffer_head * bh)
92 {
93 	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
94 }
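
/*
 * Illustrative sketch (not part of the original file): a caller that needs
 * the buffer to *stay* unlocked while it inspects or modifies state should
 * take the lock itself rather than merely waiting for it to come free.
 * example_inspect_buffer_stably() is a hypothetical helper.
 */
static void example_inspect_buffer_stably(struct buffer_head *bh)
{
	lock_buffer(bh);	/* sleeps until we own BH_Lock */
	/* ... examine or modify bh state; nobody else can lock it now ... */
	unlock_buffer(bh);	/* clears BH_Lock and wakes waiters */
}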
95 
96 static void
97 __clear_page_buffers(struct page *page)
98 {
99 	ClearPagePrivate(page);
100 	set_page_private(page, 0);
101 	page_cache_release(page);
102 }
103 
104 static void buffer_io_error(struct buffer_head *bh)
105 {
106 	char b[BDEVNAME_SIZE];
107 
108 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
109 			bdevname(bh->b_bdev, b),
110 			(unsigned long long)bh->b_blocknr);
111 }
112 
113 /*
114  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
115  * unlock the buffer. This is what ll_rw_block uses too.
116  */
117 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
118 {
119 	if (uptodate) {
120 		set_buffer_uptodate(bh);
121 	} else {
122 		/* This happens due to failed READA attempts. */
123 		clear_buffer_uptodate(bh);
124 	}
125 	unlock_buffer(bh);
126 	put_bh(bh);
127 }
128 
129 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
130 {
131 	char b[BDEVNAME_SIZE];
132 
133 	if (uptodate) {
134 		set_buffer_uptodate(bh);
135 	} else {
136 		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
137 			buffer_io_error(bh);
138 			printk(KERN_WARNING "lost page write due to "
139 					"I/O error on %s\n",
140 				       bdevname(bh->b_bdev, b));
141 		}
142 		set_buffer_write_io_error(bh);
143 		clear_buffer_uptodate(bh);
144 	}
145 	unlock_buffer(bh);
146 	put_bh(bh);
147 }
148 
149 /*
150  * Write out and wait upon all the dirty data associated with a block
151  * device via its mapping.  Does not take the superblock lock.
152  */
153 int sync_blockdev(struct block_device *bdev)
154 {
155 	int ret = 0;
156 
157 	if (bdev)
158 		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
159 	return ret;
160 }
161 EXPORT_SYMBOL(sync_blockdev);
162 
163 /*
164  * Write out and wait upon all dirty data associated with this
165  * superblock.  Filesystem data as well as the underlying block
166  * device.  Takes the superblock lock.
167  */
168 int fsync_super(struct super_block *sb)
169 {
170 	sync_inodes_sb(sb, 0);
171 	DQUOT_SYNC(sb);
172 	lock_super(sb);
173 	if (sb->s_dirt && sb->s_op->write_super)
174 		sb->s_op->write_super(sb);
175 	unlock_super(sb);
176 	if (sb->s_op->sync_fs)
177 		sb->s_op->sync_fs(sb, 1);
178 	sync_blockdev(sb->s_bdev);
179 	sync_inodes_sb(sb, 1);
180 
181 	return sync_blockdev(sb->s_bdev);
182 }
183 
184 /*
185  * Write out and wait upon all dirty data associated with this
186  * device.   Filesystem data as well as the underlying block
187  * device.  Takes the superblock lock.
188  */
189 int fsync_bdev(struct block_device *bdev)
190 {
191 	struct super_block *sb = get_super(bdev);
192 	if (sb) {
193 		int res = fsync_super(sb);
194 		drop_super(sb);
195 		return res;
196 	}
197 	return sync_blockdev(bdev);
198 }
199 
200 /**
201  * freeze_bdev  --  lock a filesystem and force it into a consistent state
202  * @bdev:	blockdevice to lock
203  *
204  * This takes the block device bd_mount_sem to make sure no new mounts
205  * happen on bdev until thaw_bdev() is called.
206  * If a superblock is found on this device, we take the s_umount semaphore
207  * on it to make sure nobody unmounts until the snapshot creation is done.
208  */
209 struct super_block *freeze_bdev(struct block_device *bdev)
210 {
211 	struct super_block *sb;
212 
213 	down(&bdev->bd_mount_sem);
214 	sb = get_super(bdev);
215 	if (sb && !(sb->s_flags & MS_RDONLY)) {
216 		sb->s_frozen = SB_FREEZE_WRITE;
217 		smp_wmb();
218 
219 		sync_inodes_sb(sb, 0);
220 		DQUOT_SYNC(sb);
221 
222 		lock_super(sb);
223 		if (sb->s_dirt && sb->s_op->write_super)
224 			sb->s_op->write_super(sb);
225 		unlock_super(sb);
226 
227 		if (sb->s_op->sync_fs)
228 			sb->s_op->sync_fs(sb, 1);
229 
230 		sync_blockdev(sb->s_bdev);
231 		sync_inodes_sb(sb, 1);
232 
233 		sb->s_frozen = SB_FREEZE_TRANS;
234 		smp_wmb();
235 
236 		sync_blockdev(sb->s_bdev);
237 
238 		if (sb->s_op->write_super_lockfs)
239 			sb->s_op->write_super_lockfs(sb);
240 	}
241 
242 	sync_blockdev(bdev);
243 	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
244 }
245 EXPORT_SYMBOL(freeze_bdev);
246 
247 /**
248  * thaw_bdev  -- unlock filesystem
249  * @bdev:	blockdevice to unlock
250  * @sb:		associated superblock
251  *
252  * Unlocks the filesystem and marks it writeable again after freeze_bdev().
253  */
254 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
255 {
256 	if (sb) {
257 		BUG_ON(sb->s_bdev != bdev);
258 
259 		if (sb->s_op->unlockfs)
260 			sb->s_op->unlockfs(sb);
261 		sb->s_frozen = SB_UNFROZEN;
262 		smp_wmb();
263 		wake_up(&sb->s_wait_unfrozen);
264 		drop_super(sb);
265 	}
266 
267 	up(&bdev->bd_mount_sem);
268 }
269 EXPORT_SYMBOL(thaw_bdev);
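
/*
 * Illustrative sketch (not part of the original file): a snapshot-style user
 * of this API brackets its work with freeze_bdev()/thaw_bdev().
 * example_snapshot() is a hypothetical caller.
 */
static int example_snapshot(struct block_device *bdev)
{
	struct super_block *sb;

	sb = freeze_bdev(bdev);		/* writes are now blocked and on disk */
	/* ... caller-specific work: copy or clone the device ... */
	thaw_bdev(bdev, sb);		/* sb may be NULL; thaw_bdev copes */
	return 0;
}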
270 
271 /*
272  * sync everything.  Start out by waking pdflush, because that writes back
273  * all queues in parallel.
274  */
275 static void do_sync(unsigned long wait)
276 {
277 	wakeup_pdflush(0);
278 	sync_inodes(0);		/* All mappings, inodes and their blockdevs */
279 	DQUOT_SYNC(NULL);
280 	sync_supers();		/* Write the superblocks */
281 	sync_filesystems(0);	/* Start syncing the filesystems */
282 	sync_filesystems(wait);	/* Waitingly sync the filesystems */
283 	sync_inodes(wait);	/* Mappings, inodes and blockdevs, again. */
284 	if (!wait)
285 		printk("Emergency Sync complete\n");
286 	if (unlikely(laptop_mode))
287 		laptop_sync_completion();
288 }
289 
290 asmlinkage long sys_sync(void)
291 {
292 	do_sync(1);
293 	return 0;
294 }
295 
296 void emergency_sync(void)
297 {
298 	pdflush_operation(do_sync, 0);
299 }
300 
301 /*
302  * Generic function to fsync a file.
303  *
304  * filp may be NULL if called via the msync of a vma.
305  */
306 
307 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
308 {
309 	struct inode * inode = dentry->d_inode;
310 	struct super_block * sb;
311 	int ret, err;
312 
313 	/* sync the inode to buffers */
314 	ret = write_inode_now(inode, 0);
315 
316 	/* sync the superblock to buffers */
317 	sb = inode->i_sb;
318 	lock_super(sb);
319 	if (sb->s_op->write_super)
320 		sb->s_op->write_super(sb);
321 	unlock_super(sb);
322 
323 	/* .. finally sync the buffers to disk */
324 	err = sync_blockdev(sb->s_bdev);
325 	if (!ret)
326 		ret = err;
327 	return ret;
328 }
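
/*
 * Illustrative sketch (not part of the original file): simple filesystems
 * that keep all of their state in the buffer and page caches can point
 * their file_operations at file_fsync() directly.  example_file_ops is a
 * hypothetical table.
 */
static struct file_operations example_file_ops = {
	.read	= generic_file_read,
	.write	= generic_file_write,
	.mmap	= generic_file_mmap,
	.fsync	= file_fsync,
};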
329 
330 static long do_fsync(unsigned int fd, int datasync)
331 {
332 	struct file * file;
333 	struct address_space *mapping;
334 	int ret, err;
335 
336 	ret = -EBADF;
337 	file = fget(fd);
338 	if (!file)
339 		goto out;
340 
341 	ret = -EINVAL;
342 	if (!file->f_op || !file->f_op->fsync) {
343 		/* Why?  We can still call filemap_fdatawrite */
344 		goto out_putf;
345 	}
346 
347 	mapping = file->f_mapping;
348 
349 	current->flags |= PF_SYNCWRITE;
350 	ret = filemap_fdatawrite(mapping);
351 
352 	/*
353 	 * We need to protect against concurrent writers,
354 	 * which could cause livelocks in fsync_buffers_list
355 	 */
356 	mutex_lock(&mapping->host->i_mutex);
357 	err = file->f_op->fsync(file, file->f_dentry, datasync);
358 	if (!ret)
359 		ret = err;
360 	mutex_unlock(&mapping->host->i_mutex);
361 	err = filemap_fdatawait(mapping);
362 	if (!ret)
363 		ret = err;
364 	current->flags &= ~PF_SYNCWRITE;
365 
366 out_putf:
367 	fput(file);
368 out:
369 	return ret;
370 }
371 
372 asmlinkage long sys_fsync(unsigned int fd)
373 {
374 	return do_fsync(fd, 0);
375 }
376 
377 asmlinkage long sys_fdatasync(unsigned int fd)
378 {
379 	return do_fsync(fd, 1);
380 }
381 
382 /*
383  * Various filesystems appear to want __find_get_block to be non-blocking.
384  * But it's the page lock which protects the buffers.  To get around this,
385  * we get exclusion from try_to_free_buffers with the blockdev mapping's
386  * private_lock.
387  *
388  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
389  * may be quite high.  This code could TryLock the page, and if that
390  * succeeds, there is no need to take private_lock. (But if
391  * private_lock is contended then so is mapping->tree_lock).
392  */
393 static struct buffer_head *
394 __find_get_block_slow(struct block_device *bdev, sector_t block)
395 {
396 	struct inode *bd_inode = bdev->bd_inode;
397 	struct address_space *bd_mapping = bd_inode->i_mapping;
398 	struct buffer_head *ret = NULL;
399 	pgoff_t index;
400 	struct buffer_head *bh;
401 	struct buffer_head *head;
402 	struct page *page;
403 	int all_mapped = 1;
404 
405 	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
406 	page = find_get_page(bd_mapping, index);
407 	if (!page)
408 		goto out;
409 
410 	spin_lock(&bd_mapping->private_lock);
411 	if (!page_has_buffers(page))
412 		goto out_unlock;
413 	head = page_buffers(page);
414 	bh = head;
415 	do {
416 		if (bh->b_blocknr == block) {
417 			ret = bh;
418 			get_bh(bh);
419 			goto out_unlock;
420 		}
421 		if (!buffer_mapped(bh))
422 			all_mapped = 0;
423 		bh = bh->b_this_page;
424 	} while (bh != head);
425 
426 	/* we might be here because some of the buffers on this page are
427 	 * not mapped.  This is due to various races between
428 	 * file io on the block device and getblk.  It gets dealt with
429 	 * elsewhere, don't buffer_error if we had some unmapped buffers
430 	 */
431 	if (all_mapped) {
432 		printk("__find_get_block_slow() failed. "
433 			"block=%llu, b_blocknr=%llu\n",
434 			(unsigned long long)block, (unsigned long long)bh->b_blocknr);
435 		printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
436 		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
437 	}
438 out_unlock:
439 	spin_unlock(&bd_mapping->private_lock);
440 	page_cache_release(page);
441 out:
442 	return ret;
443 }
444 
445 /* If invalidate_buffers() trashes dirty buffers, it means some kind of
446    fs corruption is going on.  Trashing dirty data always implies losing
447    information that the user intended to be stored on the physical
448    medium.
449 
450    Thus, in general usage, invalidate_buffers is not allowed to trash
451    dirty buffers.  For example ioctl(BLKFLSBUF) expects dirty data to
452    be preserved.  These buffers are simply skipped.
453 
454    We also skip buffers which are still in use.  For example this can
455    happen if a userspace program is reading the block device.
456 
457    NOTE: when the user removes removable media while there is still dirty
458    data not synced to disk (due to a bug in the device driver or to an
459    error by the user), not destroying the dirty buffers could also corrupt
460    the next media inserted.  A parameter is therefore needed to handle
461    this case as safely as possible (trying not to corrupt the newly
462    inserted disk with data belonging to the old, now-corrupted one).
463    Also, for a ramdisk the natural way to release its memory is to
464    destroy the dirty buffers.
465 
466    These are two special cases.  Normal usage is for the device driver
467    to issue a sync on the device (without waiting for I/O completion) and
468    then an invalidate_buffers call that doesn't trash dirty buffers.
469 
470    For handling cache coherency with the blkdev pagecache the 'update' case
471    has been introduced.  It is needed to re-read from disk any pinned
472    buffer.  NOTE: re-reading from disk is destructive, so we can only do it
473    when we assume nobody is changing the buffercache under our I/O and when
474    we think the disk contains more recent information than the buffercache.
475    The update == 1 pass marks the buffers we need to update; the update == 2
476    pass does the actual I/O. */
477 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
478 {
479 	invalidate_bh_lrus();
480 	/*
481 	 * FIXME: what about destroy_dirty_buffers?
482 	 * We really want to use invalidate_inode_pages2() for
483 	 * that, but not until that's cleaned up.
484 	 */
485 	invalidate_inode_pages(bdev->bd_inode->i_mapping);
486 }
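
/*
 * Illustrative sketch (not part of the original file): the "normal usage"
 * described above - a driver noticing a media change syncs the device and
 * then invalidates, preserving any dirty buffers.  example_media_change()
 * is a hypothetical helper.
 */
static void example_media_change(struct block_device *bdev)
{
	sync_blockdev(bdev);		/* start writeback (this call also waits) */
	invalidate_bdev(bdev, 0);	/* 0: do not destroy dirty buffers */
}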
487 
488 /*
489  * Kick pdflush then try to free up some ZONE_NORMAL memory.
490  */
491 static void free_more_memory(void)
492 {
493 	struct zone **zones;
494 	pg_data_t *pgdat;
495 
496 	wakeup_pdflush(1024);
497 	yield();
498 
499 	for_each_pgdat(pgdat) {
500 		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
501 		if (*zones)
502 			try_to_free_pages(zones, GFP_NOFS);
503 	}
504 }
505 
506 /*
507  * I/O completion handler for block_read_full_page() - pages
508  * which come unlocked at the end of I/O.
509  */
510 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
511 {
512 	unsigned long flags;
513 	struct buffer_head *first;
514 	struct buffer_head *tmp;
515 	struct page *page;
516 	int page_uptodate = 1;
517 
518 	BUG_ON(!buffer_async_read(bh));
519 
520 	page = bh->b_page;
521 	if (uptodate) {
522 		set_buffer_uptodate(bh);
523 	} else {
524 		clear_buffer_uptodate(bh);
525 		if (printk_ratelimit())
526 			buffer_io_error(bh);
527 		SetPageError(page);
528 	}
529 
530 	/*
531 	 * Be _very_ careful from here on. Bad things can happen if
532 	 * two buffer heads end IO at almost the same time and both
533 	 * decide that the page is now completely done.
534 	 */
535 	first = page_buffers(page);
536 	local_irq_save(flags);
537 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
538 	clear_buffer_async_read(bh);
539 	unlock_buffer(bh);
540 	tmp = bh;
541 	do {
542 		if (!buffer_uptodate(tmp))
543 			page_uptodate = 0;
544 		if (buffer_async_read(tmp)) {
545 			BUG_ON(!buffer_locked(tmp));
546 			goto still_busy;
547 		}
548 		tmp = tmp->b_this_page;
549 	} while (tmp != bh);
550 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
551 	local_irq_restore(flags);
552 
553 	/*
554 	 * If none of the buffers had errors and they are all
555 	 * uptodate then we can set the page uptodate.
556 	 */
557 	if (page_uptodate && !PageError(page))
558 		SetPageUptodate(page);
559 	unlock_page(page);
560 	return;
561 
562 still_busy:
563 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
564 	local_irq_restore(flags);
565 	return;
566 }
567 
568 /*
569  * Completion handler for block_write_full_page() - pages which are unlocked
570  * during I/O, and which have PageWriteback cleared upon I/O completion.
571  */
572 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
573 {
574 	char b[BDEVNAME_SIZE];
575 	unsigned long flags;
576 	struct buffer_head *first;
577 	struct buffer_head *tmp;
578 	struct page *page;
579 
580 	BUG_ON(!buffer_async_write(bh));
581 
582 	page = bh->b_page;
583 	if (uptodate) {
584 		set_buffer_uptodate(bh);
585 	} else {
586 		if (printk_ratelimit()) {
587 			buffer_io_error(bh);
588 			printk(KERN_WARNING "lost page write due to "
589 					"I/O error on %s\n",
590 			       bdevname(bh->b_bdev, b));
591 		}
592 		set_bit(AS_EIO, &page->mapping->flags);
593 		clear_buffer_uptodate(bh);
594 		SetPageError(page);
595 	}
596 
597 	first = page_buffers(page);
598 	local_irq_save(flags);
599 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
600 
601 	clear_buffer_async_write(bh);
602 	unlock_buffer(bh);
603 	tmp = bh->b_this_page;
604 	while (tmp != bh) {
605 		if (buffer_async_write(tmp)) {
606 			BUG_ON(!buffer_locked(tmp));
607 			goto still_busy;
608 		}
609 		tmp = tmp->b_this_page;
610 	}
611 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
612 	local_irq_restore(flags);
613 	end_page_writeback(page);
614 	return;
615 
616 still_busy:
617 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
618 	local_irq_restore(flags);
619 	return;
620 }
621 
622 /*
623  * If a page's buffers are under async readin (end_buffer_async_read
624  * completion) then there is a possibility that another thread of
625  * control could lock one of the buffers after it has completed
626  * but while some of the other buffers have not completed.  This
627  * locked buffer would confuse end_buffer_async_read() into not unlocking
628  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
629  * that this buffer is not under async I/O.
630  *
631  * The page comes unlocked when it has no locked buffer_async buffers
632  * left.
633  *
634  * PageLocked prevents anyone starting new async I/O reads any of
635  * the buffers.
636  *
637  * PageWriteback is used to prevent simultaneous writeout of the same
638  * page.
639  *
640  * PageLocked prevents anyone from starting writeback of a page which is
641  * under read I/O (PageWriteback is only ever set against a locked page).
642  */
643 static void mark_buffer_async_read(struct buffer_head *bh)
644 {
645 	bh->b_end_io = end_buffer_async_read;
646 	set_buffer_async_read(bh);
647 }
648 
649 void mark_buffer_async_write(struct buffer_head *bh)
650 {
651 	bh->b_end_io = end_buffer_async_write;
652 	set_buffer_async_write(bh);
653 }
654 EXPORT_SYMBOL(mark_buffer_async_write);
655 
656 
657 /*
658  * fs/buffer.c contains helper functions for buffer-backed address space's
659  * fsync functions.  A common requirement for buffer-based filesystems is
660  * that certain data from the backing blockdev needs to be written out for
661  * a successful fsync().  For example, ext2 indirect blocks need to be
662  * written back and waited upon before fsync() returns.
663  *
664  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
665  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
666  * management of a list of dependent buffers at ->i_mapping->private_list.
667  *
668  * Locking is a little subtle: try_to_free_buffers() will remove buffers
669  * from their controlling inode's queue when they are being freed.  But
670  * try_to_free_buffers() will be operating against the *blockdev* mapping
671  * at the time, not against the S_ISREG file which depends on those buffers.
672  * So the locking for private_list is via the private_lock in the address_space
673  * which backs the buffers.  Which is different from the address_space
674  * against which the buffers are listed.  So for a particular address_space,
675  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
676  * mapping->private_list will always be protected by the backing blockdev's
677  * ->private_lock.
678  *
679  * Which introduces a requirement: all buffers on an address_space's
680  * ->private_list must be from the same address_space: the blockdev's.
681  *
682  * address_spaces which do not place buffers at ->private_list via these
683  * utility functions are free to use private_lock and private_list for
684  * whatever they want.  The only requirement is that list_empty(private_list)
685  * be true at clear_inode() time.
686  *
687  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
688  * filesystems should do that.  invalidate_inode_buffers() should just go
689  * BUG_ON(!list_empty).
690  *
691  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
692  * take an address_space, not an inode.  And it should be called
693  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
694  * queued up.
695  *
696  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
697  * list if it is already on a list.  Because if the buffer is on a list,
698  * it *must* already be on the right one.  If not, the filesystem is being
699  * silly.  This will save a ton of locking.  But first we have to ensure
700  * that buffers are taken *off* the old inode's list when they are freed
701  * (presumably in truncate).  That requires careful auditing of all
702  * filesystems (do it inside bforget()).  It could also be done by bringing
703  * b_inode back.
704  */
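
/*
 * Illustrative sketch (not part of the original file): a filesystem that
 * wants its indirect/metadata blocks flushed by fsync() records them with
 * mark_buffer_dirty_inode() as it dirties them, and its ->fsync method then
 * simply calls sync_mapping_buffers().  The example_* names are hypothetical.
 */
static void example_dirty_metadata(struct buffer_head *bh, struct inode *inode)
{
	/* queue bh on inode->i_mapping->private_list as well as dirtying it */
	mark_buffer_dirty_inode(bh, inode);
}

static int example_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;

	/* write out and wait upon everything queued above */
	return sync_mapping_buffers(inode->i_mapping);
}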
705 
706 /*
707  * The buffer's backing address_space's private_lock must be held
708  */
709 static inline void __remove_assoc_queue(struct buffer_head *bh)
710 {
711 	list_del_init(&bh->b_assoc_buffers);
712 }
713 
714 int inode_has_buffers(struct inode *inode)
715 {
716 	return !list_empty(&inode->i_data.private_list);
717 }
718 
719 /*
720  * osync is designed to support O_SYNC io.  It waits synchronously for
721  * all already-submitted IO to complete, but does not queue any new
722  * writes to the disk.
723  *
724  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
725  * you dirty the buffers, and then use osync_inode_buffers to wait for
726  * completion.  Any other dirty buffers which are not yet queued for
727  * write will not be flushed to disk by the osync.
728  */
729 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
730 {
731 	struct buffer_head *bh;
732 	struct list_head *p;
733 	int err = 0;
734 
735 	spin_lock(lock);
736 repeat:
737 	list_for_each_prev(p, list) {
738 		bh = BH_ENTRY(p);
739 		if (buffer_locked(bh)) {
740 			get_bh(bh);
741 			spin_unlock(lock);
742 			wait_on_buffer(bh);
743 			if (!buffer_uptodate(bh))
744 				err = -EIO;
745 			brelse(bh);
746 			spin_lock(lock);
747 			goto repeat;
748 		}
749 	}
750 	spin_unlock(lock);
751 	return err;
752 }
753 
754 /**
755  * sync_mapping_buffers - write out and wait upon a mapping's "associated"
756  *                        buffers
757  * @mapping: the mapping which wants those buffers written
758  *
759  * Starts I/O against the buffers at mapping->private_list, and waits upon
760  * that I/O.
761  *
762  * Basically, this is a convenience function for fsync().
763  * @mapping is a file or directory which needs those buffers to be written for
764  * a successful fsync().
765  */
766 int sync_mapping_buffers(struct address_space *mapping)
767 {
768 	struct address_space *buffer_mapping = mapping->assoc_mapping;
769 
770 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
771 		return 0;
772 
773 	return fsync_buffers_list(&buffer_mapping->private_lock,
774 					&mapping->private_list);
775 }
776 EXPORT_SYMBOL(sync_mapping_buffers);
777 
778 /*
779  * Called when we've recently written block `bblock', and it is known that
780  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
781  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
782  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
783  */
784 void write_boundary_block(struct block_device *bdev,
785 			sector_t bblock, unsigned blocksize)
786 {
787 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
788 	if (bh) {
789 		if (buffer_dirty(bh))
790 			ll_rw_block(WRITE, 1, &bh);
791 		put_bh(bh);
792 	}
793 }
794 
795 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
796 {
797 	struct address_space *mapping = inode->i_mapping;
798 	struct address_space *buffer_mapping = bh->b_page->mapping;
799 
800 	mark_buffer_dirty(bh);
801 	if (!mapping->assoc_mapping) {
802 		mapping->assoc_mapping = buffer_mapping;
803 	} else {
804 		if (mapping->assoc_mapping != buffer_mapping)
805 			BUG();
806 	}
807 	if (list_empty(&bh->b_assoc_buffers)) {
808 		spin_lock(&buffer_mapping->private_lock);
809 		list_move_tail(&bh->b_assoc_buffers,
810 				&mapping->private_list);
811 		spin_unlock(&buffer_mapping->private_lock);
812 	}
813 }
814 EXPORT_SYMBOL(mark_buffer_dirty_inode);
815 
816 /*
817  * Add a page to the dirty page list.
818  *
819  * It is a sad fact of life that this function is called from several places
820  * deeply under spinlocking.  It may not sleep.
821  *
822  * If the page has buffers, the uptodate buffers are set dirty, to preserve
823  * dirty-state coherency between the page and the buffers.  If the page does
824  * not have buffers then when they are later attached they will all be set
825  * dirty.
826  *
827  * The buffers are dirtied before the page is dirtied.  There's a small race
828  * window in which a writepage caller may see the page cleanness but not the
829  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
830  * before the buffers, a concurrent writepage caller could clear the page dirty
831  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
832  * page on the dirty page list.
833  *
834  * We use private_lock to lock against try_to_free_buffers while using the
835  * page's buffer list.  Also use this to protect against clean buffers being
836  * added to the page after it was set dirty.
837  *
838  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
839  * address_space though.
840  */
841 int __set_page_dirty_buffers(struct page *page)
842 {
843 	struct address_space * const mapping = page->mapping;
844 
845 	spin_lock(&mapping->private_lock);
846 	if (page_has_buffers(page)) {
847 		struct buffer_head *head = page_buffers(page);
848 		struct buffer_head *bh = head;
849 
850 		do {
851 			set_buffer_dirty(bh);
852 			bh = bh->b_this_page;
853 		} while (bh != head);
854 	}
855 	spin_unlock(&mapping->private_lock);
856 
857 	if (!TestSetPageDirty(page)) {
858 		write_lock_irq(&mapping->tree_lock);
859 		if (page->mapping) {	/* Race with truncate? */
860 			if (mapping_cap_account_dirty(mapping))
861 				inc_page_state(nr_dirty);
862 			radix_tree_tag_set(&mapping->page_tree,
863 						page_index(page),
864 						PAGECACHE_TAG_DIRTY);
865 		}
866 		write_unlock_irq(&mapping->tree_lock);
867 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
868 	}
869 
870 	return 0;
871 }
872 EXPORT_SYMBOL(__set_page_dirty_buffers);
873 
874 /*
875  * Write out and wait upon a list of buffers.
876  *
877  * We have conflicting pressures: we want to make sure that all
878  * initially dirty buffers get waited on, but that any subsequently
879  * dirtied buffers don't.  After all, we don't want fsync to last
880  * forever if somebody is actively writing to the file.
881  *
882  * Do this in two main stages: first we copy dirty buffers to a
883  * temporary inode list, queueing the writes as we go.  Then we clean
884  * up, waiting for those writes to complete.
885  *
886  * During this second stage, any subsequent updates to the file may end
887  * up refiling the buffer on the original inode's dirty list again, so
888  * there is a chance we will end up with a buffer queued for write but
889  * not yet completed on that list.  So, as a final cleanup we go through
890  * the osync code to catch these locked, dirty buffers without requeuing
891  * any newly dirty buffers for write.
892  */
893 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
894 {
895 	struct buffer_head *bh;
896 	struct list_head tmp;
897 	int err = 0, err2;
898 
899 	INIT_LIST_HEAD(&tmp);
900 
901 	spin_lock(lock);
902 	while (!list_empty(list)) {
903 		bh = BH_ENTRY(list->next);
904 		list_del_init(&bh->b_assoc_buffers);
905 		if (buffer_dirty(bh) || buffer_locked(bh)) {
906 			list_add(&bh->b_assoc_buffers, &tmp);
907 			if (buffer_dirty(bh)) {
908 				get_bh(bh);
909 				spin_unlock(lock);
910 				/*
911 				 * Ensure any pending I/O completes so that
912 				 * ll_rw_block() actually writes the current
913 				 * contents - it is a noop if I/O is still in
914 				 * flight on potentially older contents.
915 				 */
916 				ll_rw_block(SWRITE, 1, &bh);
917 				brelse(bh);
918 				spin_lock(lock);
919 			}
920 		}
921 	}
922 
923 	while (!list_empty(&tmp)) {
924 		bh = BH_ENTRY(tmp.prev);
925 		__remove_assoc_queue(bh);
926 		get_bh(bh);
927 		spin_unlock(lock);
928 		wait_on_buffer(bh);
929 		if (!buffer_uptodate(bh))
930 			err = -EIO;
931 		brelse(bh);
932 		spin_lock(lock);
933 	}
934 
935 	spin_unlock(lock);
936 	err2 = osync_buffers_list(lock, list);
937 	if (err)
938 		return err;
939 	else
940 		return err2;
941 }
942 
943 /*
944  * Invalidate any and all dirty buffers on a given inode.  We are
945  * probably unmounting the fs, but that doesn't mean we have already
946  * done a sync().  Just drop the buffers from the inode list.
947  *
948  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
949  * assumes that all the buffers are against the blockdev.  Not true
950  * for reiserfs.
951  */
952 void invalidate_inode_buffers(struct inode *inode)
953 {
954 	if (inode_has_buffers(inode)) {
955 		struct address_space *mapping = &inode->i_data;
956 		struct list_head *list = &mapping->private_list;
957 		struct address_space *buffer_mapping = mapping->assoc_mapping;
958 
959 		spin_lock(&buffer_mapping->private_lock);
960 		while (!list_empty(list))
961 			__remove_assoc_queue(BH_ENTRY(list->next));
962 		spin_unlock(&buffer_mapping->private_lock);
963 	}
964 }
965 
966 /*
967  * Remove any clean buffers from the inode's buffer list.  This is called
968  * when we're trying to free the inode itself.  Those buffers can pin it.
969  *
970  * Returns true if all buffers were removed.
971  */
972 int remove_inode_buffers(struct inode *inode)
973 {
974 	int ret = 1;
975 
976 	if (inode_has_buffers(inode)) {
977 		struct address_space *mapping = &inode->i_data;
978 		struct list_head *list = &mapping->private_list;
979 		struct address_space *buffer_mapping = mapping->assoc_mapping;
980 
981 		spin_lock(&buffer_mapping->private_lock);
982 		while (!list_empty(list)) {
983 			struct buffer_head *bh = BH_ENTRY(list->next);
984 			if (buffer_dirty(bh)) {
985 				ret = 0;
986 				break;
987 			}
988 			__remove_assoc_queue(bh);
989 		}
990 		spin_unlock(&buffer_mapping->private_lock);
991 	}
992 	return ret;
993 }
994 
995 /*
996  * Create the appropriate buffers for a data page, given the size of
997  * each buffer.  Use the bh->b_this_page linked list to follow the
998  * buffers created.  Return NULL if unable to create more
999  * buffers.
1000  *
1001  * The retry flag is used to differentiate async IO (paging, swapping),
1002  * which may not fail, from ordinary buffer allocations, which may.
1003  */
1004 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
1005 		int retry)
1006 {
1007 	struct buffer_head *bh, *head;
1008 	long offset;
1009 
1010 try_again:
1011 	head = NULL;
1012 	offset = PAGE_SIZE;
1013 	while ((offset -= size) >= 0) {
1014 		bh = alloc_buffer_head(GFP_NOFS);
1015 		if (!bh)
1016 			goto no_grow;
1017 
1018 		bh->b_bdev = NULL;
1019 		bh->b_this_page = head;
1020 		bh->b_blocknr = -1;
1021 		head = bh;
1022 
1023 		bh->b_state = 0;
1024 		atomic_set(&bh->b_count, 0);
1025 		bh->b_private = NULL;
1026 		bh->b_size = size;
1027 
1028 		/* Link the buffer to its page */
1029 		set_bh_page(bh, page, offset);
1030 
1031 		init_buffer(bh, NULL, NULL);
1032 	}
1033 	return head;
1034 /*
1035  * In case anything failed, we just free everything we got.
1036  */
1037 no_grow:
1038 	if (head) {
1039 		do {
1040 			bh = head;
1041 			head = head->b_this_page;
1042 			free_buffer_head(bh);
1043 		} while (head);
1044 	}
1045 
1046 	/*
1047 	 * Return failure for non-async IO requests.  Async IO requests
1048 	 * are not allowed to fail, so we have to wait until buffer heads
1049 	 * become available.  But we don't want tasks sleeping with
1050 	 * partially complete buffers, so all were released above.
1051 	 */
1052 	if (!retry)
1053 		return NULL;
1054 
1055 	/* We're _really_ low on memory. Now we just
1056 	 * wait for old buffer heads to become free due to
1057 	 * finishing IO.  Since this is an async request and
1058 	 * the reserve list is empty, we're sure there are
1059 	 * async buffer heads in use.
1060 	 */
1061 	free_more_memory();
1062 	goto try_again;
1063 }
1064 EXPORT_SYMBOL_GPL(alloc_page_buffers);
1065 
1066 static inline void
1067 link_dev_buffers(struct page *page, struct buffer_head *head)
1068 {
1069 	struct buffer_head *bh, *tail;
1070 
1071 	bh = head;
1072 	do {
1073 		tail = bh;
1074 		bh = bh->b_this_page;
1075 	} while (bh);
1076 	tail->b_this_page = head;
1077 	attach_page_buffers(page, head);
1078 }
1079 
1080 /*
1081  * Initialise the state of a blockdev page's buffers.
1082  */
1083 static void
1084 init_page_buffers(struct page *page, struct block_device *bdev,
1085 			sector_t block, int size)
1086 {
1087 	struct buffer_head *head = page_buffers(page);
1088 	struct buffer_head *bh = head;
1089 	int uptodate = PageUptodate(page);
1090 
1091 	do {
1092 		if (!buffer_mapped(bh)) {
1093 			init_buffer(bh, NULL, NULL);
1094 			bh->b_bdev = bdev;
1095 			bh->b_blocknr = block;
1096 			if (uptodate)
1097 				set_buffer_uptodate(bh);
1098 			set_buffer_mapped(bh);
1099 		}
1100 		block++;
1101 		bh = bh->b_this_page;
1102 	} while (bh != head);
1103 }
1104 
1105 /*
1106  * Create the page-cache page that contains the requested block.
1107  *
1108  * This is used purely for blockdev mappings.
1109  */
1110 static struct page *
1111 grow_dev_page(struct block_device *bdev, sector_t block,
1112 		pgoff_t index, int size)
1113 {
1114 	struct inode *inode = bdev->bd_inode;
1115 	struct page *page;
1116 	struct buffer_head *bh;
1117 
1118 	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1119 	if (!page)
1120 		return NULL;
1121 
1122 	if (!PageLocked(page))
1123 		BUG();
1124 
1125 	if (page_has_buffers(page)) {
1126 		bh = page_buffers(page);
1127 		if (bh->b_size == size) {
1128 			init_page_buffers(page, bdev, block, size);
1129 			return page;
1130 		}
1131 		if (!try_to_free_buffers(page))
1132 			goto failed;
1133 	}
1134 
1135 	/*
1136 	 * Allocate some buffers for this page
1137 	 */
1138 	bh = alloc_page_buffers(page, size, 0);
1139 	if (!bh)
1140 		goto failed;
1141 
1142 	/*
1143 	 * Link the page to the buffers and initialise them.  Take the
1144 	 * lock to be atomic wrt __find_get_block(), which does not
1145 	 * run under the page lock.
1146 	 */
1147 	spin_lock(&inode->i_mapping->private_lock);
1148 	link_dev_buffers(page, bh);
1149 	init_page_buffers(page, bdev, block, size);
1150 	spin_unlock(&inode->i_mapping->private_lock);
1151 	return page;
1152 
1153 failed:
1154 	BUG();
1155 	unlock_page(page);
1156 	page_cache_release(page);
1157 	return NULL;
1158 }
1159 
1160 /*
1161  * Create buffers for the specified block device block's page.  If
1162  * that page was dirty, the buffers are set dirty also.
1163  *
1164  * Except that's a bug.  Attaching dirty buffers to a dirty
1165  * blockdev's page can result in filesystem corruption, because
1166  * some of those buffers may be aliases of filesystem data.
1167  * grow_dev_page() will go BUG() if this happens.
1168  */
1169 static int
1170 grow_buffers(struct block_device *bdev, sector_t block, int size)
1171 {
1172 	struct page *page;
1173 	pgoff_t index;
1174 	int sizebits;
1175 
1176 	sizebits = -1;
1177 	do {
1178 		sizebits++;
1179 	} while ((size << sizebits) < PAGE_SIZE);
1180 
1181 	index = block >> sizebits;
1182 	block = index << sizebits;
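	/*
	 * Worked example (illustrative): with 1k buffers and a 4k PAGE_SIZE,
	 * sizebits == 2, so block 4101 lands in page index 1025 and the
	 * page's first block is 4100.
	 */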
1183 
1184 	/* Create a page with the proper size buffers.. */
1185 	page = grow_dev_page(bdev, block, index, size);
1186 	if (!page)
1187 		return 0;
1188 	unlock_page(page);
1189 	page_cache_release(page);
1190 	return 1;
1191 }
1192 
1193 static struct buffer_head *
1194 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1195 {
1196 	/* Size must be multiple of hard sectorsize */
1197 	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1198 			(size < 512 || size > PAGE_SIZE))) {
1199 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1200 					size);
1201 		printk(KERN_ERR "hardsect size: %d\n",
1202 					bdev_hardsect_size(bdev));
1203 
1204 		dump_stack();
1205 		return NULL;
1206 	}
1207 
1208 	for (;;) {
1209 		struct buffer_head * bh;
1210 
1211 		bh = __find_get_block(bdev, block, size);
1212 		if (bh)
1213 			return bh;
1214 
1215 		if (!grow_buffers(bdev, block, size))
1216 			free_more_memory();
1217 	}
1218 }
1219 
1220 /*
1221  * The relationship between dirty buffers and dirty pages:
1222  *
1223  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1224  * the page is tagged dirty in its radix tree.
1225  *
1226  * At all times, the dirtiness of the buffers represents the dirtiness of
1227  * subsections of the page.  If the page has buffers, the page dirty bit is
1228  * merely a hint about the true dirty state.
1229  *
1230  * When a page is set dirty in its entirety, all its buffers are marked dirty
1231  * (if the page has buffers).
1232  *
1233  * When a buffer is marked dirty, its page is dirtied, but the page's other
1234  * buffers are not.
1235  *
1236  * Also.  When blockdev buffers are explicitly read with bread(), they
1237  * individually become uptodate.  But their backing page remains not
1238  * uptodate - even if all of its buffers are uptodate.  A subsequent
1239  * block_read_full_page() against that page will discover all the uptodate
1240  * buffers, will set the page uptodate and will perform no I/O.
1241  */
1242 
1243 /**
1244  * mark_buffer_dirty - mark a buffer_head as needing writeout
1245  * @bh: the buffer_head to mark dirty
1246  *
1247  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1248  * backing page dirty, then tag the page as dirty in its address_space's radix
1249  * tree and then attach the address_space's inode to its superblock's dirty
1250  * inode list.
1251  *
1252  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1253  * mapping->tree_lock and the global inode_lock.
1254  */
1255 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1256 {
1257 	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1258 		__set_page_dirty_nobuffers(bh->b_page);
1259 }
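
/*
 * Illustrative sketch (not part of the original file): the usual pattern
 * for modifying a metadata block is read, modify, mark dirty, release.
 * example_zero_block() is a hypothetical helper.
 */
static void example_zero_block(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh = sb_bread(sb, blocknr);

	if (!bh)
		return;				/* unreadable block */
	memset(bh->b_data, 0, bh->b_size);	/* modify the cached copy */
	mark_buffer_dirty(bh);			/* hand it to writeback */
	brelse(bh);				/* drop our reference */
}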
1260 
1261 /*
1262  * Decrement a buffer_head's reference count.  If all buffers against a page
1263  * have zero reference count, are clean and unlocked, and if the page is clean
1264  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1265  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1266  * a page but it ends up not being freed, and buffers may later be reattached).
1267  */
1268 void __brelse(struct buffer_head * buf)
1269 {
1270 	if (atomic_read(&buf->b_count)) {
1271 		put_bh(buf);
1272 		return;
1273 	}
1274 	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1275 	WARN_ON(1);
1276 }
1277 
1278 /*
1279  * bforget() is like brelse(), except it discards any
1280  * potentially dirty data.
1281  */
1282 void __bforget(struct buffer_head *bh)
1283 {
1284 	clear_buffer_dirty(bh);
1285 	if (!list_empty(&bh->b_assoc_buffers)) {
1286 		struct address_space *buffer_mapping = bh->b_page->mapping;
1287 
1288 		spin_lock(&buffer_mapping->private_lock);
1289 		list_del_init(&bh->b_assoc_buffers);
1290 		spin_unlock(&buffer_mapping->private_lock);
1291 	}
1292 	__brelse(bh);
1293 }
1294 
1295 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1296 {
1297 	lock_buffer(bh);
1298 	if (buffer_uptodate(bh)) {
1299 		unlock_buffer(bh);
1300 		return bh;
1301 	} else {
1302 		get_bh(bh);
1303 		bh->b_end_io = end_buffer_read_sync;
1304 		submit_bh(READ, bh);
1305 		wait_on_buffer(bh);
1306 		if (buffer_uptodate(bh))
1307 			return bh;
1308 	}
1309 	brelse(bh);
1310 	return NULL;
1311 }
1312 
1313 /*
1314  * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
1315  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1316  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1317  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1318  * CPU's LRUs at the same time.
1319  *
1320  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1321  * sb_find_get_block().
1322  *
1323  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1324  * a local interrupt disable for that.
1325  */
1326 
1327 #define BH_LRU_SIZE	8
1328 
1329 struct bh_lru {
1330 	struct buffer_head *bhs[BH_LRU_SIZE];
1331 };
1332 
1333 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1334 
1335 #ifdef CONFIG_SMP
1336 #define bh_lru_lock()	local_irq_disable()
1337 #define bh_lru_unlock()	local_irq_enable()
1338 #else
1339 #define bh_lru_lock()	preempt_disable()
1340 #define bh_lru_unlock()	preempt_enable()
1341 #endif
1342 
1343 static inline void check_irqs_on(void)
1344 {
1345 #ifdef irqs_disabled
1346 	BUG_ON(irqs_disabled());
1347 #endif
1348 }
1349 
1350 /*
1351  * The LRU management algorithm is dopey-but-simple.  Sorry.
1352  */
1353 static void bh_lru_install(struct buffer_head *bh)
1354 {
1355 	struct buffer_head *evictee = NULL;
1356 	struct bh_lru *lru;
1357 
1358 	check_irqs_on();
1359 	bh_lru_lock();
1360 	lru = &__get_cpu_var(bh_lrus);
1361 	if (lru->bhs[0] != bh) {
1362 		struct buffer_head *bhs[BH_LRU_SIZE];
1363 		int in;
1364 		int out = 0;
1365 
1366 		get_bh(bh);
1367 		bhs[out++] = bh;
1368 		for (in = 0; in < BH_LRU_SIZE; in++) {
1369 			struct buffer_head *bh2 = lru->bhs[in];
1370 
1371 			if (bh2 == bh) {
1372 				__brelse(bh2);
1373 			} else {
1374 				if (out >= BH_LRU_SIZE) {
1375 					BUG_ON(evictee != NULL);
1376 					evictee = bh2;
1377 				} else {
1378 					bhs[out++] = bh2;
1379 				}
1380 			}
1381 		}
1382 		while (out < BH_LRU_SIZE)
1383 			bhs[out++] = NULL;
1384 		memcpy(lru->bhs, bhs, sizeof(bhs));
1385 	}
1386 	bh_lru_unlock();
1387 
1388 	if (evictee)
1389 		__brelse(evictee);
1390 }
1391 
1392 /*
1393  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1394  */
1395 static struct buffer_head *
1396 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1397 {
1398 	struct buffer_head *ret = NULL;
1399 	struct bh_lru *lru;
1400 	int i;
1401 
1402 	check_irqs_on();
1403 	bh_lru_lock();
1404 	lru = &__get_cpu_var(bh_lrus);
1405 	for (i = 0; i < BH_LRU_SIZE; i++) {
1406 		struct buffer_head *bh = lru->bhs[i];
1407 
1408 		if (bh && bh->b_bdev == bdev &&
1409 				bh->b_blocknr == block && bh->b_size == size) {
1410 			if (i) {
1411 				while (i) {
1412 					lru->bhs[i] = lru->bhs[i - 1];
1413 					i--;
1414 				}
1415 				lru->bhs[0] = bh;
1416 			}
1417 			get_bh(bh);
1418 			ret = bh;
1419 			break;
1420 		}
1421 	}
1422 	bh_lru_unlock();
1423 	return ret;
1424 }
1425 
1426 /*
1427  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1428  * it in the LRU and mark it as accessed.  If it is not present then return
1429  * NULL
1430  */
1431 struct buffer_head *
1432 __find_get_block(struct block_device *bdev, sector_t block, int size)
1433 {
1434 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1435 
1436 	if (bh == NULL) {
1437 		bh = __find_get_block_slow(bdev, block);
1438 		if (bh)
1439 			bh_lru_install(bh);
1440 	}
1441 	if (bh)
1442 		touch_buffer(bh);
1443 	return bh;
1444 }
1445 EXPORT_SYMBOL(__find_get_block);
1446 
1447 /*
1448  * __getblk will locate (and, if necessary, create) the buffer_head
1449  * which corresponds to the passed block_device, block and size. The
1450  * returned buffer has its reference count incremented.
1451  *
1452  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1453  * illegal block number, __getblk() will happily return a buffer_head
1454  * which represents the non-existent block.  Very weird.
1455  *
1456  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1457  * attempt is failing.  FIXME, perhaps?
1458  */
1459 struct buffer_head *
1460 __getblk(struct block_device *bdev, sector_t block, int size)
1461 {
1462 	struct buffer_head *bh = __find_get_block(bdev, block, size);
1463 
1464 	might_sleep();
1465 	if (bh == NULL)
1466 		bh = __getblk_slow(bdev, block, size);
1467 	return bh;
1468 }
1469 EXPORT_SYMBOL(__getblk);
1470 
1471 /*
1472  * Do async read-ahead on a buffer..
1473  */
1474 void __breadahead(struct block_device *bdev, sector_t block, int size)
1475 {
1476 	struct buffer_head *bh = __getblk(bdev, block, size);
1477 	if (likely(bh)) {
1478 		ll_rw_block(READA, 1, &bh);
1479 		brelse(bh);
1480 	}
1481 }
1482 EXPORT_SYMBOL(__breadahead);
1483 
1484 /**
1485  *  __bread() - reads a specified block and returns the bh
1486  *  @bdev: the block_device to read from
1487  *  @block: number of block
1488  *  @size: size (in bytes) to read
1489  *
1490  *  Reads a specified block, and returns buffer head that contains it.
1491  *  It returns NULL if the block was unreadable.
1492  */
1493 struct buffer_head *
1494 __bread(struct block_device *bdev, sector_t block, int size)
1495 {
1496 	struct buffer_head *bh = __getblk(bdev, block, size);
1497 
1498 	if (likely(bh) && !buffer_uptodate(bh))
1499 		bh = __bread_slow(bh);
1500 	return bh;
1501 }
1502 EXPORT_SYMBOL(__bread);
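
/*
 * Illustrative sketch (not part of the original file): reading a block for
 * inspection.  Unlike __getblk(), __bread() can return NULL, so the result
 * must be checked before the data is touched.  example_read_block() is a
 * hypothetical helper.
 */
static int example_read_block(struct block_device *bdev, sector_t blocknr,
				int size)
{
	struct buffer_head *bh = __bread(bdev, blocknr, size);

	if (!bh)
		return -EIO;		/* the block was unreadable */
	/* ... look at bh->b_data, which is size bytes long ... */
	brelse(bh);
	return 0;
}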
1503 
1504 /*
1505  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1506  * This doesn't race because it runs in each cpu either in irq
1507  * or with preempt disabled.
1508  */
1509 static void invalidate_bh_lru(void *arg)
1510 {
1511 	struct bh_lru *b = &get_cpu_var(bh_lrus);
1512 	int i;
1513 
1514 	for (i = 0; i < BH_LRU_SIZE; i++) {
1515 		brelse(b->bhs[i]);
1516 		b->bhs[i] = NULL;
1517 	}
1518 	put_cpu_var(bh_lrus);
1519 }
1520 
1521 static void invalidate_bh_lrus(void)
1522 {
1523 	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1524 }
1525 
1526 void set_bh_page(struct buffer_head *bh,
1527 		struct page *page, unsigned long offset)
1528 {
1529 	bh->b_page = page;
1530 	if (offset >= PAGE_SIZE)
1531 		BUG();
1532 	if (PageHighMem(page))
1533 		/*
1534 		 * This catches illegal uses and preserves the offset:
1535 		 */
1536 		bh->b_data = (char *)(0 + offset);
1537 	else
1538 		bh->b_data = page_address(page) + offset;
1539 }
1540 EXPORT_SYMBOL(set_bh_page);
1541 
1542 /*
1543  * Called when truncating a buffer on a page completely.
1544  */
1545 static void discard_buffer(struct buffer_head * bh)
1546 {
1547 	lock_buffer(bh);
1548 	clear_buffer_dirty(bh);
1549 	bh->b_bdev = NULL;
1550 	clear_buffer_mapped(bh);
1551 	clear_buffer_req(bh);
1552 	clear_buffer_new(bh);
1553 	clear_buffer_delay(bh);
1554 	unlock_buffer(bh);
1555 }
1556 
1557 /**
1558  * try_to_release_page() - release old fs-specific metadata on a page
1559  *
1560  * @page: the page which the kernel is trying to free
1561  * @gfp_mask: memory allocation flags (and I/O mode)
1562  *
1563  * The address_space is to try to release any data against the page
1564  * (presumably at page->private).  If the release was successful, return `1'.
1565  * Otherwise return zero.
1566  *
1567  * The @gfp_mask argument specifies whether I/O may be performed to release
1568  * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1569  *
1570  * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1571  */
1572 int try_to_release_page(struct page *page, gfp_t gfp_mask)
1573 {
1574 	struct address_space * const mapping = page->mapping;
1575 
1576 	BUG_ON(!PageLocked(page));
1577 	if (PageWriteback(page))
1578 		return 0;
1579 
1580 	if (mapping && mapping->a_ops->releasepage)
1581 		return mapping->a_ops->releasepage(page, gfp_mask);
1582 	return try_to_free_buffers(page);
1583 }
1584 EXPORT_SYMBOL(try_to_release_page);
1585 
1586 /**
1587  * block_invalidatepage - invalidate part or all of a buffer-backed page
1588  *
1589  * @page: the page which is affected
1590  * @offset: the index of the truncation point
1591  *
1592  * block_invalidatepage() is called when all or part of the page has become
1593  * invalidated by a truncate operation.
1594  *
1595  * block_invalidatepage() does not have to release all buffers, but it must
1596  * ensure that no dirty buffer is left outside @offset and that no I/O
1597  * is underway against any of the blocks which are outside the truncation
1598  * point.  Because the caller is about to free (and possibly reuse) those
1599  * blocks on-disk.
1600  */
1601 int block_invalidatepage(struct page *page, unsigned long offset)
1602 {
1603 	struct buffer_head *head, *bh, *next;
1604 	unsigned int curr_off = 0;
1605 	int ret = 1;
1606 
1607 	BUG_ON(!PageLocked(page));
1608 	if (!page_has_buffers(page))
1609 		goto out;
1610 
1611 	head = page_buffers(page);
1612 	bh = head;
1613 	do {
1614 		unsigned int next_off = curr_off + bh->b_size;
1615 		next = bh->b_this_page;
1616 
1617 		/*
1618 		 * is this block fully invalidated?
1619 		 */
1620 		if (offset <= curr_off)
1621 			discard_buffer(bh);
1622 		curr_off = next_off;
1623 		bh = next;
1624 	} while (bh != head);
1625 
1626 	/*
1627 	 * We release buffers only if the entire page is being invalidated.
1628 	 * The get_block cached value has been unconditionally invalidated,
1629 	 * so real IO is not possible anymore.
1630 	 */
1631 	if (offset == 0)
1632 		ret = try_to_release_page(page, 0);
1633 out:
1634 	return ret;
1635 }
1636 EXPORT_SYMBOL(block_invalidatepage);
1637 
1638 int do_invalidatepage(struct page *page, unsigned long offset)
1639 {
1640 	int (*invalidatepage)(struct page *, unsigned long);
1641 	invalidatepage = page->mapping->a_ops->invalidatepage;
1642 	if (invalidatepage == NULL)
1643 		invalidatepage = block_invalidatepage;
1644 	return (*invalidatepage)(page, offset);
1645 }
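
/*
 * Illustrative sketch (not part of the original file): a buffer-backed
 * filesystem may leave ->invalidatepage NULL, in which case
 * do_invalidatepage() falls back to block_invalidatepage(), or it may wire
 * it up explicitly in its address_space_operations.  example_aops is a
 * hypothetical table.
 */
static struct address_space_operations example_aops = {
	.invalidatepage	= block_invalidatepage,
	/* .readpage, .writepage, .releasepage etc. omitted from this sketch */
};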
1646 
1647 /*
1648  * We attach and possibly dirty the buffers atomically wrt
1649  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1650  * is already excluded via the page lock.
1651  */
1652 void create_empty_buffers(struct page *page,
1653 			unsigned long blocksize, unsigned long b_state)
1654 {
1655 	struct buffer_head *bh, *head, *tail;
1656 
1657 	head = alloc_page_buffers(page, blocksize, 1);
1658 	bh = head;
1659 	do {
1660 		bh->b_state |= b_state;
1661 		tail = bh;
1662 		bh = bh->b_this_page;
1663 	} while (bh);
1664 	tail->b_this_page = head;
1665 
1666 	spin_lock(&page->mapping->private_lock);
1667 	if (PageUptodate(page) || PageDirty(page)) {
1668 		bh = head;
1669 		do {
1670 			if (PageDirty(page))
1671 				set_buffer_dirty(bh);
1672 			if (PageUptodate(page))
1673 				set_buffer_uptodate(bh);
1674 			bh = bh->b_this_page;
1675 		} while (bh != head);
1676 	}
1677 	attach_page_buffers(page, head);
1678 	spin_unlock(&page->mapping->private_lock);
1679 }
1680 EXPORT_SYMBOL(create_empty_buffers);
1681 
1682 /*
1683  * We are taking a block for data and we don't want any output from any
1684  * buffer-cache aliases from the moment this function returns until
1685  * something explicitly marks the buffer dirty (hopefully that will not
1686  * happen until we free that block ;-)
1687  * We don't even need to mark it not-uptodate - nobody can expect
1688  * anything from a newly allocated buffer anyway. We used to use
1689  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1690  * don't want to mark the alias unmapped, for example - it would confuse
1691  * anyone who might pick it up with bread() afterwards...
1692  *
1693  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1694  * be writeout I/O going on against recently-freed buffers.  We don't
1695  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1696  * only if we really need to.  That happens here.
1697  */
1698 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1699 {
1700 	struct buffer_head *old_bh;
1701 
1702 	might_sleep();
1703 
1704 	old_bh = __find_get_block_slow(bdev, block);
1705 	if (old_bh) {
1706 		clear_buffer_dirty(old_bh);
1707 		wait_on_buffer(old_bh);
1708 		clear_buffer_req(old_bh);
1709 		__brelse(old_bh);
1710 	}
1711 }
1712 EXPORT_SYMBOL(unmap_underlying_metadata);
1713 
1714 /*
1715  * NOTE! All mapped/uptodate combinations are valid:
1716  *
1717  *	Mapped	Uptodate	Meaning
1718  *
1719  *	No	No		"unknown" - must do get_block()
1720  *	No	Yes		"hole" - zero-filled
1721  *	Yes	No		"allocated" - allocated on disk, not read in
1722  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1723  *
1724  * "Dirty" is valid only with the last case (mapped+uptodate).
1725  */
1726 
1727 /*
1728  * While block_write_full_page is writing back the dirty buffers under
1729  * the page lock, whoever dirtied the buffers may decide to clean them
1730  * again at any time.  We handle that by only looking at the buffer
1731  * state inside lock_buffer().
1732  *
1733  * If block_write_full_page() is called for regular writeback
1734  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1735  * locked buffer.   This only can happen if someone has written the buffer
1736  * directly, with submit_bh().  At the address_space level PageWriteback
1737  * prevents this contention from occurring.
1738  */
1739 static int __block_write_full_page(struct inode *inode, struct page *page,
1740 			get_block_t *get_block, struct writeback_control *wbc)
1741 {
1742 	int err;
1743 	sector_t block;
1744 	sector_t last_block;
1745 	struct buffer_head *bh, *head;
1746 	int nr_underway = 0;
1747 
1748 	BUG_ON(!PageLocked(page));
1749 
1750 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1751 
1752 	if (!page_has_buffers(page)) {
1753 		create_empty_buffers(page, 1 << inode->i_blkbits,
1754 					(1 << BH_Dirty)|(1 << BH_Uptodate));
1755 	}
1756 
1757 	/*
1758 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1759 	 * here, and the (potentially unmapped) buffers may become dirty at
1760 	 * any time.  If a buffer becomes dirty here after we've inspected it
1761 	 * then we just miss that fact, and the page stays dirty.
1762 	 *
1763 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1764 	 * handle that here by just cleaning them.
1765 	 */
1766 
1767 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1768 	head = page_buffers(page);
1769 	bh = head;
1770 
1771 	/*
1772 	 * Get all the dirty buffers mapped to disk addresses and
1773 	 * handle any aliases from the underlying blockdev's mapping.
1774 	 */
1775 	do {
1776 		if (block > last_block) {
1777 			/*
1778 			 * mapped buffers outside i_size will occur, because
1779 			 * this page can be outside i_size when there is a
1780 			 * truncate in progress.
1781 			 */
1782 			/*
1783 			 * The buffer was zeroed by block_write_full_page()
1784 			 */
1785 			clear_buffer_dirty(bh);
1786 			set_buffer_uptodate(bh);
1787 		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1788 			err = get_block(inode, block, bh, 1);
1789 			if (err)
1790 				goto recover;
1791 			if (buffer_new(bh)) {
1792 				/* blockdev mappings never come here */
1793 				clear_buffer_new(bh);
1794 				unmap_underlying_metadata(bh->b_bdev,
1795 							bh->b_blocknr);
1796 			}
1797 		}
1798 		bh = bh->b_this_page;
1799 		block++;
1800 	} while (bh != head);
1801 
1802 	do {
1803 		if (!buffer_mapped(bh))
1804 			continue;
1805 		/*
1806 		 * If it's a fully non-blocking write attempt and we cannot
1807 		 * lock the buffer then redirty the page.  Note that this can
1808 		 * potentially cause a busy-wait loop from pdflush and kswapd
1809 		 * activity, but those code paths have their own higher-level
1810 		 * throttling.
1811 		 */
1812 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1813 			lock_buffer(bh);
1814 		} else if (test_set_buffer_locked(bh)) {
1815 			redirty_page_for_writepage(wbc, page);
1816 			continue;
1817 		}
1818 		if (test_clear_buffer_dirty(bh)) {
1819 			mark_buffer_async_write(bh);
1820 		} else {
1821 			unlock_buffer(bh);
1822 		}
1823 	} while ((bh = bh->b_this_page) != head);
1824 
1825 	/*
1826 	 * The page and its buffers are protected by PageWriteback(), so we can
1827 	 * drop the bh refcounts early.
1828 	 */
1829 	BUG_ON(PageWriteback(page));
1830 	set_page_writeback(page);
1831 
1832 	do {
1833 		struct buffer_head *next = bh->b_this_page;
1834 		if (buffer_async_write(bh)) {
1835 			submit_bh(WRITE, bh);
1836 			nr_underway++;
1837 		}
1838 		bh = next;
1839 	} while (bh != head);
1840 	unlock_page(page);
1841 
1842 	err = 0;
1843 done:
1844 	if (nr_underway == 0) {
1845 		/*
1846 		 * The page was marked dirty, but the buffers were
1847 		 * clean.  Someone wrote them back by hand with
1848 		 * ll_rw_block/submit_bh.  A rare case.
1849 		 */
1850 		int uptodate = 1;
1851 		do {
1852 			if (!buffer_uptodate(bh)) {
1853 				uptodate = 0;
1854 				break;
1855 			}
1856 			bh = bh->b_this_page;
1857 		} while (bh != head);
1858 		if (uptodate)
1859 			SetPageUptodate(page);
1860 		end_page_writeback(page);
1861 		/*
1862 		 * The page and buffer_heads can be released at any time from
1863 		 * here on.
1864 		 */
1865 		wbc->pages_skipped++;	/* We didn't write this page */
1866 	}
1867 	return err;
1868 
1869 recover:
1870 	/*
1871 	 * ENOSPC, or some other error.  We may already have added some
1872 	 * blocks to the file, so we need to write these out to avoid
1873 	 * exposing stale data.
1874 	 * The page is currently locked and not marked for writeback
1875 	 */
1876 	bh = head;
1877 	/* Recovery: lock and submit the mapped buffers */
1878 	do {
1879 		if (buffer_mapped(bh) && buffer_dirty(bh)) {
1880 			lock_buffer(bh);
1881 			mark_buffer_async_write(bh);
1882 		} else {
1883 			/*
1884 			 * The buffer may have been set dirty during
1885 			 * attachment to a dirty page.
1886 			 */
1887 			clear_buffer_dirty(bh);
1888 		}
1889 	} while ((bh = bh->b_this_page) != head);
1890 	SetPageError(page);
1891 	BUG_ON(PageWriteback(page));
1892 	set_page_writeback(page);
1893 	unlock_page(page);
1894 	do {
1895 		struct buffer_head *next = bh->b_this_page;
1896 		if (buffer_async_write(bh)) {
1897 			clear_buffer_dirty(bh);
1898 			submit_bh(WRITE, bh);
1899 			nr_underway++;
1900 		}
1901 		bh = next;
1902 	} while (bh != head);
1903 	goto done;
1904 }
1905 
1906 static int __block_prepare_write(struct inode *inode, struct page *page,
1907 		unsigned from, unsigned to, get_block_t *get_block)
1908 {
1909 	unsigned block_start, block_end;
1910 	sector_t block;
1911 	int err = 0;
1912 	unsigned blocksize, bbits;
1913 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1914 
1915 	BUG_ON(!PageLocked(page));
1916 	BUG_ON(from > PAGE_CACHE_SIZE);
1917 	BUG_ON(to > PAGE_CACHE_SIZE);
1918 	BUG_ON(from > to);
1919 
1920 	blocksize = 1 << inode->i_blkbits;
1921 	if (!page_has_buffers(page))
1922 		create_empty_buffers(page, blocksize, 0);
1923 	head = page_buffers(page);
1924 
1925 	bbits = inode->i_blkbits;
1926 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1927 
1928 	for (bh = head, block_start = 0; bh != head || !block_start;
1929 	    block++, block_start = block_end, bh = bh->b_this_page) {
1930 		block_end = block_start + blocksize;
1931 		if (block_end <= from || block_start >= to) {
1932 			if (PageUptodate(page)) {
1933 				if (!buffer_uptodate(bh))
1934 					set_buffer_uptodate(bh);
1935 			}
1936 			continue;
1937 		}
1938 		if (buffer_new(bh))
1939 			clear_buffer_new(bh);
1940 		if (!buffer_mapped(bh)) {
1941 			err = get_block(inode, block, bh, 1);
1942 			if (err)
1943 				break;
1944 			if (buffer_new(bh)) {
1945 				unmap_underlying_metadata(bh->b_bdev,
1946 							bh->b_blocknr);
1947 				if (PageUptodate(page)) {
1948 					set_buffer_uptodate(bh);
1949 					continue;
1950 				}
1951 				if (block_end > to || block_start < from) {
1952 					void *kaddr;
1953 
1954 					kaddr = kmap_atomic(page, KM_USER0);
1955 					if (block_end > to)
1956 						memset(kaddr+to, 0,
1957 							block_end-to);
1958 					if (block_start < from)
1959 						memset(kaddr+block_start,
1960 							0, from-block_start);
1961 					flush_dcache_page(page);
1962 					kunmap_atomic(kaddr, KM_USER0);
1963 				}
1964 				continue;
1965 			}
1966 		}
1967 		if (PageUptodate(page)) {
1968 			if (!buffer_uptodate(bh))
1969 				set_buffer_uptodate(bh);
1970 			continue;
1971 		}
1972 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1973 		     (block_start < from || block_end > to)) {
1974 			ll_rw_block(READ, 1, &bh);
1975 			*wait_bh++ = bh;
1976 		}
1977 	}
1978 	/*
1979 	 * If we issued read requests - let them complete.
1980 	 */
1981 	while (wait_bh > wait) {
1982 		wait_on_buffer(*--wait_bh);
1983 		if (!buffer_uptodate(*wait_bh))
1984 			err = -EIO;
1985 	}
1986 	if (!err) {
1987 		bh = head;
1988 		do {
1989 			if (buffer_new(bh))
1990 				clear_buffer_new(bh);
1991 		} while ((bh = bh->b_this_page) != head);
1992 		return 0;
1993 	}
1994 	/* Error case: */
1995 	/*
1996 	 * Zero out any newly allocated blocks to avoid exposing stale
1997 	 * data.  If BH_New is set, we know that the block was newly
1998 	 * allocated in the above loop.
1999 	 */
2000 	bh = head;
2001 	block_start = 0;
2002 	do {
2003 		block_end = block_start+blocksize;
2004 		if (block_end <= from)
2005 			goto next_bh;
2006 		if (block_start >= to)
2007 			break;
2008 		if (buffer_new(bh)) {
2009 			void *kaddr;
2010 
2011 			clear_buffer_new(bh);
2012 			kaddr = kmap_atomic(page, KM_USER0);
2013 			memset(kaddr+block_start, 0, bh->b_size);
2014 			kunmap_atomic(kaddr, KM_USER0);
2015 			set_buffer_uptodate(bh);
2016 			mark_buffer_dirty(bh);
2017 		}
2018 next_bh:
2019 		block_start = block_end;
2020 		bh = bh->b_this_page;
2021 	} while (bh != head);
2022 	return err;
2023 }
2024 
2025 static int __block_commit_write(struct inode *inode, struct page *page,
2026 		unsigned from, unsigned to)
2027 {
2028 	unsigned block_start, block_end;
2029 	int partial = 0;
2030 	unsigned blocksize;
2031 	struct buffer_head *bh, *head;
2032 
2033 	blocksize = 1 << inode->i_blkbits;
2034 
2035 	for (bh = head = page_buffers(page), block_start = 0;
2036 	    bh != head || !block_start;
2037 	    block_start = block_end, bh = bh->b_this_page) {
2038 		block_end = block_start + blocksize;
2039 		if (block_end <= from || block_start >= to) {
2040 			if (!buffer_uptodate(bh))
2041 				partial = 1;
2042 		} else {
2043 			set_buffer_uptodate(bh);
2044 			mark_buffer_dirty(bh);
2045 		}
2046 	}
2047 
2048 	/*
2049 	 * If this is a partial write which happened to make all buffers
2050 	 * uptodate then we can optimize away a bogus readpage() for
2051 	 * the next read(). Here we 'discover' whether the page went
2052 	 * uptodate as a result of this (potentially partial) write.
2053 	 */
2054 	if (!partial)
2055 		SetPageUptodate(page);
2056 	return 0;
2057 }
2058 
2059 /*
2060  * Generic "read page" function for block devices that have the normal
2061  * get_block functionality.  This covers most block-device-backed filesystems.
2062  * Reads the page asynchronously --- the unlock_buffer() and
2063  * set/clear_buffer_uptodate() functions propagate buffer state into the
2064  * page struct once IO has completed.
2065  */
2066 int block_read_full_page(struct page *page, get_block_t *get_block)
2067 {
2068 	struct inode *inode = page->mapping->host;
2069 	sector_t iblock, lblock;
2070 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2071 	unsigned int blocksize;
2072 	int nr, i;
2073 	int fully_mapped = 1;
2074 
2075 	BUG_ON(!PageLocked(page));
2076 	blocksize = 1 << inode->i_blkbits;
2077 	if (!page_has_buffers(page))
2078 		create_empty_buffers(page, blocksize, 0);
2079 	head = page_buffers(page);
2080 
2081 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2082 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2083 	bh = head;
2084 	nr = 0;
2085 	i = 0;
2086 
2087 	do {
2088 		if (buffer_uptodate(bh))
2089 			continue;
2090 
2091 		if (!buffer_mapped(bh)) {
2092 			int err = 0;
2093 
2094 			fully_mapped = 0;
2095 			if (iblock < lblock) {
2096 				err = get_block(inode, iblock, bh, 0);
2097 				if (err)
2098 					SetPageError(page);
2099 			}
2100 			if (!buffer_mapped(bh)) {
2101 				void *kaddr = kmap_atomic(page, KM_USER0);
2102 				memset(kaddr + i * blocksize, 0, blocksize);
2103 				flush_dcache_page(page);
2104 				kunmap_atomic(kaddr, KM_USER0);
2105 				if (!err)
2106 					set_buffer_uptodate(bh);
2107 				continue;
2108 			}
2109 			/*
2110 			 * get_block() might have updated the buffer
2111 			 * synchronously
2112 			 */
2113 			if (buffer_uptodate(bh))
2114 				continue;
2115 		}
2116 		arr[nr++] = bh;
2117 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2118 
2119 	if (fully_mapped)
2120 		SetPageMappedToDisk(page);
2121 
2122 	if (!nr) {
2123 		/*
2124 		 * All buffers are uptodate - we can set the page uptodate
2125 		 * as well. But not if get_block() returned an error.
2126 		 */
2127 		if (!PageError(page))
2128 			SetPageUptodate(page);
2129 		unlock_page(page);
2130 		return 0;
2131 	}
2132 
2133 	/* Stage 2: lock the buffers */
2134 	for (i = 0; i < nr; i++) {
2135 		bh = arr[i];
2136 		lock_buffer(bh);
2137 		mark_buffer_async_read(bh);
2138 	}
2139 
2140 	/*
2141 	 * Stage 3: start the IO.  Check for uptodateness
2142 	 * inside the buffer lock in case another process reading
2143 	 * the underlying blockdev brought it uptodate (the sct fix).
2144 	 */
2145 	for (i = 0; i < nr; i++) {
2146 		bh = arr[i];
2147 		if (buffer_uptodate(bh))
2148 			end_buffer_async_read(bh, 1);
2149 		else
2150 			submit_bh(READ, bh);
2151 	}
2152 	return 0;
2153 }
2154 
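#if 0
/*
 * Illustrative sketch (not built): a simple get_block-based filesystem
 * normally wires block_read_full_page() into its ->readpage as a
 * one-line wrapper.  myfs_get_block is a hypothetical get_block_t.
 */
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}
#endif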
2155 /* utility function for filesystems that need to do work on expanding
2156  * truncates.  Uses prepare/commit_write to allow the filesystem to
2157  * deal with the hole.
2158  */
2159 static int __generic_cont_expand(struct inode *inode, loff_t size,
2160 				 pgoff_t index, unsigned int offset)
2161 {
2162 	struct address_space *mapping = inode->i_mapping;
2163 	struct page *page;
2164 	unsigned long limit;
2165 	int err;
2166 
2167 	err = -EFBIG;
2168 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2169 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2170 		send_sig(SIGXFSZ, current, 0);
2171 		goto out;
2172 	}
2173 	if (size > inode->i_sb->s_maxbytes)
2174 		goto out;
2175 
2176 	err = -ENOMEM;
2177 	page = grab_cache_page(mapping, index);
2178 	if (!page)
2179 		goto out;
2180 	err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2181 	if (err) {
2182 		/*
2183 		 * ->prepare_write() may have instantiated a few blocks
2184 		 * outside i_size.  Trim these off again.
2185 		 */
2186 		unlock_page(page);
2187 		page_cache_release(page);
2188 		vmtruncate(inode, inode->i_size);
2189 		goto out;
2190 	}
2191 
2192 	err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2193 
2194 	unlock_page(page);
2195 	page_cache_release(page);
2196 	if (err > 0)
2197 		err = 0;
2198 out:
2199 	return err;
2200 }
2201 
2202 int generic_cont_expand(struct inode *inode, loff_t size)
2203 {
2204 	pgoff_t index;
2205 	unsigned int offset;
2206 
2207 	offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2208 
2209 	/* ugh.  in prepare/commit_write, if from==to==start of block, we
2210 	 * skip the prepare.  make sure we never send an offset for the start
2211 	 * of a block.
2212 	 */
2213 	if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2214 		/* caller must handle this extra byte. */
2215 		offset++;
2216 	}
2217 	index = size >> PAGE_CACHE_SHIFT;
2218 
2219 	return __generic_cont_expand(inode, size, index, offset);
2220 }
2221 
2222 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2223 {
2224 	loff_t pos = size - 1;
2225 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2226 	unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2227 
2228 	/* prepare/commit_write can handle the from == to == start-of-block case. */
2229 	return __generic_cont_expand(inode, size, index, offset);
2230 }
2231 
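#if 0
/*
 * Illustrative sketch (not built): growing a file from a ->setattr
 * style path by zero-filling out to the new size with the helper
 * above.  myfs_grow() is a hypothetical wrapper around the real
 * generic_cont_expand_simple().
 */
static int myfs_grow(struct inode *inode, loff_t new_size)
{
	int err = 0;

	if (new_size > inode->i_size)
		err = generic_cont_expand_simple(inode, new_size);
	if (!err)
		mark_inode_dirty(inode);
	return err;
}
#endif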
2232 /*
2233  * For moronic filesystems that do not allow holes in files.
2234  * We may have to extend the file.
2235  */
2236 
2237 int cont_prepare_write(struct page *page, unsigned offset,
2238 		unsigned to, get_block_t *get_block, loff_t *bytes)
2239 {
2240 	struct address_space *mapping = page->mapping;
2241 	struct inode *inode = mapping->host;
2242 	struct page *new_page;
2243 	pgoff_t pgpos;
2244 	long status;
2245 	unsigned zerofrom;
2246 	unsigned blocksize = 1 << inode->i_blkbits;
2247 	void *kaddr;
2248 
2249 	while (page->index > (pgpos = *bytes >> PAGE_CACHE_SHIFT)) {
2250 		status = -ENOMEM;
2251 		new_page = grab_cache_page(mapping, pgpos);
2252 		if (!new_page)
2253 			goto out;
2254 		/* we might sleep */
2255 		if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2256 			unlock_page(new_page);
2257 			page_cache_release(new_page);
2258 			continue;
2259 		}
2260 		zerofrom = *bytes & ~PAGE_CACHE_MASK;
2261 		if (zerofrom & (blocksize-1)) {
2262 			*bytes |= (blocksize-1);
2263 			(*bytes)++;
2264 		}
2265 		status = __block_prepare_write(inode, new_page, zerofrom,
2266 						PAGE_CACHE_SIZE, get_block);
2267 		if (status)
2268 			goto out_unmap;
2269 		kaddr = kmap_atomic(new_page, KM_USER0);
2270 		memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2271 		flush_dcache_page(new_page);
2272 		kunmap_atomic(kaddr, KM_USER0);
2273 		generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2274 		unlock_page(new_page);
2275 		page_cache_release(new_page);
2276 	}
2277 
2278 	if (page->index < pgpos) {
2279 		/* completely inside the area */
2280 		zerofrom = offset;
2281 	} else {
2282 		/* page covers the boundary, find the boundary offset */
2283 		zerofrom = *bytes & ~PAGE_CACHE_MASK;
2284 
2285 		/* if we will expand the thing last block will be filled */
2286 		if (to > zerofrom && (zerofrom & (blocksize-1))) {
2287 			*bytes |= (blocksize-1);
2288 			(*bytes)++;
2289 		}
2290 
2291 		/* starting below the boundary? Nothing to zero out */
2292 		if (offset <= zerofrom)
2293 			zerofrom = offset;
2294 	}
2295 	status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2296 	if (status)
2297 		goto out1;
2298 	if (zerofrom < offset) {
2299 		kaddr = kmap_atomic(page, KM_USER0);
2300 		memset(kaddr+zerofrom, 0, offset-zerofrom);
2301 		flush_dcache_page(page);
2302 		kunmap_atomic(kaddr, KM_USER0);
2303 		__block_commit_write(inode, page, zerofrom, offset);
2304 	}
2305 	return 0;
2306 out1:
2307 	ClearPageUptodate(page);
2308 	return status;
2309 
2310 out_unmap:
2311 	ClearPageUptodate(new_page);
2312 	unlock_page(new_page);
2313 	page_cache_release(new_page);
2314 out:
2315 	return status;
2316 }
2317 
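#if 0
/*
 * Illustrative sketch (not built): a FAT-like filesystem that cannot
 * represent holes uses cont_prepare_write() as its ->prepare_write,
 * passing a per-inode high-water mark so the gap between the old end
 * of data and this write gets zero-filled.  contfs_get_block and the
 * mmu_private field of the hypothetical CONTFS_I() inode are made up.
 */
static int contfs_prepare_write(struct file *file, struct page *page,
				unsigned from, unsigned to)
{
	return cont_prepare_write(page, from, to, contfs_get_block,
				  &CONTFS_I(page->mapping->host)->mmu_private);
}
#endif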
2318 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2319 			get_block_t *get_block)
2320 {
2321 	struct inode *inode = page->mapping->host;
2322 	int err = __block_prepare_write(inode, page, from, to, get_block);
2323 	if (err)
2324 		ClearPageUptodate(page);
2325 	return err;
2326 }
2327 
2328 int block_commit_write(struct page *page, unsigned from, unsigned to)
2329 {
2330 	struct inode *inode = page->mapping->host;
2331 	__block_commit_write(inode,page,from,to);
2332 	return 0;
2333 }
2334 
2335 int generic_commit_write(struct file *file, struct page *page,
2336 		unsigned from, unsigned to)
2337 {
2338 	struct inode *inode = page->mapping->host;
2339 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2340 	__block_commit_write(inode,page,from,to);
2341 	/*
2342 	 * No need to use i_size_read() here, the i_size
2343 	 * cannot change under us because we hold i_mutex.
2344 	 */
2345 	if (pos > inode->i_size) {
2346 		i_size_write(inode, pos);
2347 		mark_inode_dirty(inode);
2348 	}
2349 	return 0;
2350 }
2351 
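#if 0
/*
 * Illustrative sketch (not built): the usual prepare/commit pairing for
 * a filesystem that allows holes - block_prepare_write() maps the
 * blocks and reads in the edges, generic_commit_write() dirties the
 * buffers and updates i_size.  myfs_get_block and myfs_aops are
 * hypothetical names.
 */
static int myfs_prepare_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, myfs_get_block);
}

static struct address_space_operations myfs_aops = {
	.prepare_write	= myfs_prepare_write,
	.commit_write	= generic_commit_write,
	.sync_page	= block_sync_page,
};
#endif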
2352 
2353 /*
2354  * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2355  * immediately, while under the page lock.  So it needs a special end_io
2356  * handler which does not touch the bh after unlocking it.
2357  *
2358  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2359  * a race there is benign: unlock_buffer() only uses the bh's address for
2360  * hashing after unlocking the buffer, so it doesn't actually touch the bh
2361  * itself.
2362  */
2363 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2364 {
2365 	if (uptodate) {
2366 		set_buffer_uptodate(bh);
2367 	} else {
2368 		/* This happens, due to failed READA attempts. */
2369 		clear_buffer_uptodate(bh);
2370 	}
2371 	unlock_buffer(bh);
2372 }
2373 
2374 /*
2375  * On entry, the page is fully not uptodate.
2376  * On exit the page is fully uptodate in the areas outside (from,to)
2377  */
2378 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2379 			get_block_t *get_block)
2380 {
2381 	struct inode *inode = page->mapping->host;
2382 	const unsigned blkbits = inode->i_blkbits;
2383 	const unsigned blocksize = 1 << blkbits;
2384 	struct buffer_head map_bh;
2385 	struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2386 	unsigned block_in_page;
2387 	unsigned block_start;
2388 	sector_t block_in_file;
2389 	char *kaddr;
2390 	int nr_reads = 0;
2391 	int i;
2392 	int ret = 0;
2393 	int is_mapped_to_disk = 1;
2394 	int dirtied_it = 0;
2395 
2396 	if (PageMappedToDisk(page))
2397 		return 0;
2398 
2399 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2400 	map_bh.b_page = page;
2401 
2402 	/*
2403 	 * We loop across all blocks in the page, whether or not they are
2404 	 * part of the affected region.  This is so we can discover if the
2405 	 * page is fully mapped-to-disk.
2406 	 */
2407 	for (block_start = 0, block_in_page = 0;
2408 		  block_start < PAGE_CACHE_SIZE;
2409 		  block_in_page++, block_start += blocksize) {
2410 		unsigned block_end = block_start + blocksize;
2411 		int create;
2412 
2413 		map_bh.b_state = 0;
2414 		create = 1;
2415 		if (block_start >= to)
2416 			create = 0;
2417 		ret = get_block(inode, block_in_file + block_in_page,
2418 					&map_bh, create);
2419 		if (ret)
2420 			goto failed;
2421 		if (!buffer_mapped(&map_bh))
2422 			is_mapped_to_disk = 0;
2423 		if (buffer_new(&map_bh))
2424 			unmap_underlying_metadata(map_bh.b_bdev,
2425 							map_bh.b_blocknr);
2426 		if (PageUptodate(page))
2427 			continue;
2428 		if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2429 			kaddr = kmap_atomic(page, KM_USER0);
2430 			if (block_start < from) {
2431 				memset(kaddr+block_start, 0, from-block_start);
2432 				dirtied_it = 1;
2433 			}
2434 			if (block_end > to) {
2435 				memset(kaddr + to, 0, block_end - to);
2436 				dirtied_it = 1;
2437 			}
2438 			flush_dcache_page(page);
2439 			kunmap_atomic(kaddr, KM_USER0);
2440 			continue;
2441 		}
2442 		if (buffer_uptodate(&map_bh))
2443 			continue;	/* reiserfs does this */
2444 		if (block_start < from || block_end > to) {
2445 			struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2446 
2447 			if (!bh) {
2448 				ret = -ENOMEM;
2449 				goto failed;
2450 			}
2451 			bh->b_state = map_bh.b_state;
2452 			atomic_set(&bh->b_count, 0);
2453 			bh->b_this_page = NULL;
2454 			bh->b_page = page;
2455 			bh->b_blocknr = map_bh.b_blocknr;
2456 			bh->b_size = blocksize;
2457 			bh->b_data = (char *)(long)block_start;
2458 			bh->b_bdev = map_bh.b_bdev;
2459 			bh->b_private = NULL;
2460 			read_bh[nr_reads++] = bh;
2461 		}
2462 	}
2463 
2464 	if (nr_reads) {
2465 		struct buffer_head *bh;
2466 
2467 		/*
2468 		 * The page is locked, so these buffers are protected from
2469 		 * any VM or truncate activity.  Hence we don't need to care
2470 		 * for the buffer_head refcounts.
2471 		 */
2472 		for (i = 0; i < nr_reads; i++) {
2473 			bh = read_bh[i];
2474 			lock_buffer(bh);
2475 			bh->b_end_io = end_buffer_read_nobh;
2476 			submit_bh(READ, bh);
2477 		}
2478 		for (i = 0; i < nr_reads; i++) {
2479 			bh = read_bh[i];
2480 			wait_on_buffer(bh);
2481 			if (!buffer_uptodate(bh))
2482 				ret = -EIO;
2483 			free_buffer_head(bh);
2484 			read_bh[i] = NULL;
2485 		}
2486 		if (ret)
2487 			goto failed;
2488 	}
2489 
2490 	if (is_mapped_to_disk)
2491 		SetPageMappedToDisk(page);
2492 	SetPageUptodate(page);
2493 
2494 	/*
2495 	 * Setting the page dirty here isn't necessary for the prepare_write
2496 	 * function - commit_write will do that.  But if/when this function is
2497 	 * used within the pagefault handler to ensure that all mmapped pages
2498 	 * have backing space in the filesystem, we will need to dirty the page
2499 	 * if its contents were altered.
2500 	 */
2501 	if (dirtied_it)
2502 		set_page_dirty(page);
2503 
2504 	return 0;
2505 
2506 failed:
2507 	for (i = 0; i < nr_reads; i++) {
2508 		if (read_bh[i])
2509 			free_buffer_head(read_bh[i]);
2510 	}
2511 
2512 	/*
2513 	 * Error recovery is pretty slack.  Clear the page and mark it dirty
2514 	 * so we'll later zero out any blocks which _were_ allocated.
2515 	 */
2516 	kaddr = kmap_atomic(page, KM_USER0);
2517 	memset(kaddr, 0, PAGE_CACHE_SIZE);
2518 	kunmap_atomic(kaddr, KM_USER0);
2519 	SetPageUptodate(page);
2520 	set_page_dirty(page);
2521 	return ret;
2522 }
2523 EXPORT_SYMBOL(nobh_prepare_write);
2524 
2525 int nobh_commit_write(struct file *file, struct page *page,
2526 		unsigned from, unsigned to)
2527 {
2528 	struct inode *inode = page->mapping->host;
2529 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2530 
2531 	set_page_dirty(page);
2532 	if (pos > inode->i_size) {
2533 		i_size_write(inode, pos);
2534 		mark_inode_dirty(inode);
2535 	}
2536 	return 0;
2537 }
2538 EXPORT_SYMBOL(nobh_commit_write);
2539 
2540 /*
2541  * nobh_writepage() - based on block_write_full_page() except
2542  * that it tries to operate without attaching bufferheads to
2543  * the page.
2544  */
2545 int nobh_writepage(struct page *page, get_block_t *get_block,
2546 			struct writeback_control *wbc)
2547 {
2548 	struct inode * const inode = page->mapping->host;
2549 	loff_t i_size = i_size_read(inode);
2550 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2551 	unsigned offset;
2552 	void *kaddr;
2553 	int ret;
2554 
2555 	/* Is the page fully inside i_size? */
2556 	if (page->index < end_index)
2557 		goto out;
2558 
2559 	/* Is the page fully outside i_size? (truncate in progress) */
2560 	offset = i_size & (PAGE_CACHE_SIZE-1);
2561 	if (page->index >= end_index+1 || !offset) {
2562 		/*
2563 		 * The page may have dirty, unmapped buffers.  For example,
2564 		 * they may have been added in ext3_writepage().  Make them
2565 		 * freeable here, so the page does not leak.
2566 		 */
2567 #if 0
2568 		/* Not really sure about this - do we need this? */
2569 		if (page->mapping->a_ops->invalidatepage)
2570 			page->mapping->a_ops->invalidatepage(page, offset);
2571 #endif
2572 		unlock_page(page);
2573 		return 0; /* don't care */
2574 	}
2575 
2576 	/*
2577 	 * The page straddles i_size.  It must be zeroed out on each and every
2578 	 * writepage invocation because it may be mmapped.  "A file is mapped
2579 	 * in multiples of the page size.  For a file that is not a multiple of
2580 	 * the  page size, the remaining memory is zeroed when mapped, and
2581 	 * writes to that region are not written out to the file."
2582 	 */
2583 	kaddr = kmap_atomic(page, KM_USER0);
2584 	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2585 	flush_dcache_page(page);
2586 	kunmap_atomic(kaddr, KM_USER0);
2587 out:
2588 	ret = mpage_writepage(page, get_block, wbc);
2589 	if (ret == -EAGAIN)
2590 		ret = __block_write_full_page(inode, page, get_block, wbc);
2591 	return ret;
2592 }
2593 EXPORT_SYMBOL(nobh_writepage);
2594 
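#if 0
/*
 * Illustrative sketch (not built): an ext2-style "nobh" mount wires the
 * bufferhead-less helpers above into its aops so that ordinary writes
 * never attach buffer_heads to the page.  myfs_get_block and the aops
 * name are hypothetical.
 */
static int myfs_nobh_prepare_write(struct file *file, struct page *page,
				   unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, myfs_get_block);
}

static int myfs_nobh_writepage(struct page *page,
			       struct writeback_control *wbc)
{
	return nobh_writepage(page, myfs_get_block, wbc);
}

static struct address_space_operations myfs_nobh_aops = {
	.readpage	= myfs_readpage,	/* block_read_full_page() wrapper */
	.writepage	= myfs_nobh_writepage,
	.prepare_write	= myfs_nobh_prepare_write,
	.commit_write	= nobh_commit_write,
};
#endif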
2595 /*
2596  * This function assumes that ->prepare_write() uses nobh_prepare_write().
2597  */
2598 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2599 {
2600 	struct inode *inode = mapping->host;
2601 	unsigned blocksize = 1 << inode->i_blkbits;
2602 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2603 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2604 	unsigned to;
2605 	struct page *page;
2606 	struct address_space_operations *a_ops = mapping->a_ops;
2607 	char *kaddr;
2608 	int ret = 0;
2609 
2610 	if ((offset & (blocksize - 1)) == 0)
2611 		goto out;
2612 
2613 	ret = -ENOMEM;
2614 	page = grab_cache_page(mapping, index);
2615 	if (!page)
2616 		goto out;
2617 
2618 	to = (offset + blocksize) & ~(blocksize - 1);
2619 	ret = a_ops->prepare_write(NULL, page, offset, to);
2620 	if (ret == 0) {
2621 		kaddr = kmap_atomic(page, KM_USER0);
2622 		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2623 		flush_dcache_page(page);
2624 		kunmap_atomic(kaddr, KM_USER0);
2625 		set_page_dirty(page);
2626 	}
2627 	unlock_page(page);
2628 	page_cache_release(page);
2629 out:
2630 	return ret;
2631 }
2632 EXPORT_SYMBOL(nobh_truncate_page);
2633 
2634 int block_truncate_page(struct address_space *mapping,
2635 			loff_t from, get_block_t *get_block)
2636 {
2637 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2638 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2639 	unsigned blocksize;
2640 	sector_t iblock;
2641 	unsigned length, pos;
2642 	struct inode *inode = mapping->host;
2643 	struct page *page;
2644 	struct buffer_head *bh;
2645 	void *kaddr;
2646 	int err;
2647 
2648 	blocksize = 1 << inode->i_blkbits;
2649 	length = offset & (blocksize - 1);
2650 
2651 	/* Block boundary? Nothing to do */
2652 	if (!length)
2653 		return 0;
2654 
2655 	length = blocksize - length;
2656 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2657 
2658 	page = grab_cache_page(mapping, index);
2659 	err = -ENOMEM;
2660 	if (!page)
2661 		goto out;
2662 
2663 	if (!page_has_buffers(page))
2664 		create_empty_buffers(page, blocksize, 0);
2665 
2666 	/* Find the buffer that contains "offset" */
2667 	bh = page_buffers(page);
2668 	pos = blocksize;
2669 	while (offset >= pos) {
2670 		bh = bh->b_this_page;
2671 		iblock++;
2672 		pos += blocksize;
2673 	}
2674 
2675 	err = 0;
2676 	if (!buffer_mapped(bh)) {
2677 		err = get_block(inode, iblock, bh, 0);
2678 		if (err)
2679 			goto unlock;
2680 		/* unmapped? It's a hole - nothing to do */
2681 		if (!buffer_mapped(bh))
2682 			goto unlock;
2683 	}
2684 
2685 	/* Ok, it's mapped. Make sure it's up-to-date */
2686 	if (PageUptodate(page))
2687 		set_buffer_uptodate(bh);
2688 
2689 	if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2690 		err = -EIO;
2691 		ll_rw_block(READ, 1, &bh);
2692 		wait_on_buffer(bh);
2693 		/* Uhhuh. Read error. Complain and punt. */
2694 		if (!buffer_uptodate(bh))
2695 			goto unlock;
2696 	}
2697 
2698 	kaddr = kmap_atomic(page, KM_USER0);
2699 	memset(kaddr + offset, 0, length);
2700 	flush_dcache_page(page);
2701 	kunmap_atomic(kaddr, KM_USER0);
2702 
2703 	mark_buffer_dirty(bh);
2704 	err = 0;
2705 
2706 unlock:
2707 	unlock_page(page);
2708 	page_cache_release(page);
2709 out:
2710 	return err;
2711 }
2712 
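#if 0
/*
 * Illustrative sketch (not built): a truncate path zeroes the tail of
 * the new last block with block_truncate_page() before freeing the
 * blocks beyond it, so a later extension cannot expose stale data.
 * myfs_get_block and myfs_free_blocks_after() are hypothetical.
 */
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	myfs_free_blocks_after(inode, inode->i_size);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
}
#endif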
2713 /*
2714  * The generic ->writepage function for buffer-backed address_spaces
2715  */
2716 int block_write_full_page(struct page *page, get_block_t *get_block,
2717 			struct writeback_control *wbc)
2718 {
2719 	struct inode * const inode = page->mapping->host;
2720 	loff_t i_size = i_size_read(inode);
2721 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2722 	unsigned offset;
2723 	void *kaddr;
2724 
2725 	/* Is the page fully inside i_size? */
2726 	if (page->index < end_index)
2727 		return __block_write_full_page(inode, page, get_block, wbc);
2728 
2729 	/* Is the page fully outside i_size? (truncate in progress) */
2730 	offset = i_size & (PAGE_CACHE_SIZE-1);
2731 	if (page->index >= end_index+1 || !offset) {
2732 		/*
2733 		 * The page may have dirty, unmapped buffers.  For example,
2734 		 * they may have been added in ext3_writepage().  Make them
2735 		 * freeable here, so the page does not leak.
2736 		 */
2737 		do_invalidatepage(page, 0);
2738 		unlock_page(page);
2739 		return 0; /* don't care */
2740 	}
2741 
2742 	/*
2743 	 * The page straddles i_size.  It must be zeroed out on each and every
2744 	 * writepage invocation because it may be mmapped.  "A file is mapped
2745 	 * in multiples of the page size.  For a file that is not a multiple of
2746 	 * the  page size, the remaining memory is zeroed when mapped, and
2747 	 * writes to that region are not written out to the file."
2748 	 */
2749 	kaddr = kmap_atomic(page, KM_USER0);
2750 	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2751 	flush_dcache_page(page);
2752 	kunmap_atomic(kaddr, KM_USER0);
2753 	return __block_write_full_page(inode, page, get_block, wbc);
2754 }
2755 
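#if 0
/*
 * Illustrative sketch (not built): as with ->readpage, the ->writepage
 * of a get_block-based filesystem is normally a one-line wrapper around
 * block_write_full_page().  myfs_get_block is hypothetical.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}
#endif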
2756 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2757 			    get_block_t *get_block)
2758 {
2759 	struct buffer_head tmp;
2760 	struct inode *inode = mapping->host;
2761 	tmp.b_state = 0;
2762 	tmp.b_blocknr = 0;
2763 	get_block(inode, block, &tmp, 0);
2764 	return tmp.b_blocknr;
2765 }
2766 
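#if 0
/*
 * Illustrative sketch (not built): ->bmap (used by the FIBMAP ioctl and
 * by swapfile activation) is normally just generic_block_bmap() fed
 * with the filesystem's get_block.  myfs_get_block is hypothetical.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif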
2767 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2768 {
2769 	struct buffer_head *bh = bio->bi_private;
2770 
2771 	if (bio->bi_size)
2772 		return 1;
2773 
2774 	if (err == -EOPNOTSUPP) {
2775 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2776 		set_bit(BH_Eopnotsupp, &bh->b_state);
2777 	}
2778 
2779 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2780 	bio_put(bio);
2781 	return 0;
2782 }
2783 
2784 int submit_bh(int rw, struct buffer_head * bh)
2785 {
2786 	struct bio *bio;
2787 	int ret = 0;
2788 
2789 	BUG_ON(!buffer_locked(bh));
2790 	BUG_ON(!buffer_mapped(bh));
2791 	BUG_ON(!bh->b_end_io);
2792 
2793 	if (buffer_ordered(bh) && (rw == WRITE))
2794 		rw = WRITE_BARRIER;
2795 
2796 	/*
2797 	 * Only clear out a write error when rewriting, should this
2798 	 * include WRITE_SYNC as well?
2799 	 */
2800 	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2801 		clear_buffer_write_io_error(bh);
2802 
2803 	/*
2804 	 * from here on down, it's all bio -- do the initial mapping,
2805 	 * submit_bio -> generic_make_request may further map this bio around
2806 	 */
2807 	bio = bio_alloc(GFP_NOIO, 1);
2808 
2809 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2810 	bio->bi_bdev = bh->b_bdev;
2811 	bio->bi_io_vec[0].bv_page = bh->b_page;
2812 	bio->bi_io_vec[0].bv_len = bh->b_size;
2813 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2814 
2815 	bio->bi_vcnt = 1;
2816 	bio->bi_idx = 0;
2817 	bio->bi_size = bh->b_size;
2818 
2819 	bio->bi_end_io = end_bio_bh_io_sync;
2820 	bio->bi_private = bh;
2821 
2822 	bio_get(bio);
2823 	submit_bio(rw, bio);
2824 
2825 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
2826 		ret = -EOPNOTSUPP;
2827 
2828 	bio_put(bio);
2829 	return ret;
2830 }
2831 
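#if 0
/*
 * Illustrative sketch (not built): reading a single metadata buffer by
 * hand with submit_bh(), essentially what __bread() does internally.
 * The bh must be locked and mapped, with b_end_io set, before it is
 * submitted; end_buffer_read_sync() drops the extra reference.
 */
static int myfs_read_bh(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {	/* raced with another reader */
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);			/* reference dropped by end_io */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif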
2832 /**
2833  * ll_rw_block: low-level access to block devices (DEPRECATED)
2834  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2835  * @nr: number of &struct buffer_heads in the array
2836  * @bhs: array of pointers to &struct buffer_head
2837  *
2838  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2839  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2840  * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
2841  * are sent to disk. The fourth %READA option is described in the documentation
2842  * for generic_make_request() which ll_rw_block() calls.
2843  *
2844  * This function drops any buffer that it cannot get a lock on (with the
2845  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2846  * clean when doing a write request, and any buffer that appears to be
2847  * up-to-date when doing a read request.  Further, it marks as clean the
2848  * buffers that are processed for writing (the buffer cache won't assume
2849  * that they are actually clean until the buffer gets unlocked).
2850  *
2851  * ll_rw_block sets b_end_io to a simple completion handler that marks
2852  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2853  * any waiters.
2854  *
2855  * All of the buffers must be for the same device, and must also be a
2856  * multiple of the current approved size for the device.
2857  */
2858 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2859 {
2860 	int i;
2861 
2862 	for (i = 0; i < nr; i++) {
2863 		struct buffer_head *bh = bhs[i];
2864 
2865 		if (rw == SWRITE)
2866 			lock_buffer(bh);
2867 		else if (test_set_buffer_locked(bh))
2868 			continue;
2869 
2870 		if (rw == WRITE || rw == SWRITE) {
2871 			if (test_clear_buffer_dirty(bh)) {
2872 				bh->b_end_io = end_buffer_write_sync;
2873 				get_bh(bh);
2874 				submit_bh(WRITE, bh);
2875 				continue;
2876 			}
2877 		} else {
2878 			if (!buffer_uptodate(bh)) {
2879 				bh->b_end_io = end_buffer_read_sync;
2880 				get_bh(bh);
2881 				submit_bh(rw, bh);
2882 				continue;
2883 			}
2884 		}
2885 		unlock_buffer(bh);
2886 	}
2887 }
2888 
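#if 0
/*
 * Illustrative sketch (not built): the classic ll_rw_block() pattern -
 * fire off readahead for a batch of buffers, read the one we need now,
 * then wait for it and check the result ourselves.
 */
static int myfs_read_with_readahead(struct buffer_head **bhs, int nr)
{
	if (nr > 1)
		ll_rw_block(READA, nr - 1, bhs + 1);	/* best effort */
	ll_rw_block(READ, 1, &bhs[0]);
	wait_on_buffer(bhs[0]);
	return buffer_uptodate(bhs[0]) ? 0 : -EIO;
}
#endif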
2889 /*
2890  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2891  * and then start new I/O and then wait upon it.  The caller must have a ref on
2892  * the buffer_head.
2893  */
2894 int sync_dirty_buffer(struct buffer_head *bh)
2895 {
2896 	int ret = 0;
2897 
2898 	WARN_ON(atomic_read(&bh->b_count) < 1);
2899 	lock_buffer(bh);
2900 	if (test_clear_buffer_dirty(bh)) {
2901 		get_bh(bh);
2902 		bh->b_end_io = end_buffer_write_sync;
2903 		ret = submit_bh(WRITE, bh);
2904 		wait_on_buffer(bh);
2905 		if (buffer_eopnotsupp(bh)) {
2906 			clear_buffer_eopnotsupp(bh);
2907 			ret = -EOPNOTSUPP;
2908 		}
2909 		if (!ret && !buffer_uptodate(bh))
2910 			ret = -EIO;
2911 	} else {
2912 		unlock_buffer(bh);
2913 	}
2914 	return ret;
2915 }
2916 
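#if 0
/*
 * Illustrative sketch (not built): the common "modify a metadata block,
 * then push it to disk and wait" sequence built from sb_bread(),
 * mark_buffer_dirty() and sync_dirty_buffer().  myfs_update_contents()
 * is a hypothetical edit of the block's payload.
 */
static int myfs_write_meta_block(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh;
	int err;

	bh = sb_bread(sb, blocknr);
	if (!bh)
		return -EIO;
	myfs_update_contents(bh->b_data);
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);	/* write and wait; we hold a ref */
	brelse(bh);
	return err;
}
#endif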
2917 /*
2918  * try_to_free_buffers() checks if all the buffers on this particular page
2919  * are unused, and releases them if so.
2920  *
2921  * Exclusion against try_to_free_buffers may be obtained by either
2922  * locking the page or by holding its mapping's private_lock.
2923  *
2924  * If the page is dirty but all the buffers are clean then we need to
2925  * be sure to mark the page clean as well.  This is because the page
2926  * may be against a block device, and a later reattachment of buffers
2927  * to a dirty page will set *all* buffers dirty, which would corrupt
2928  * filesystem data on the same device.
2929  *
2930  * The same applies to regular filesystem pages: if all the buffers are
2931  * clean then we set the page clean and proceed.  To do that, we require
2932  * total exclusion from __set_page_dirty_buffers().  That is obtained with
2933  * private_lock.
2934  *
2935  * try_to_free_buffers() is non-blocking.
2936  */
2937 static inline int buffer_busy(struct buffer_head *bh)
2938 {
2939 	return atomic_read(&bh->b_count) |
2940 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2941 }
2942 
2943 static int
2944 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2945 {
2946 	struct buffer_head *head = page_buffers(page);
2947 	struct buffer_head *bh;
2948 
2949 	bh = head;
2950 	do {
2951 		if (buffer_write_io_error(bh) && page->mapping)
2952 			set_bit(AS_EIO, &page->mapping->flags);
2953 		if (buffer_busy(bh))
2954 			goto failed;
2955 		bh = bh->b_this_page;
2956 	} while (bh != head);
2957 
2958 	do {
2959 		struct buffer_head *next = bh->b_this_page;
2960 
2961 		if (!list_empty(&bh->b_assoc_buffers))
2962 			__remove_assoc_queue(bh);
2963 		bh = next;
2964 	} while (bh != head);
2965 	*buffers_to_free = head;
2966 	__clear_page_buffers(page);
2967 	return 1;
2968 failed:
2969 	return 0;
2970 }
2971 
2972 int try_to_free_buffers(struct page *page)
2973 {
2974 	struct address_space * const mapping = page->mapping;
2975 	struct buffer_head *buffers_to_free = NULL;
2976 	int ret = 0;
2977 
2978 	BUG_ON(!PageLocked(page));
2979 	if (PageWriteback(page))
2980 		return 0;
2981 
2982 	if (mapping == NULL) {		/* can this still happen? */
2983 		ret = drop_buffers(page, &buffers_to_free);
2984 		goto out;
2985 	}
2986 
2987 	spin_lock(&mapping->private_lock);
2988 	ret = drop_buffers(page, &buffers_to_free);
2989 	if (ret) {
2990 		/*
2991 		 * If the filesystem writes its buffers by hand (eg ext3)
2992 		 * then we can have clean buffers against a dirty page.  We
2993 		 * clean the page here; otherwise later reattachment of buffers
2994 		 * could encounter a non-uptodate page, which is unresolvable.
2995 		 * This only applies in the rare case where try_to_free_buffers
2996 		 * succeeds but the page is not freed.
2997 		 */
2998 		clear_page_dirty(page);
2999 	}
3000 	spin_unlock(&mapping->private_lock);
3001 out:
3002 	if (buffers_to_free) {
3003 		struct buffer_head *bh = buffers_to_free;
3004 
3005 		do {
3006 			struct buffer_head *next = bh->b_this_page;
3007 			free_buffer_head(bh);
3008 			bh = next;
3009 		} while (bh != buffers_to_free);
3010 	}
3011 	return ret;
3012 }
3013 EXPORT_SYMBOL(try_to_free_buffers);
3014 
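#if 0
/*
 * Illustrative sketch (not built): a ->releasepage that refuses to give
 * up pages it still considers pinned and otherwise falls back to
 * try_to_free_buffers().  myfs_page_pinned() is hypothetical; journalled
 * filesystems such as ext3 do something similar via their journal layer.
 */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	if (myfs_page_pinned(page))
		return 0;
	return try_to_free_buffers(page);
}
#endif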
3015 int block_sync_page(struct page *page)
3016 {
3017 	struct address_space *mapping;
3018 
3019 	smp_mb();
3020 	mapping = page_mapping(page);
3021 	if (mapping)
3022 		blk_run_backing_dev(mapping->backing_dev_info, page);
3023 	return 0;
3024 }
3025 
3026 /*
3027  * There are no bdflush tunables left.  But distributions are
3028  * still running obsolete flush daemons, so we terminate them here.
3029  *
3030  * Use of bdflush() is deprecated and will be removed in a future kernel.
3031  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3032  */
3033 asmlinkage long sys_bdflush(int func, long data)
3034 {
3035 	static int msg_count;
3036 
3037 	if (!capable(CAP_SYS_ADMIN))
3038 		return -EPERM;
3039 
3040 	if (msg_count < 5) {
3041 		msg_count++;
3042 		printk(KERN_INFO
3043 			"warning: process `%s' used the obsolete bdflush"
3044 			" system call\n", current->comm);
3045 		printk(KERN_INFO "Fix your initscripts?\n");
3046 	}
3047 
3048 	if (func == 1)
3049 		do_exit(0);
3050 	return 0;
3051 }
3052 
3053 /*
3054  * Migration function for pages with buffers. This function can only be used
3055  * if the underlying filesystem guarantees that no other references to "page"
3056  * exist.
3057  */
3058 #ifdef CONFIG_MIGRATION
3059 int buffer_migrate_page(struct page *newpage, struct page *page)
3060 {
3061 	struct address_space *mapping = page->mapping;
3062 	struct buffer_head *bh, *head;
3063 	int rc;
3064 
3065 	if (!mapping)
3066 		return -EAGAIN;
3067 
3068 	if (!page_has_buffers(page))
3069 		return migrate_page(newpage, page);
3070 
3071 	head = page_buffers(page);
3072 
3073 	rc = migrate_page_remove_references(newpage, page, 3);
3074 	if (rc)
3075 		return rc;
3076 
3077 	bh = head;
3078 	do {
3079 		get_bh(bh);
3080 		lock_buffer(bh);
3081 		bh = bh->b_this_page;
3082 
3083 	} while (bh != head);
3084 
3085 	ClearPagePrivate(page);
3086 	set_page_private(newpage, page_private(page));
3087 	set_page_private(page, 0);
3088 	put_page(page);
3089 	get_page(newpage);
3090 
3091 	bh = head;
3092 	do {
3093 		set_bh_page(bh, newpage, bh_offset(bh));
3094 		bh = bh->b_this_page;
3095 
3096 	} while (bh != head);
3097 
3098 	SetPagePrivate(newpage);
3099 
3100 	migrate_page_copy(newpage, page);
3101 
3102 	bh = head;
3103 	do {
3104 		unlock_buffer(bh);
3105 		put_bh(bh);
3106 		bh = bh->b_this_page;
3107 
3108 	} while (bh != head);
3109 
3110 	return 0;
3111 }
3112 EXPORT_SYMBOL(buffer_migrate_page);
3113 #endif
3114 
3115 /*
3116  * Buffer-head allocation
3117  */
3118 static kmem_cache_t *bh_cachep;
3119 
3120 /*
3121  * Once the number of bh's in the machine exceeds this level, we start
3122  * stripping them in writeback.
3123  */
3124 static int max_buffer_heads;
3125 
3126 int buffer_heads_over_limit;
3127 
3128 struct bh_accounting {
3129 	int nr;			/* Number of live bh's */
3130 	int ratelimit;		/* Limit cacheline bouncing */
3131 };
3132 
3133 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3134 
3135 static void recalc_bh_state(void)
3136 {
3137 	int i;
3138 	int tot = 0;
3139 
3140 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3141 		return;
3142 	__get_cpu_var(bh_accounting).ratelimit = 0;
3143 	for_each_cpu(i)
3144 		tot += per_cpu(bh_accounting, i).nr;
3145 	buffer_heads_over_limit = (tot > max_buffer_heads);
3146 }
3147 
3148 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3149 {
3150 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3151 	if (ret) {
3152 		get_cpu_var(bh_accounting).nr++;
3153 		recalc_bh_state();
3154 		put_cpu_var(bh_accounting);
3155 	}
3156 	return ret;
3157 }
3158 EXPORT_SYMBOL(alloc_buffer_head);
3159 
3160 void free_buffer_head(struct buffer_head *bh)
3161 {
3162 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
3163 	kmem_cache_free(bh_cachep, bh);
3164 	get_cpu_var(bh_accounting).nr--;
3165 	recalc_bh_state();
3166 	put_cpu_var(bh_accounting);
3167 }
3168 EXPORT_SYMBOL(free_buffer_head);
3169 
3170 static void
3171 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
3172 {
3173 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
3174 			    SLAB_CTOR_CONSTRUCTOR) {
3175 		struct buffer_head * bh = (struct buffer_head *)data;
3176 
3177 		memset(bh, 0, sizeof(*bh));
3178 		INIT_LIST_HEAD(&bh->b_assoc_buffers);
3179 	}
3180 }
3181 
3182 #ifdef CONFIG_HOTPLUG_CPU
3183 static void buffer_exit_cpu(int cpu)
3184 {
3185 	int i;
3186 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3187 
3188 	for (i = 0; i < BH_LRU_SIZE; i++) {
3189 		brelse(b->bhs[i]);
3190 		b->bhs[i] = NULL;
3191 	}
3192 }
3193 
3194 static int buffer_cpu_notify(struct notifier_block *self,
3195 			      unsigned long action, void *hcpu)
3196 {
3197 	if (action == CPU_DEAD)
3198 		buffer_exit_cpu((unsigned long)hcpu);
3199 	return NOTIFY_OK;
3200 }
3201 #endif /* CONFIG_HOTPLUG_CPU */
3202 
3203 void __init buffer_init(void)
3204 {
3205 	int nrpages;
3206 
3207 	bh_cachep = kmem_cache_create("buffer_head",
3208 			sizeof(struct buffer_head), 0,
3209 			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
3210 
3211 	/*
3212 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3213 	 */
3214 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3215 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3216 	hotcpu_notifier(buffer_cpu_notify, 0);
3217 }
3218 
3219 EXPORT_SYMBOL(__bforget);
3220 EXPORT_SYMBOL(__brelse);
3221 EXPORT_SYMBOL(__wait_on_buffer);
3222 EXPORT_SYMBOL(block_commit_write);
3223 EXPORT_SYMBOL(block_prepare_write);
3224 EXPORT_SYMBOL(block_read_full_page);
3225 EXPORT_SYMBOL(block_sync_page);
3226 EXPORT_SYMBOL(block_truncate_page);
3227 EXPORT_SYMBOL(block_write_full_page);
3228 EXPORT_SYMBOL(cont_prepare_write);
3229 EXPORT_SYMBOL(end_buffer_async_write);
3230 EXPORT_SYMBOL(end_buffer_read_sync);
3231 EXPORT_SYMBOL(end_buffer_write_sync);
3232 EXPORT_SYMBOL(file_fsync);
3233 EXPORT_SYMBOL(fsync_bdev);
3234 EXPORT_SYMBOL(generic_block_bmap);
3235 EXPORT_SYMBOL(generic_commit_write);
3236 EXPORT_SYMBOL(generic_cont_expand);
3237 EXPORT_SYMBOL(generic_cont_expand_simple);
3238 EXPORT_SYMBOL(init_buffer);
3239 EXPORT_SYMBOL(invalidate_bdev);
3240 EXPORT_SYMBOL(ll_rw_block);
3241 EXPORT_SYMBOL(mark_buffer_dirty);
3242 EXPORT_SYMBOL(submit_bh);
3243 EXPORT_SYMBOL(sync_dirty_buffer);
3244 EXPORT_SYMBOL(unlock_buffer);
3245