/* xref: /linux/fs/buffer.c (revision 2b8232ce512105e28453f301d1510de8363bccd1) */
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}

static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}

void fastcall __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void fastcall unlock_buffer(struct buffer_head *bh)
{
	smp_mb__before_clear_bit();
	clear_buffer_locked(bh);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
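
/*
 * Illustrative sketch (not in the original source): the usual discipline
 * around these helpers.  A caller who wants to modify a buffer takes the
 * lock; a caller who only needs previously-submitted I/O to finish waits:
 *
 *	lock_buffer(bh);	(sleeps in __lock_buffer() if contended)
 *	... modify bh->b_data ...
 *	unlock_buffer(bh);	(wakes any waiters on BH_Lock)
 *
 *	wait_on_buffer(bh);	(waits, but does not leave the buffer locked)
 */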

static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}

static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

/*
 * Default synchronous end-of-IO handler..  Just mark it up-to-date and
 * unlock the buffer. This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	int ret = 0;

	if (bdev)
		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
	return ret;
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = fsync_super(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}

/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;

	down(&bdev->bd_mount_sem);
	sb = get_super(bdev);
	if (sb && !(sb->s_flags & MS_RDONLY)) {
		sb->s_frozen = SB_FREEZE_WRITE;
		smp_wmb();

		__fsync_super(sb);

		sb->s_frozen = SB_FREEZE_TRANS;
		smp_wmb();

		sync_blockdev(sb->s_bdev);

		if (sb->s_op->write_super_lockfs)
			sb->s_op->write_super_lockfs(sb);
	}

	sync_blockdev(bdev);
	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
void thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	if (sb) {
		BUG_ON(sb->s_bdev != bdev);

		if (sb->s_op->unlockfs)
			sb->s_op->unlockfs(sb);
		sb->s_frozen = SB_UNFROZEN;
		smp_wmb();
		wake_up(&sb->s_wait_unfrozen);
		drop_super(sb);
	}

	up(&bdev->bd_mount_sem);
}
EXPORT_SYMBOL(thaw_bdev);
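
/*
 * Illustrative sketch (not in the original source): how a snapshot
 * implementation might pair these two calls.  The surrounding code and
 * variable names are assumptions for illustration only.
 *
 *	struct super_block *sb;
 *
 *	sb = freeze_bdev(bdev);		(quiesces writes and syncs the fs)
 *	... take the snapshot ...
 *	thaw_bdev(bdev, sb);		(sb may be NULL if none was found)
 */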

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		if (!buffer_mapped(bh))
			all_mapped = 0;
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example, ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: if the user removed a removable-media disk while it still had
   dirty data not synced to disk (due to a bug in the device driver or an
   error by the user), then by not destroying the dirty buffers we could
   corrupt the next media inserted as well.  So a parameter is necessary
   to handle this case in the safest way possible (trying not to corrupt
   the newly inserted disk with data belonging to the old, now corrupted,
   one).  Also, for a ramdisk the natural way to release the ramdisk
   memory is to destroy its dirty buffers.

   These are two special cases. Normal usage implies that the device
   driver issues a sync on the device (without waiting for I/O completion)
   and then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	invalidate_mapping_pages(mapping, 0, -1);
}

/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone **zones;
	pg_data_t *pgdat;

	wakeup_pdflush(1024);
	yield();

	for_each_online_pgdat(pgdat) {
		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
		if (*zones)
			try_to_free_pages(zones, 0, GFP_NOFS);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (printk_ratelimit())
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_write;
	set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static inline void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

/**
 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
 *                        buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
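
/*
 * Illustrative sketch (not in the original source): a typical ->fsync()
 * built on sync_mapping_buffers().  The function name and the exact error
 * handling are assumptions for illustration.
 *
 *	int example_fsync(struct file *file, struct dentry *dentry, int datasync)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int err;
 *
 *		(write out and wait upon the "associated" metadata buffers)
 *		err = sync_mapping_buffers(inode->i_mapping);
 *		...
 *		return err;
 *	}
 */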

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	}
	if (list_empty(&bh->b_assoc_buffers)) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
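
/*
 * Illustrative sketch (not in the original source): a filesystem that has
 * just allocated an indirect block for a regular file queues it on that
 * file's ->private_list so a later fsync() will find it:
 *
 *	bh = sb_getblk(sb, indirect_block);	(bh lives in the blockdev mapping)
 *	... fill in the indirect entries ...
 *	mark_buffer_dirty_inode(bh, inode);	(dirty it and list it on the inode)
 *	brelse(bh);
 */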

/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static int __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (TestSetPageDirty(page))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));

		if (mapping_cap_account_dirty(mapping)) {
			__inc_zone_page_state(page, NR_FILE_DIRTY);
			task_io_account_write(PAGE_CACHE_SIZE);
		}
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	write_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return 1;
}

712  * Add a page to the dirty page list.
713  *
714  * It is a sad fact of life that this function is called from several places
715  * deeply under spinlocking.  It may not sleep.
716  *
717  * If the page has buffers, the uptodate buffers are set dirty, to preserve
718  * dirty-state coherency between the page and the buffers.  It the page does
719  * not have buffers then when they are later attached they will all be set
720  * dirty.
721  *
722  * The buffers are dirtied before the page is dirtied.  There's a small race
723  * window in which a writepage caller may see the page cleanness but not the
724  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
725  * before the buffers, a concurrent writepage caller could clear the page dirty
726  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
727  * page on the dirty page list.
728  *
729  * We use private_lock to lock against try_to_free_buffers while using the
730  * page's buffer list.  Also use this to protect against clean buffers being
731  * added to the page after it was set dirty.
732  *
733  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
734  * address_space though.
735  */
int __set_page_dirty_buffers(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	spin_unlock(&mapping->private_lock);

	return __set_page_dirty(page, mapping, 1);
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		__remove_assoc_queue(bh);
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				ll_rw_block(SWRITE, 1, &bh);
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		list_del_init(&bh->b_assoc_buffers);
		get_bh(bh);
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a page for a data area and
 * the size of each buffer.. Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping),
 * which may not fail, from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_private = NULL;
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);

	do {
		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;

	page = find_or_create_page(inode->i_mapping, index,
		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
	if (!page)
		return NULL;

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			init_page_buffers(page, bdev, block, size);
			return page;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, 0);
	if (!bh)
		goto failed;

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	init_page_buffers(page, bdev, block, size);
	spin_unlock(&inode->i_mapping->private_lock);
	return page;

failed:
	BUG();
	unlock_page(page);
	page_cache_release(page);
	return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
	struct page *page;
	pgoff_t index;
	int sizebits;

	sizebits = -1;
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);
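	/*
	 * Worked example (comment added for illustration, not in the
	 * original source): with 4k pages and size == 512 the loop above
	 * yields sizebits == 3, so eight blocks fit per page and, for
	 * instance, block 35 lands in pagecache index 35 >> 3 == 4.
	 */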

	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		char b[BDEVNAME_SIZE];

		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %s\n",
			__FUNCTION__, (unsigned long long)block,
			bdevname(bdev, b));
		return -EIO;
	}
	block = index << sizebits;
	/* Create a page with the proper size buffers.. */
	page = grow_dev_page(bdev, block, index, size);
	if (!page)
		return 0;
	unlock_page(page);
	page_cache_release(page);
	return 1;
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "hardsect size: %d\n",
					bdev_hardsect_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head * bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size);
		if (ret < 0)
			return NULL;
		if (ret == 0)
			free_more_memory();
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and the global inode_lock.
 */
void fastcall mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));
	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
}
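
/*
 * Illustrative sketch (not in the original source): the canonical
 * read-modify-write cycle for a metadata block ends in mark_buffer_dirty():
 *
 *	bh = sb_bread(sb, block);	(read it in, or find it cached)
 *	if (bh) {
 *		... modify bh->b_data ...
 *		mark_buffer_dirty(bh);	(schedule it for writeback)
 *		brelse(bh);
 *	}
 */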

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
	WARN_ON(1);
}

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (!list_empty(&bh->b_assoc_buffers)) {
		struct address_space *buffer_mapping = bh->b_page->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	8

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = NULL;
	struct bh_lru *lru;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	if (lru->bhs[0] != bh) {
		struct buffer_head *bhs[BH_LRU_SIZE];
		int in;
		int out = 0;

		get_bh(bh);
		bhs[out++] = bh;
		for (in = 0; in < BH_LRU_SIZE; in++) {
			struct buffer_head *bh2 = lru->bhs[in];

			if (bh2 == bh) {
				__brelse(bh2);
			} else {
				if (out >= BH_LRU_SIZE) {
					BUG_ON(evictee != NULL);
					evictee = bh2;
				} else {
					bhs[out++] = bh2;
				}
			}
		}
		while (out < BH_LRU_SIZE)
			bhs[out++] = NULL;
		memcpy(lru->bhs, bhs, sizeof(bhs));
	}
	bh_lru_unlock();

	if (evictee)
		__brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	struct bh_lru *lru;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = lru->bhs[i];

		if (bh && bh->b_bdev == bdev &&
				bh->b_blocknr == block && bh->b_size == size) {
			if (i) {
				while (i) {
					lru->bhs[i] = lru->bhs[i - 1];
					i--;
				}
				lru->bhs[0] = bh;
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	}
	if (bh)
		touch_buffer(bh);
	return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size);
	return bh;
}
EXPORT_SYMBOL(__getblk);

/*
 * Do async read-ahead on a buffer..
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	if (likely(bh)) {
		ll_rw_block(READA, 1, &bh);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread);
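
/*
 * Illustrative sketch (not in the original source): unlike __getblk(),
 * __bread() can return NULL, so callers must check for I/O failure:
 *
 *	struct buffer_head *bh = __bread(bdev, block, 512);
 *
 *	if (!bh)
 *		return -EIO;	(the block was unreadable)
 *	... examine bh->b_data ...
 *	brelse(bh);
 */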

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	put_cpu_var(bh_lrus);
}

void invalidate_bh_lrus(void)
{
	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
}

void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */
static void discard_buffer(struct buffer_head * bh)
{
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
	unlock_buffer(bh);
}

/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidatepage(struct page *page, unsigned long offset)
{
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire page is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (offset == 0)
		try_to_release_page(page, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidatepage);
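
/*
 * Worked example (comment added for illustration, not in the original
 * source): truncating a 4k page of 512-byte buffers at offset 1024 keeps
 * buffers 0 and 1 (their offsets 0 and 512 lie below the truncation point)
 * and discards buffers 2..7.  With offset == 0 every buffer is discarded
 * and the buffers themselves are released via try_to_release_page().
 */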

/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;

	head = alloc_page_buffers(page, blocksize, 1);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&page->mapping->private_lock);
	if (PageUptodate(page) || PageDirty(page)) {
		bh = head;
		do {
			if (PageDirty(page))
				set_buffer_dirty(bh);
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);

/*
 * We are taking a block for data and we don't want any output from any
 * buffer-cache aliases starting from return from that function and
 * until the moment when something will explicitly mark the buffer
 * dirty (hopefully that will not happen until we free that block ;-)
 * We don't even need to mark it not-uptodate - nobody can expect
 * anything from a newly allocated buffer anyway. We used to use
 * unmap_buffer() for such invalidation, but that was wrong. We definitely
 * don't want to mark the alias unmapped, for example - it would confuse
 * anyone who might pick it with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can
 * be writeout I/O going on against recently-freed buffers.  We don't
 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
 * only if we really need to.  That happens here.
 */
void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
{
	struct buffer_head *old_bh;

	might_sleep();

	old_bh = __find_get_block_slow(bdev, block);
	if (old_bh) {
		clear_buffer_dirty(old_bh);
		wait_on_buffer(old_bh);
		clear_buffer_req(old_bh);
		__brelse(old_bh);
	}
}
EXPORT_SYMBOL(unmap_underlying_metadata);

/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

/*
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_page() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer.  This can only happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.
 */
1599 static int __block_write_full_page(struct inode *inode, struct page *page,
1600 			get_block_t *get_block, struct writeback_control *wbc)
1601 {
1602 	int err;
1603 	sector_t block;
1604 	sector_t last_block;
1605 	struct buffer_head *bh, *head;
1606 	const unsigned blocksize = 1 << inode->i_blkbits;
1607 	int nr_underway = 0;
1608 
1609 	BUG_ON(!PageLocked(page));
1610 
1611 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1612 
1613 	if (!page_has_buffers(page)) {
1614 		create_empty_buffers(page, blocksize,
1615 					(1 << BH_Dirty)|(1 << BH_Uptodate));
1616 	}
1617 
1618 	/*
1619 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1620 	 * here, and the (potentially unmapped) buffers may become dirty at
1621 	 * any time.  If a buffer becomes dirty here after we've inspected it
1622 	 * then we just miss that fact, and the page stays dirty.
1623 	 *
1624 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1625 	 * handle that here by just cleaning them.
1626 	 */
1627 
1628 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1629 	head = page_buffers(page);
1630 	bh = head;
1631 
1632 	/*
1633 	 * Get all the dirty buffers mapped to disk addresses and
1634 	 * handle any aliases from the underlying blockdev's mapping.
1635 	 */
1636 	do {
1637 		if (block > last_block) {
1638 			/*
1639 			 * mapped buffers outside i_size will occur, because
1640 			 * this page can be outside i_size when there is a
1641 			 * truncate in progress.
1642 			 */
1643 			/*
1644 			 * The buffer was zeroed by block_write_full_page()
1645 			 */
1646 			clear_buffer_dirty(bh);
1647 			set_buffer_uptodate(bh);
1648 		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1649 			WARN_ON(bh->b_size != blocksize);
1650 			err = get_block(inode, block, bh, 1);
1651 			if (err)
1652 				goto recover;
1653 			if (buffer_new(bh)) {
1654 				/* blockdev mappings never come here */
1655 				clear_buffer_new(bh);
1656 				unmap_underlying_metadata(bh->b_bdev,
1657 							bh->b_blocknr);
1658 			}
1659 		}
1660 		bh = bh->b_this_page;
1661 		block++;
1662 	} while (bh != head);
1663 
1664 	do {
1665 		if (!buffer_mapped(bh))
1666 			continue;
1667 		/*
1668 		 * If it's a fully non-blocking write attempt and we cannot
1669 		 * lock the buffer then redirty the page.  Note that this can
1670 		 * potentially cause a busy-wait loop from pdflush and kswapd
1671 		 * activity, but those code paths have their own higher-level
1672 		 * throttling.
1673 		 */
1674 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1675 			lock_buffer(bh);
1676 		} else if (test_set_buffer_locked(bh)) {
1677 			redirty_page_for_writepage(wbc, page);
1678 			continue;
1679 		}
1680 		if (test_clear_buffer_dirty(bh)) {
1681 			mark_buffer_async_write(bh);
1682 		} else {
1683 			unlock_buffer(bh);
1684 		}
1685 	} while ((bh = bh->b_this_page) != head);
1686 
1687 	/*
1688 	 * The page and its buffers are protected by PageWriteback(), so we can
1689 	 * drop the bh refcounts early.
1690 	 */
1691 	BUG_ON(PageWriteback(page));
1692 	set_page_writeback(page);
1693 
1694 	do {
1695 		struct buffer_head *next = bh->b_this_page;
1696 		if (buffer_async_write(bh)) {
1697 			submit_bh(WRITE, bh);
1698 			nr_underway++;
1699 		}
1700 		bh = next;
1701 	} while (bh != head);
1702 	unlock_page(page);
1703 
1704 	err = 0;
1705 done:
1706 	if (nr_underway == 0) {
1707 		/*
1708 		 * The page was marked dirty, but the buffers were
1709 		 * clean.  Someone wrote them back by hand with
1710 		 * ll_rw_block/submit_bh.  A rare case.
1711 		 */
1712 		end_page_writeback(page);
1713 
1714 		/*
1715 		 * The page and buffer_heads can be released at any time from
1716 		 * here on.
1717 		 */
1718 		wbc->pages_skipped++;	/* We didn't write this page */
1719 	}
1720 	return err;
1721 
1722 recover:
1723 	/*
1724 	 * ENOSPC, or some other error.  We may already have added some
1725 	 * blocks to the file, so we need to write these out to avoid
1726 	 * exposing stale data.
1727 	 * The page is currently locked and not marked for writeback
1728 	 */
1729 	bh = head;
1730 	/* Recovery: lock and submit the mapped buffers */
1731 	do {
1732 		if (buffer_mapped(bh) && buffer_dirty(bh)) {
1733 			lock_buffer(bh);
1734 			mark_buffer_async_write(bh);
1735 		} else {
1736 			/*
1737 			 * The buffer may have been set dirty during
1738 			 * attachment to a dirty page.
1739 			 */
1740 			clear_buffer_dirty(bh);
1741 		}
1742 	} while ((bh = bh->b_this_page) != head);
1743 	SetPageError(page);
1744 	BUG_ON(PageWriteback(page));
1745 	mapping_set_error(page->mapping, err);
1746 	set_page_writeback(page);
1747 	do {
1748 		struct buffer_head *next = bh->b_this_page;
1749 		if (buffer_async_write(bh)) {
1750 			clear_buffer_dirty(bh);
1751 			submit_bh(WRITE, bh);
1752 			nr_underway++;
1753 		}
1754 		bh = next;
1755 	} while (bh != head);
1756 	unlock_page(page);
1757 	goto done;
1758 }
1759 
1760 static int __block_prepare_write(struct inode *inode, struct page *page,
1761 		unsigned from, unsigned to, get_block_t *get_block)
1762 {
1763 	unsigned block_start, block_end;
1764 	sector_t block;
1765 	int err = 0;
1766 	unsigned blocksize, bbits;
1767 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1768 
1769 	BUG_ON(!PageLocked(page));
1770 	BUG_ON(from > PAGE_CACHE_SIZE);
1771 	BUG_ON(to > PAGE_CACHE_SIZE);
1772 	BUG_ON(from > to);
1773 
1774 	blocksize = 1 << inode->i_blkbits;
1775 	if (!page_has_buffers(page))
1776 		create_empty_buffers(page, blocksize, 0);
1777 	head = page_buffers(page);
1778 
1779 	bbits = inode->i_blkbits;
1780 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1781 
1782 	for (bh = head, block_start = 0; bh != head || !block_start;
1783 	    block++, block_start = block_end, bh = bh->b_this_page) {
1784 		block_end = block_start + blocksize;
1785 		if (block_end <= from || block_start >= to) {
1786 			if (PageUptodate(page)) {
1787 				if (!buffer_uptodate(bh))
1788 					set_buffer_uptodate(bh);
1789 			}
1790 			continue;
1791 		}
1792 		if (buffer_new(bh))
1793 			clear_buffer_new(bh);
1794 		if (!buffer_mapped(bh)) {
1795 			WARN_ON(bh->b_size != blocksize);
1796 			err = get_block(inode, block, bh, 1);
1797 			if (err)
1798 				break;
1799 			if (buffer_new(bh)) {
1800 				unmap_underlying_metadata(bh->b_bdev,
1801 							bh->b_blocknr);
1802 				if (PageUptodate(page)) {
1803 					set_buffer_uptodate(bh);
1804 					continue;
1805 				}
1806 				if (block_end > to || block_start < from) {
1807 					void *kaddr;
1808 
1809 					kaddr = kmap_atomic(page, KM_USER0);
1810 					if (block_end > to)
1811 						memset(kaddr+to, 0,
1812 							block_end-to);
1813 					if (block_start < from)
1814 						memset(kaddr+block_start,
1815 							0, from-block_start);
1816 					flush_dcache_page(page);
1817 					kunmap_atomic(kaddr, KM_USER0);
1818 				}
1819 				continue;
1820 			}
1821 		}
1822 		if (PageUptodate(page)) {
1823 			if (!buffer_uptodate(bh))
1824 				set_buffer_uptodate(bh);
1825 			continue;
1826 		}
1827 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1828 		    !buffer_unwritten(bh) &&
1829 		     (block_start < from || block_end > to)) {
1830 			ll_rw_block(READ, 1, &bh);
1831 			*wait_bh++ = bh;
1832 		}
1833 	}
1834 	/*
1835 	 * If we issued read requests - let them complete.
1836 	 */
1837 	while (wait_bh > wait) {
1838 		wait_on_buffer(*--wait_bh);
1839 		if (!buffer_uptodate(*wait_bh))
1840 			err = -EIO;
1841 	}
1842 	if (!err) {
1843 		bh = head;
1844 		do {
1845 			if (buffer_new(bh))
1846 				clear_buffer_new(bh);
1847 		} while ((bh = bh->b_this_page) != head);
1848 		return 0;
1849 	}
1850 	/* Error case: */
1851 	/*
1852 	 * Zero out any newly allocated blocks to avoid exposing stale
1853 	 * data.  If BH_New is set, we know that the block was newly
1854 	 * allocated in the above loop.
1855 	 */
1856 	bh = head;
1857 	block_start = 0;
1858 	do {
1859 		block_end = block_start+blocksize;
1860 		if (block_end <= from)
1861 			goto next_bh;
1862 		if (block_start >= to)
1863 			break;
1864 		if (buffer_new(bh)) {
1865 			clear_buffer_new(bh);
1866 			zero_user_page(page, block_start, bh->b_size, KM_USER0);
1867 			set_buffer_uptodate(bh);
1868 			mark_buffer_dirty(bh);
1869 		}
1870 next_bh:
1871 		block_start = block_end;
1872 		bh = bh->b_this_page;
1873 	} while (bh != head);
1874 	return err;
1875 }
1876 
1877 static int __block_commit_write(struct inode *inode, struct page *page,
1878 		unsigned from, unsigned to)
1879 {
1880 	unsigned block_start, block_end;
1881 	int partial = 0;
1882 	unsigned blocksize;
1883 	struct buffer_head *bh, *head;
1884 
1885 	blocksize = 1 << inode->i_blkbits;
1886 
1887 	for (bh = head = page_buffers(page), block_start = 0;
1888 	    bh != head || !block_start;
1889 	    block_start = block_end, bh = bh->b_this_page) {
1890 		block_end = block_start + blocksize;
1891 		if (block_end <= from || block_start >= to) {
1892 			if (!buffer_uptodate(bh))
1893 				partial = 1;
1894 		} else {
1895 			set_buffer_uptodate(bh);
1896 			mark_buffer_dirty(bh);
1897 		}
1898 	}
1899 
1900 	/*
1901 	 * If this is a partial write which happened to make all buffers
1902 	 * uptodate then we can optimize away a bogus readpage() for
1903 	 * the next read(). Here we 'discover' whether the page went
1904 	 * uptodate as a result of this (potentially partial) write.
1905 	 */
1906 	if (!partial)
1907 		SetPageUptodate(page);
1908 	return 0;
1909 }
1910 
1911 /*
1912  * Generic "read page" function for block devices that have the normal
1913  * get_block functionality. This is most of the block device filesystems.
1914  * Reads the page asynchronously --- the unlock_buffer() and
1915  * set/clear_buffer_uptodate() functions propagate buffer state into the
1916  * page struct once IO has completed.
1917  */
1918 int block_read_full_page(struct page *page, get_block_t *get_block)
1919 {
1920 	struct inode *inode = page->mapping->host;
1921 	sector_t iblock, lblock;
1922 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
1923 	unsigned int blocksize;
1924 	int nr, i;
1925 	int fully_mapped = 1;
1926 
1927 	BUG_ON(!PageLocked(page));
1928 	blocksize = 1 << inode->i_blkbits;
1929 	if (!page_has_buffers(page))
1930 		create_empty_buffers(page, blocksize, 0);
1931 	head = page_buffers(page);
1932 
1933 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1934 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
1935 	bh = head;
1936 	nr = 0;
1937 	i = 0;
1938 
1939 	do {
1940 		if (buffer_uptodate(bh))
1941 			continue;
1942 
1943 		if (!buffer_mapped(bh)) {
1944 			int err = 0;
1945 
1946 			fully_mapped = 0;
1947 			if (iblock < lblock) {
1948 				WARN_ON(bh->b_size != blocksize);
1949 				err = get_block(inode, iblock, bh, 0);
1950 				if (err)
1951 					SetPageError(page);
1952 			}
1953 			if (!buffer_mapped(bh)) {
1954 				zero_user_page(page, i * blocksize, blocksize,
1955 						KM_USER0);
1956 				if (!err)
1957 					set_buffer_uptodate(bh);
1958 				continue;
1959 			}
1960 			/*
1961 			 * get_block() might have updated the buffer
1962 			 * synchronously
1963 			 */
1964 			if (buffer_uptodate(bh))
1965 				continue;
1966 		}
1967 		arr[nr++] = bh;
1968 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
1969 
1970 	if (fully_mapped)
1971 		SetPageMappedToDisk(page);
1972 
1973 	if (!nr) {
1974 		/*
1975 		 * All buffers are uptodate - we can set the page uptodate
1976 		 * as well. But not if get_block() returned an error.
1977 		 */
1978 		if (!PageError(page))
1979 			SetPageUptodate(page);
1980 		unlock_page(page);
1981 		return 0;
1982 	}
1983 
1984 	/* Stage two: lock the buffers */
1985 	for (i = 0; i < nr; i++) {
1986 		bh = arr[i];
1987 		lock_buffer(bh);
1988 		mark_buffer_async_read(bh);
1989 	}
1990 
1991 	/*
1992 	 * Stage 3: start the IO.  Check for uptodateness
1993 	 * inside the buffer lock in case another process reading
1994 	 * the underlying blockdev brought it uptodate (the sct fix).
1995 	 */
1996 	for (i = 0; i < nr; i++) {
1997 		bh = arr[i];
1998 		if (buffer_uptodate(bh))
1999 			end_buffer_async_read(bh, 1);
2000 		else
2001 			submit_bh(READ, bh);
2002 	}
2003 	return 0;
2004 }
2005 
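/*
 * Example usage (a sketch, not part of this file; the myfs_* names are
 * hypothetical): a filesystem typically exposes this helper as its
 * ->readpage by passing in its own get_block.
 */
#if 0
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}
#endif
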
2006 /* utility function for filesystems that need to do work on expanding
2007  * truncates.  Uses prepare/commit_write to allow the filesystem to
2008  * deal with the hole.
2009  */
2010 static int __generic_cont_expand(struct inode *inode, loff_t size,
2011 				 pgoff_t index, unsigned int offset)
2012 {
2013 	struct address_space *mapping = inode->i_mapping;
2014 	struct page *page;
2015 	unsigned long limit;
2016 	int err;
2017 
2018 	err = -EFBIG;
2019 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2020 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2021 		send_sig(SIGXFSZ, current, 0);
2022 		goto out;
2023 	}
2024 	if (size > inode->i_sb->s_maxbytes)
2025 		goto out;
2026 
2027 	err = -ENOMEM;
2028 	page = grab_cache_page(mapping, index);
2029 	if (!page)
2030 		goto out;
2031 	err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2032 	if (err) {
2033 		/*
2034 		 * ->prepare_write() may have instantiated a few blocks
2035 		 * outside i_size.  Trim these off again.
2036 		 */
2037 		unlock_page(page);
2038 		page_cache_release(page);
2039 		vmtruncate(inode, inode->i_size);
2040 		goto out;
2041 	}
2042 
2043 	err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2044 
2045 	unlock_page(page);
2046 	page_cache_release(page);
2047 	if (err > 0)
2048 		err = 0;
2049 out:
2050 	return err;
2051 }
2052 
2053 int generic_cont_expand(struct inode *inode, loff_t size)
2054 {
2055 	pgoff_t index;
2056 	unsigned int offset;
2057 
2058 	offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2059 
2060 	/* ugh.  in prepare/commit_write, if from==to==start of block, we
2061 	 * skip the prepare.  make sure we never send an offset for the start
2062 	 * of a block.
2063 	 */
2064 	if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2065 		/* caller must handle this extra byte. */
2066 		offset++;
2067 	}
2068 	index = size >> PAGE_CACHE_SHIFT;
2069 
2070 	return __generic_cont_expand(inode, size, index, offset);
2071 }
2072 
2073 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2074 {
2075 	loff_t pos = size - 1;
2076 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2077 	unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2078 
2079 	/* prepare/commit_write can handle even if from==to==start of block. */
2080 	return __generic_cont_expand(inode, size, index, offset);
2081 }
2082 
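/*
 * Example usage (a sketch; myfs_setattr is hypothetical): a filesystem
 * that cannot represent holes extends the file from its ->setattr before
 * letting the size change take effect.
 */
#if 0
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err = 0;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size)
		err = generic_cont_expand_simple(inode, attr->ia_size);
	if (!err)
		err = inode_setattr(inode, attr);
	return err;
}
#endif
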
2083 /*
2084  * For moronic filesystems that do not allow holes in files.
2085  * We may have to extend the file.
2086  */
2087 
2088 int cont_prepare_write(struct page *page, unsigned offset,
2089 		unsigned to, get_block_t *get_block, loff_t *bytes)
2090 {
2091 	struct address_space *mapping = page->mapping;
2092 	struct inode *inode = mapping->host;
2093 	struct page *new_page;
2094 	pgoff_t pgpos;
2095 	long status;
2096 	unsigned zerofrom;
2097 	unsigned blocksize = 1 << inode->i_blkbits;
2098 
2099 	while (page->index > (pgpos = *bytes >> PAGE_CACHE_SHIFT)) {
2100 		status = -ENOMEM;
2101 		new_page = grab_cache_page(mapping, pgpos);
2102 		if (!new_page)
2103 			goto out;
2104 		/* we might sleep */
2105 		if (*bytes >> PAGE_CACHE_SHIFT != pgpos) {
2106 			unlock_page(new_page);
2107 			page_cache_release(new_page);
2108 			continue;
2109 		}
2110 		zerofrom = *bytes & ~PAGE_CACHE_MASK;
2111 		if (zerofrom & (blocksize-1)) {
2112 			*bytes |= (blocksize-1);
2113 			(*bytes)++;
2114 		}
2115 		status = __block_prepare_write(inode, new_page, zerofrom,
2116 						PAGE_CACHE_SIZE, get_block);
2117 		if (status)
2118 			goto out_unmap;
2119 		zero_user_page(new_page, zerofrom, PAGE_CACHE_SIZE - zerofrom,
2120 				KM_USER0);
2121 		generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2122 		unlock_page(new_page);
2123 		page_cache_release(new_page);
2124 	}
2125 
2126 	if (page->index < pgpos) {
2127 		/* completely inside the area */
2128 		zerofrom = offset;
2129 	} else {
2130 		/* page covers the boundary, find the boundary offset */
2131 		zerofrom = *bytes & ~PAGE_CACHE_MASK;
2132 
2133 		/* if we will expand the file, the last block will be filled */
2134 		if (to > zerofrom && (zerofrom & (blocksize-1))) {
2135 			*bytes |= (blocksize-1);
2136 			(*bytes)++;
2137 		}
2138 
2139 		/* starting below the boundary? Nothing to zero out */
2140 		if (offset <= zerofrom)
2141 			zerofrom = offset;
2142 	}
2143 	status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2144 	if (status)
2145 		goto out1;
2146 	if (zerofrom < offset) {
2147 		zero_user_page(page, zerofrom, offset - zerofrom, KM_USER0);
2148 		__block_commit_write(inode, page, zerofrom, offset);
2149 	}
2150 	return 0;
2151 out1:
2152 	ClearPageUptodate(page);
2153 	return status;
2154 
2155 out_unmap:
2156 	ClearPageUptodate(new_page);
2157 	unlock_page(new_page);
2158 	page_cache_release(new_page);
2159 out:
2160 	return status;
2161 }
2162 
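/*
 * Example usage (a sketch modelled on FAT-like callers; the myfs_* names
 * and the mmu_private field are hypothetical): ->prepare_write hands
 * cont_prepare_write() a pointer to the filesystem's "bytes allocated so
 * far" counter so the gap up to the write position gets zero-filled.
 */
#if 0
static int myfs_prepare_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	return cont_prepare_write(page, from, to, myfs_get_block,
				  &MYFS_I(page->mapping->host)->mmu_private);
}
#endif
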
2163 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2164 			get_block_t *get_block)
2165 {
2166 	struct inode *inode = page->mapping->host;
2167 	int err = __block_prepare_write(inode, page, from, to, get_block);
2168 	if (err)
2169 		ClearPageUptodate(page);
2170 	return err;
2171 }
2172 
2173 int block_commit_write(struct page *page, unsigned from, unsigned to)
2174 {
2175 	struct inode *inode = page->mapping->host;
2176 	__block_commit_write(inode, page, from, to);
2177 	return 0;
2178 }
2179 
2180 int generic_commit_write(struct file *file, struct page *page,
2181 		unsigned from, unsigned to)
2182 {
2183 	struct inode *inode = page->mapping->host;
2184 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2185 	__block_commit_write(inode, page, from, to);
2186 	/*
2187 	 * No need to use i_size_read() here, the i_size
2188 	 * cannot change under us because we hold i_mutex.
2189 	 */
2190 	if (pos > inode->i_size) {
2191 		i_size_write(inode, pos);
2192 		mark_inode_dirty(inode);
2193 	}
2194 	return 0;
2195 }
2196 
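/*
 * Example usage (a sketch; myfs_* names are hypothetical): the classic
 * ext2-style pairing is a one-line ->prepare_write wrapper around
 * block_prepare_write(), with ->commit_write pointing directly at
 * generic_commit_write().
 */
#if 0
static int myfs_prepare_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, myfs_get_block);
}
/* ...and in the aops: .commit_write = generic_commit_write */
#endif
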
2197 /*
2198  * block_page_mkwrite() is not allowed to change the file size as it gets
2199  * called from a page fault handler when a page is first dirtied. Hence we must
2200  * be careful to check for EOF conditions here. We set the page up correctly
2201  * for a written page which means we get ENOSPC checking when writing into
2202  * holes and correct delalloc and unwritten extent mapping on filesystems that
2203  * support these features.
2204  *
2205  * We are not allowed to take the i_mutex here so we have to play games to
2206  * protect against truncate races as the page could now be beyond EOF.  Because
2207  * vmtruncate() writes the inode size before removing pages, once we have the
2208  * page lock we can determine safely if the page is beyond EOF. If it is not
2209  * beyond EOF, then the page is guaranteed safe against truncation until we
2210  * unlock the page.
2211  */
2212 int
2213 block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
2214 		   get_block_t get_block)
2215 {
2216 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2217 	unsigned long end;
2218 	loff_t size;
2219 	int ret = -EINVAL;
2220 
2221 	lock_page(page);
2222 	size = i_size_read(inode);
2223 	if ((page->mapping != inode->i_mapping) ||
2224 	    (page_offset(page) > size)) {
2225 		/* page got truncated out from underneath us */
2226 		goto out_unlock;
2227 	}
2228 
2229 	/* page is wholly or partially inside EOF */
2230 	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2231 		end = size & ~PAGE_CACHE_MASK;
2232 	else
2233 		end = PAGE_CACHE_SIZE;
2234 
2235 	ret = block_prepare_write(page, 0, end, get_block);
2236 	if (!ret)
2237 		ret = block_commit_write(page, 0, end);
2238 
2239 out_unlock:
2240 	unlock_page(page);
2241 	return ret;
2242 }
2243 
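/*
 * Example usage (a sketch; the myfs_* names are hypothetical): a
 * filesystem hooks this up through its vm_operations_struct so that
 * mmap writes into holes get block allocation and ENOSPC checking.
 */
#if 0
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	return block_page_mkwrite(vma, page, myfs_get_block);
}

static struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};
#endif
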
2244 /*
2245  * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2246  * immediately, while under the page lock.  So it needs a special end_io
2247  * handler which does not touch the bh after unlocking it.
2248  *
2249  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2250  * a race there is benign: unlock_buffer() only uses the bh's address for
2251  * hashing after unlocking the buffer, so it doesn't actually touch the bh
2252  * itself.
2253  */
2254 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2255 {
2256 	if (uptodate) {
2257 		set_buffer_uptodate(bh);
2258 	} else {
2259 		/* This happens, due to failed READA attempts. */
2260 		clear_buffer_uptodate(bh);
2261 	}
2262 	unlock_buffer(bh);
2263 }
2264 
2265 /*
2266  * On entry, no part of the page is uptodate.
2267  * On exit, the page is fully uptodate in the areas outside (from, to).
2268  */
2269 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2270 			get_block_t *get_block)
2271 {
2272 	struct inode *inode = page->mapping->host;
2273 	const unsigned blkbits = inode->i_blkbits;
2274 	const unsigned blocksize = 1 << blkbits;
2275 	struct buffer_head map_bh;
2276 	struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2277 	unsigned block_in_page;
2278 	unsigned block_start;
2279 	sector_t block_in_file;
2280 	char *kaddr;
2281 	int nr_reads = 0;
2282 	int i;
2283 	int ret = 0;
2284 	int is_mapped_to_disk = 1;
2285 
2286 	if (PageMappedToDisk(page))
2287 		return 0;
2288 
2289 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2290 	map_bh.b_page = page;
2291 
2292 	/*
2293 	 * We loop across all blocks in the page, whether or not they are
2294 	 * part of the affected region.  This is so we can discover if the
2295 	 * page is fully mapped-to-disk.
2296 	 */
2297 	for (block_start = 0, block_in_page = 0;
2298 		  block_start < PAGE_CACHE_SIZE;
2299 		  block_in_page++, block_start += blocksize) {
2300 		unsigned block_end = block_start + blocksize;
2301 		int create;
2302 
2303 		map_bh.b_state = 0;
2304 		create = 1;
2305 		if (block_start >= to)
2306 			create = 0;
2307 		map_bh.b_size = blocksize;
2308 		ret = get_block(inode, block_in_file + block_in_page,
2309 					&map_bh, create);
2310 		if (ret)
2311 			goto failed;
2312 		if (!buffer_mapped(&map_bh))
2313 			is_mapped_to_disk = 0;
2314 		if (buffer_new(&map_bh))
2315 			unmap_underlying_metadata(map_bh.b_bdev,
2316 							map_bh.b_blocknr);
2317 		if (PageUptodate(page))
2318 			continue;
2319 		if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2320 			kaddr = kmap_atomic(page, KM_USER0);
2321 			if (block_start < from)
2322 				memset(kaddr+block_start, 0, from-block_start);
2323 			if (block_end > to)
2324 				memset(kaddr + to, 0, block_end - to);
2325 			flush_dcache_page(page);
2326 			kunmap_atomic(kaddr, KM_USER0);
2327 			continue;
2328 		}
2329 		if (buffer_uptodate(&map_bh))
2330 			continue;	/* reiserfs does this */
2331 		if (block_start < from || block_end > to) {
2332 			struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2333 
2334 			if (!bh) {
2335 				ret = -ENOMEM;
2336 				goto failed;
2337 			}
2338 			bh->b_state = map_bh.b_state;
2339 			atomic_set(&bh->b_count, 0);
2340 			bh->b_this_page = NULL;
2341 			bh->b_page = page;
2342 			bh->b_blocknr = map_bh.b_blocknr;
2343 			bh->b_size = blocksize;
2344 			bh->b_data = (char *)(long)block_start;
2345 			bh->b_bdev = map_bh.b_bdev;
2346 			bh->b_private = NULL;
2347 			read_bh[nr_reads++] = bh;
2348 		}
2349 	}
2350 
2351 	if (nr_reads) {
2352 		struct buffer_head *bh;
2353 
2354 		/*
2355 		 * The page is locked, so these buffers are protected from
2356 		 * any VM or truncate activity.  Hence we don't need to care
2357 		 * for the buffer_head refcounts.
2358 		 */
2359 		for (i = 0; i < nr_reads; i++) {
2360 			bh = read_bh[i];
2361 			lock_buffer(bh);
2362 			bh->b_end_io = end_buffer_read_nobh;
2363 			submit_bh(READ, bh);
2364 		}
2365 		for (i = 0; i < nr_reads; i++) {
2366 			bh = read_bh[i];
2367 			wait_on_buffer(bh);
2368 			if (!buffer_uptodate(bh))
2369 				ret = -EIO;
2370 			free_buffer_head(bh);
2371 			read_bh[i] = NULL;
2372 		}
2373 		if (ret)
2374 			goto failed;
2375 	}
2376 
2377 	if (is_mapped_to_disk)
2378 		SetPageMappedToDisk(page);
2379 
2380 	return 0;
2381 
2382 failed:
2383 	for (i = 0; i < nr_reads; i++) {
2384 		if (read_bh[i])
2385 			free_buffer_head(read_bh[i]);
2386 	}
2387 
2388 	/*
2389 	 * Error recovery is pretty slack.  Clear the page and mark it dirty
2390 	 * so we'll later zero out any blocks which _were_ allocated.
2391 	 */
2392 	zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
2393 	SetPageUptodate(page);
2394 	set_page_dirty(page);
2395 	return ret;
2396 }
2397 EXPORT_SYMBOL(nobh_prepare_write);
2398 
2399 /*
2400  * Make sure any changes to nobh_commit_write() are reflected in
2401  * nobh_truncate_page(), since it doesn't call commit_write().
2402  */
2403 int nobh_commit_write(struct file *file, struct page *page,
2404 		unsigned from, unsigned to)
2405 {
2406 	struct inode *inode = page->mapping->host;
2407 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2408 
2409 	SetPageUptodate(page);
2410 	set_page_dirty(page);
2411 	if (pos > inode->i_size) {
2412 		i_size_write(inode, pos);
2413 		mark_inode_dirty(inode);
2414 	}
2415 	return 0;
2416 }
2417 EXPORT_SYMBOL(nobh_commit_write);
2418 
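/*
 * Example usage (a sketch mirroring ext2's "nobh" mode; myfs_* names are
 * hypothetical): nobh_prepare_write() takes an explicit get_block, so it
 * needs a small ->prepare_write wrapper, while nobh_commit_write()
 * already has the ->commit_write signature and can be used directly.
 */
#if 0
static int myfs_nobh_prepare_write(struct file *file, struct page *page,
				   unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, myfs_get_block);
}
/*
 * ...and in the aops:
 *	.prepare_write	= myfs_nobh_prepare_write,
 *	.commit_write	= nobh_commit_write,
 */
#endif
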
2419 /*
2420  * nobh_writepage() - based on block_write_full_page() except
2421  * that it tries to operate without attaching bufferheads to
2422  * the page.
2423  */
2424 int nobh_writepage(struct page *page, get_block_t *get_block,
2425 			struct writeback_control *wbc)
2426 {
2427 	struct inode * const inode = page->mapping->host;
2428 	loff_t i_size = i_size_read(inode);
2429 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2430 	unsigned offset;
2431 	int ret;
2432 
2433 	/* Is the page fully inside i_size? */
2434 	if (page->index < end_index)
2435 		goto out;
2436 
2437 	/* Is the page fully outside i_size? (truncate in progress) */
2438 	offset = i_size & (PAGE_CACHE_SIZE-1);
2439 	if (page->index >= end_index+1 || !offset) {
2440 		/*
2441 		 * The page may have dirty, unmapped buffers.  For example,
2442 		 * they may have been added in ext3_writepage().  Make them
2443 		 * freeable here, so the page does not leak.
2444 		 */
2445 #if 0
2446 		/* Not really sure about this - do we need this? */
2447 		if (page->mapping->a_ops->invalidatepage)
2448 			page->mapping->a_ops->invalidatepage(page, offset);
2449 #endif
2450 		unlock_page(page);
2451 		return 0; /* don't care */
2452 	}
2453 
2454 	/*
2455 	 * The page straddles i_size.  It must be zeroed out on each and every
2456 	 * writepage invocation because it may be mmapped.  "A file is mapped
2457 	 * in multiples of the page size.  For a file that is not a multiple of
2458 	 * the  page size, the remaining memory is zeroed when mapped, and
2459 	 * writes to that region are not written out to the file."
2460 	 */
2461 	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
2462 out:
2463 	ret = mpage_writepage(page, get_block, wbc);
2464 	if (ret == -EAGAIN)
2465 		ret = __block_write_full_page(inode, page, get_block, wbc);
2466 	return ret;
2467 }
2468 EXPORT_SYMBOL(nobh_writepage);
2469 
2470 /*
2471  * This function assumes that ->prepare_write() uses nobh_prepare_write().
2472  */
2473 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2474 {
2475 	struct inode *inode = mapping->host;
2476 	unsigned blocksize = 1 << inode->i_blkbits;
2477 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2478 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2479 	unsigned to;
2480 	struct page *page;
2481 	const struct address_space_operations *a_ops = mapping->a_ops;
2482 	int ret = 0;
2483 
2484 	if ((offset & (blocksize - 1)) == 0)
2485 		goto out;
2486 
2487 	ret = -ENOMEM;
2488 	page = grab_cache_page(mapping, index);
2489 	if (!page)
2490 		goto out;
2491 
2492 	to = (offset + blocksize) & ~(blocksize - 1);
2493 	ret = a_ops->prepare_write(NULL, page, offset, to);
2494 	if (ret == 0) {
2495 		zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
2496 				KM_USER0);
2497 		/*
2498 		 * It would be more correct to call aops->commit_write()
2499 		 * here, but this is more efficient.
2500 		 */
2501 		SetPageUptodate(page);
2502 		set_page_dirty(page);
2503 	}
2504 	unlock_page(page);
2505 	page_cache_release(page);
2506 out:
2507 	return ret;
2508 }
2509 EXPORT_SYMBOL(nobh_truncate_page);
2510 
2511 int block_truncate_page(struct address_space *mapping,
2512 			loff_t from, get_block_t *get_block)
2513 {
2514 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2515 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2516 	unsigned blocksize;
2517 	sector_t iblock;
2518 	unsigned length, pos;
2519 	struct inode *inode = mapping->host;
2520 	struct page *page;
2521 	struct buffer_head *bh;
2522 	int err;
2523 
2524 	blocksize = 1 << inode->i_blkbits;
2525 	length = offset & (blocksize - 1);
2526 
2527 	/* Block boundary? Nothing to do */
2528 	if (!length)
2529 		return 0;
2530 
2531 	length = blocksize - length;
2532 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2533 
2534 	page = grab_cache_page(mapping, index);
2535 	err = -ENOMEM;
2536 	if (!page)
2537 		goto out;
2538 
2539 	if (!page_has_buffers(page))
2540 		create_empty_buffers(page, blocksize, 0);
2541 
2542 	/* Find the buffer that contains "offset" */
2543 	bh = page_buffers(page);
2544 	pos = blocksize;
2545 	while (offset >= pos) {
2546 		bh = bh->b_this_page;
2547 		iblock++;
2548 		pos += blocksize;
2549 	}
2550 
2551 	err = 0;
2552 	if (!buffer_mapped(bh)) {
2553 		WARN_ON(bh->b_size != blocksize);
2554 		err = get_block(inode, iblock, bh, 0);
2555 		if (err)
2556 			goto unlock;
2557 		/* unmapped? It's a hole - nothing to do */
2558 		if (!buffer_mapped(bh))
2559 			goto unlock;
2560 	}
2561 
2562 	/* Ok, it's mapped. Make sure it's up-to-date */
2563 	if (PageUptodate(page))
2564 		set_buffer_uptodate(bh);
2565 
2566 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2567 		err = -EIO;
2568 		ll_rw_block(READ, 1, &bh);
2569 		wait_on_buffer(bh);
2570 		/* Uhhuh. Read error. Complain and punt. */
2571 		if (!buffer_uptodate(bh))
2572 			goto unlock;
2573 	}
2574 
2575 	zero_user_page(page, offset, length, KM_USER0);
2576 	mark_buffer_dirty(bh);
2577 	err = 0;
2578 
2579 unlock:
2580 	unlock_page(page);
2581 	page_cache_release(page);
2582 out:
2583 	return err;
2584 }
2585 
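/*
 * Example usage (a sketch; myfs_* names are hypothetical): a ->truncate
 * implementation zeroes the tail of the final partial block before
 * freeing the on-disk blocks beyond the new i_size.
 */
#if 0
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	/* ...now free the on-disk blocks past the new i_size... */
}
#endif
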
2586 /*
2587  * The generic ->writepage function for buffer-backed address_spaces
2588  */
2589 int block_write_full_page(struct page *page, get_block_t *get_block,
2590 			struct writeback_control *wbc)
2591 {
2592 	struct inode * const inode = page->mapping->host;
2593 	loff_t i_size = i_size_read(inode);
2594 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2595 	unsigned offset;
2596 
2597 	/* Is the page fully inside i_size? */
2598 	if (page->index < end_index)
2599 		return __block_write_full_page(inode, page, get_block, wbc);
2600 
2601 	/* Is the page fully outside i_size? (truncate in progress) */
2602 	offset = i_size & (PAGE_CACHE_SIZE-1);
2603 	if (page->index >= end_index+1 || !offset) {
2604 		/*
2605 		 * The page may have dirty, unmapped buffers.  For example,
2606 		 * they may have been added in ext3_writepage().  Make them
2607 		 * freeable here, so the page does not leak.
2608 		 */
2609 		do_invalidatepage(page, 0);
2610 		unlock_page(page);
2611 		return 0; /* don't care */
2612 	}
2613 
2614 	/*
2615 	 * The page straddles i_size.  It must be zeroed out on each and every
2616 	 * writepage invocation because it may be mmapped.  "A file is mapped
2617 	 * in multiples of the page size.  For a file that is not a multiple of
2618 	 * the  page size, the remaining memory is zeroed when mapped, and
2619 	 * writes to that region are not written out to the file."
2620 	 */
2621 	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
2622 	return __block_write_full_page(inode, page, get_block, wbc);
2623 }
2624 
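/*
 * Example usage (a sketch; myfs_* names are hypothetical): for most
 * buffer-backed filesystems, ->writepage is a one-line wrapper.
 */
#if 0
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}
#endif
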
2625 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2626 			    get_block_t *get_block)
2627 {
2628 	struct buffer_head tmp;
2629 	struct inode *inode = mapping->host;
2630 	tmp.b_state = 0;
2631 	tmp.b_blocknr = 0;
2632 	tmp.b_size = 1 << inode->i_blkbits;
2633 	get_block(inode, block, &tmp, 0);
2634 	return tmp.b_blocknr;
2635 }
2636 
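/*
 * Example usage (a sketch; myfs_* names are hypothetical): ->bmap, as
 * used by the FIBMAP ioctl and the swap code, is typically just this.
 */
#if 0
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif
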
2637 static void end_bio_bh_io_sync(struct bio *bio, int err)
2638 {
2639 	struct buffer_head *bh = bio->bi_private;
2640 
2641 	if (err == -EOPNOTSUPP) {
2642 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2643 		set_bit(BH_Eopnotsupp, &bh->b_state);
2644 	}
2645 
2646 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2647 	bio_put(bio);
2648 }
2649 
2650 int submit_bh(int rw, struct buffer_head * bh)
2651 {
2652 	struct bio *bio;
2653 	int ret = 0;
2654 
2655 	BUG_ON(!buffer_locked(bh));
2656 	BUG_ON(!buffer_mapped(bh));
2657 	BUG_ON(!bh->b_end_io);
2658 
2659 	if (buffer_ordered(bh) && (rw == WRITE))
2660 		rw = WRITE_BARRIER;
2661 
2662 	/*
2663 	 * Only clear out a write error when rewriting, should this
2664 	 * include WRITE_SYNC as well?
2665 	 */
2666 	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2667 		clear_buffer_write_io_error(bh);
2668 
2669 	/*
2670 	 * from here on down, it's all bio -- do the initial mapping,
2671 	 * submit_bio -> generic_make_request may further map this bio around
2672 	 */
2673 	bio = bio_alloc(GFP_NOIO, 1);
2674 
2675 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2676 	bio->bi_bdev = bh->b_bdev;
2677 	bio->bi_io_vec[0].bv_page = bh->b_page;
2678 	bio->bi_io_vec[0].bv_len = bh->b_size;
2679 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2680 
2681 	bio->bi_vcnt = 1;
2682 	bio->bi_idx = 0;
2683 	bio->bi_size = bh->b_size;
2684 
2685 	bio->bi_end_io = end_bio_bh_io_sync;
2686 	bio->bi_private = bh;
2687 
2688 	bio_get(bio);
2689 	submit_bio(rw, bio);
2690 
2691 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
2692 		ret = -EOPNOTSUPP;
2693 
2694 	bio_put(bio);
2695 	return ret;
2696 }
2697 
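/*
 * Example usage (a sketch; assumes 'bh' is mapped and 'err' is declared
 * by the caller): submit_bh() requires a locked, mapped buffer with
 * b_end_io set.  A synchronous read looks like this, essentially what
 * ll_rw_block() plus wait_on_buffer() do for you.
 */
#if 0
	lock_buffer(bh);
	get_bh(bh);			/* end_buffer_read_sync drops this ref */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh))
		err = -EIO;
#endif
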
2698 /**
2699  * ll_rw_block: low-level access to block devices (DEPRECATED)
2700  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2701  * @nr: number of &struct buffer_heads in the array
2702  * @bhs: array of pointers to &struct buffer_head
2703  *
2704  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2705  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2706  * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
2707  * option, %SWRITE, is like %WRITE except that we make sure the *current* data
2708  * in the buffers is sent to disk.  The fourth option, %READA, is described in
2709  * the documentation for generic_make_request(), which ll_rw_block() calls.
2710  * This function drops any buffer that it cannot get a lock on (with the
2711  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2712  * clean when doing a write request, and any buffer that appears to be
2713  * up-to-date when doing a read request.  Further, it marks as clean buffers that
2714  * are processed for writing (the buffer cache won't assume that they are
2715  * actually clean until the buffer gets unlocked).
2716  *
2717  * ll_rw_block sets b_end_io to a simple completion handler that marks
2718  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2719  * any waiters.
2720  *
2721  * All of the buffers must be for the same device, and must also be a
2722  * multiple of the current approved size for the device.
2723  */
2724 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2725 {
2726 	int i;
2727 
2728 	for (i = 0; i < nr; i++) {
2729 		struct buffer_head *bh = bhs[i];
2730 
2731 		if (rw == SWRITE)
2732 			lock_buffer(bh);
2733 		else if (test_set_buffer_locked(bh))
2734 			continue;
2735 
2736 		if (rw == WRITE || rw == SWRITE) {
2737 			if (test_clear_buffer_dirty(bh)) {
2738 				bh->b_end_io = end_buffer_write_sync;
2739 				get_bh(bh);
2740 				submit_bh(WRITE, bh);
2741 				continue;
2742 			}
2743 		} else {
2744 			if (!buffer_uptodate(bh)) {
2745 				bh->b_end_io = end_buffer_read_sync;
2746 				get_bh(bh);
2747 				submit_bh(rw, bh);
2748 				continue;
2749 			}
2750 		}
2751 		unlock_buffer(bh);
2752 	}
2753 }
2754 
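/*
 * Example usage (a sketch; myfs_bread is hypothetical): the classic
 * "read one metadata block synchronously" pattern, essentially what
 * sb_bread() does for you.
 */
#if 0
static struct buffer_head *myfs_bread(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_getblk(sb, block);

	if (bh && !buffer_uptodate(bh)) {
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			brelse(bh);
			return NULL;
		}
	}
	return bh;
}
#endif
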
2755 /*
2756  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2757  * and then start new I/O and then wait upon it.  The caller must have a ref on
2758  * the buffer_head.
2759  */
2760 int sync_dirty_buffer(struct buffer_head *bh)
2761 {
2762 	int ret = 0;
2763 
2764 	WARN_ON(atomic_read(&bh->b_count) < 1);
2765 	lock_buffer(bh);
2766 	if (test_clear_buffer_dirty(bh)) {
2767 		get_bh(bh);
2768 		bh->b_end_io = end_buffer_write_sync;
2769 		ret = submit_bh(WRITE, bh);
2770 		wait_on_buffer(bh);
2771 		if (buffer_eopnotsupp(bh)) {
2772 			clear_buffer_eopnotsupp(bh);
2773 			ret = -EOPNOTSUPP;
2774 		}
2775 		if (!ret && !buffer_uptodate(bh))
2776 			ret = -EIO;
2777 	} else {
2778 		unlock_buffer(bh);
2779 	}
2780 	return ret;
2781 }
2782 
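/*
 * Example usage (a sketch; the myfs_sb_info structure and its s_sbh
 * member are hypothetical): forcing an updated on-disk superblock out,
 * in the style of the ext3 commit-super code.
 */
#if 0
static int myfs_sync_super(struct myfs_sb_info *sbi)
{
	mark_buffer_dirty(sbi->s_sbh);
	return sync_dirty_buffer(sbi->s_sbh);
}
#endif
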
2783 /*
2784  * try_to_free_buffers() checks if all the buffers on this particular page
2785  * are unused, and releases them if so.
2786  *
2787  * Exclusion against try_to_free_buffers may be obtained by either
2788  * locking the page or by holding its mapping's private_lock.
2789  *
2790  * If the page is dirty but all the buffers are clean then we need to
2791  * be sure to mark the page clean as well.  This is because the page
2792  * may be against a block device, and a later reattachment of buffers
2793  * to a dirty page will set *all* buffers dirty.  Which would corrupt
2794  * filesystem data on the same device.
2795  *
2796  * The same applies to regular filesystem pages: if all the buffers are
2797  * clean then we set the page clean and proceed.  To do that, we require
2798  * total exclusion from __set_page_dirty_buffers().  That is obtained with
2799  * private_lock.
2800  *
2801  * try_to_free_buffers() is non-blocking.
2802  */
2803 static inline int buffer_busy(struct buffer_head *bh)
2804 {
2805 	return atomic_read(&bh->b_count) |
2806 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2807 }
2808 
2809 static int
2810 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2811 {
2812 	struct buffer_head *head = page_buffers(page);
2813 	struct buffer_head *bh;
2814 
2815 	bh = head;
2816 	do {
2817 		if (buffer_write_io_error(bh) && page->mapping)
2818 			set_bit(AS_EIO, &page->mapping->flags);
2819 		if (buffer_busy(bh))
2820 			goto failed;
2821 		bh = bh->b_this_page;
2822 	} while (bh != head);
2823 
2824 	do {
2825 		struct buffer_head *next = bh->b_this_page;
2826 
2827 		if (!list_empty(&bh->b_assoc_buffers))
2828 			__remove_assoc_queue(bh);
2829 		bh = next;
2830 	} while (bh != head);
2831 	*buffers_to_free = head;
2832 	__clear_page_buffers(page);
2833 	return 1;
2834 failed:
2835 	return 0;
2836 }
2837 
2838 int try_to_free_buffers(struct page *page)
2839 {
2840 	struct address_space * const mapping = page->mapping;
2841 	struct buffer_head *buffers_to_free = NULL;
2842 	int ret = 0;
2843 
2844 	BUG_ON(!PageLocked(page));
2845 	if (PageWriteback(page))
2846 		return 0;
2847 
2848 	if (mapping == NULL) {		/* can this still happen? */
2849 		ret = drop_buffers(page, &buffers_to_free);
2850 		goto out;
2851 	}
2852 
2853 	spin_lock(&mapping->private_lock);
2854 	ret = drop_buffers(page, &buffers_to_free);
2855 
2856 	/*
2857 	 * If the filesystem writes its buffers by hand (eg ext3)
2858 	 * then we can have clean buffers against a dirty page.  We
2859 	 * clean the page here; otherwise the VM will never notice
2860 	 * that the filesystem did any IO at all.
2861 	 *
2862 	 * Also, during truncate, discard_buffer will have marked all
2863 	 * the page's buffers clean.  We discover that here and clean
2864 	 * the page also.
2865 	 *
2866 	 * private_lock must be held over this entire operation in order
2867 	 * to synchronise against __set_page_dirty_buffers and prevent the
2868 	 * dirty bit from being lost.
2869 	 */
2870 	if (ret)
2871 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
2872 	spin_unlock(&mapping->private_lock);
2873 out:
2874 	if (buffers_to_free) {
2875 		struct buffer_head *bh = buffers_to_free;
2876 
2877 		do {
2878 			struct buffer_head *next = bh->b_this_page;
2879 			free_buffer_head(bh);
2880 			bh = next;
2881 		} while (bh != buffers_to_free);
2882 	}
2883 	return ret;
2884 }
2885 EXPORT_SYMBOL(try_to_free_buffers);
2886 
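/*
 * Example usage (a sketch; myfs_releasepage is hypothetical): a simple
 * ->releasepage.  Filesystems with private buffer state (journalling,
 * for example) must check that state before letting the buffers go.
 */
#if 0
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	return try_to_free_buffers(page);
}
#endif
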
2887 void block_sync_page(struct page *page)
2888 {
2889 	struct address_space *mapping;
2890 
2891 	smp_mb();
2892 	mapping = page_mapping(page);
2893 	if (mapping)
2894 		blk_run_backing_dev(mapping->backing_dev_info, page);
2895 }
2896 
2897 /*
2898  * There are no bdflush tunables left.  But distributions are
2899  * still running obsolete flush daemons, so we terminate them here.
2900  *
2901  * Use of bdflush() is deprecated and will be removed in a future kernel.
2902  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
2903  */
2904 asmlinkage long sys_bdflush(int func, long data)
2905 {
2906 	static int msg_count;
2907 
2908 	if (!capable(CAP_SYS_ADMIN))
2909 		return -EPERM;
2910 
2911 	if (msg_count < 5) {
2912 		msg_count++;
2913 		printk(KERN_INFO
2914 			"warning: process `%s' used the obsolete bdflush"
2915 			" system call\n", current->comm);
2916 		printk(KERN_INFO "Fix your initscripts?\n");
2917 	}
2918 
2919 	if (func == 1)
2920 		do_exit(0);
2921 	return 0;
2922 }
2923 
2924 /*
2925  * Buffer-head allocation
2926  */
2927 static struct kmem_cache *bh_cachep;
2928 
2929 /*
2930  * Once the number of bh's in the machine exceeds this level, we start
2931  * stripping them in writeback.
2932  */
2933 static int max_buffer_heads;
2934 
2935 int buffer_heads_over_limit;
2936 
2937 struct bh_accounting {
2938 	int nr;			/* Number of live bh's */
2939 	int ratelimit;		/* Limit cacheline bouncing */
2940 };
2941 
2942 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2943 
2944 static void recalc_bh_state(void)
2945 {
2946 	int i;
2947 	int tot = 0;
2948 
2949 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
2950 		return;
2951 	__get_cpu_var(bh_accounting).ratelimit = 0;
2952 	for_each_online_cpu(i)
2953 		tot += per_cpu(bh_accounting, i).nr;
2954 	buffer_heads_over_limit = (tot > max_buffer_heads);
2955 }
2956 
2957 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
2958 {
2959 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
2960 	if (ret) {
2961 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
2962 		get_cpu_var(bh_accounting).nr++;
2963 		recalc_bh_state();
2964 		put_cpu_var(bh_accounting);
2965 	}
2966 	return ret;
2967 }
2968 EXPORT_SYMBOL(alloc_buffer_head);
2969 
2970 void free_buffer_head(struct buffer_head *bh)
2971 {
2972 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
2973 	kmem_cache_free(bh_cachep, bh);
2974 	get_cpu_var(bh_accounting).nr--;
2975 	recalc_bh_state();
2976 	put_cpu_var(bh_accounting);
2977 }
2978 EXPORT_SYMBOL(free_buffer_head);
2979 
2980 static void buffer_exit_cpu(int cpu)
2981 {
2982 	int i;
2983 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
2984 
2985 	for (i = 0; i < BH_LRU_SIZE; i++) {
2986 		brelse(b->bhs[i]);
2987 		b->bhs[i] = NULL;
2988 	}
2989 	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
2990 	per_cpu(bh_accounting, cpu).nr = 0;
2991 	put_cpu_var(bh_accounting);
2992 }
2993 
2994 static int buffer_cpu_notify(struct notifier_block *self,
2995 			      unsigned long action, void *hcpu)
2996 {
2997 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
2998 		buffer_exit_cpu((unsigned long)hcpu);
2999 	return NOTIFY_OK;
3000 }
3001 
3002 void __init buffer_init(void)
3003 {
3004 	int nrpages;
3005 
3006 	bh_cachep = KMEM_CACHE(buffer_head,
3007 			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
3008 
3009 	/*
3010 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3011 	 */
3012 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3013 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3014 	hotcpu_notifier(buffer_cpu_notify, 0);
3015 }
3016 
3017 EXPORT_SYMBOL(__bforget);
3018 EXPORT_SYMBOL(__brelse);
3019 EXPORT_SYMBOL(__wait_on_buffer);
3020 EXPORT_SYMBOL(block_commit_write);
3021 EXPORT_SYMBOL(block_prepare_write);
3022 EXPORT_SYMBOL(block_page_mkwrite);
3023 EXPORT_SYMBOL(block_read_full_page);
3024 EXPORT_SYMBOL(block_sync_page);
3025 EXPORT_SYMBOL(block_truncate_page);
3026 EXPORT_SYMBOL(block_write_full_page);
3027 EXPORT_SYMBOL(cont_prepare_write);
3028 EXPORT_SYMBOL(end_buffer_read_sync);
3029 EXPORT_SYMBOL(end_buffer_write_sync);
3030 EXPORT_SYMBOL(file_fsync);
3031 EXPORT_SYMBOL(fsync_bdev);
3032 EXPORT_SYMBOL(generic_block_bmap);
3033 EXPORT_SYMBOL(generic_commit_write);
3034 EXPORT_SYMBOL(generic_cont_expand);
3035 EXPORT_SYMBOL(generic_cont_expand_simple);
3036 EXPORT_SYMBOL(init_buffer);
3037 EXPORT_SYMBOL(invalidate_bdev);
3038 EXPORT_SYMBOL(ll_rw_block);
3039 EXPORT_SYMBOL(mark_buffer_dirty);
3040 EXPORT_SYMBOL(submit_bh);
3041 EXPORT_SYMBOL(sync_dirty_buffer);
3042 EXPORT_SYMBOL(unlock_buffer);
3043