xref: /linux/fs/buffer.c (revision f99cb7a43c5cca1813a97312487acf7a0f88ee2a)
1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6 
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20 
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44 
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46 
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48 
49 inline void
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 {
52 	bh->b_end_io = handler;
53 	bh->b_private = private;
54 }
55 
56 static int sync_buffer(void *word)
57 {
58 	struct block_device *bd;
59 	struct buffer_head *bh
60 		= container_of(word, struct buffer_head, b_state);
61 
62 	smp_mb();
63 	bd = bh->b_bdev;
64 	if (bd)
65 		blk_run_address_space(bd->bd_inode->i_mapping);
66 	io_schedule();
67 	return 0;
68 }
69 
70 void __lock_buffer(struct buffer_head *bh)
71 {
72 	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 							TASK_UNINTERRUPTIBLE);
74 }
75 EXPORT_SYMBOL(__lock_buffer);
76 
77 void unlock_buffer(struct buffer_head *bh)
78 {
79 	smp_mb__before_clear_bit();
80 	clear_buffer_locked(bh);
81 	smp_mb__after_clear_bit();
82 	wake_up_bit(&bh->b_state, BH_Lock);
83 }
84 
85 /*
86  * Block until a buffer comes unlocked.  This doesn't stop it
87  * from becoming locked again - you have to lock it yourself
88  * if you want to preserve its state.
89  */
90 void __wait_on_buffer(struct buffer_head * bh)
91 {
92 	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
93 }
94 
95 static void
96 __clear_page_buffers(struct page *page)
97 {
98 	ClearPagePrivate(page);
99 	set_page_private(page, 0);
100 	page_cache_release(page);
101 }
102 
103 static void buffer_io_error(struct buffer_head *bh)
104 {
105 	char b[BDEVNAME_SIZE];
106 
107 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
108 			bdevname(bh->b_bdev, b),
109 			(unsigned long long)bh->b_blocknr);
110 }
111 
112 /*
113  * End-of-IO handler helper function which does not touch the bh after
114  * unlocking it.
115  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
116  * a race there is benign: unlock_buffer() only uses the bh's address for
117  * hashing after unlocking the buffer, so it doesn't actually touch the bh
118  * itself.
119  */
120 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
121 {
122 	if (uptodate) {
123 		set_buffer_uptodate(bh);
124 	} else {
125 		/* This happens, due to failed READA attempts. */
126 		clear_buffer_uptodate(bh);
127 	}
128 	unlock_buffer(bh);
129 }
130 
131 /*
132  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
133  * unlock the buffer. This is what ll_rw_block uses too.
134  */
135 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
136 {
137 	__end_buffer_read_notouch(bh, uptodate);
138 	put_bh(bh);
139 }
140 
141 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
142 {
143 	char b[BDEVNAME_SIZE];
144 
145 	if (uptodate) {
146 		set_buffer_uptodate(bh);
147 	} else {
148 		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
149 			buffer_io_error(bh);
150 			printk(KERN_WARNING "lost page write due to "
151 					"I/O error on %s\n",
152 				       bdevname(bh->b_bdev, b));
153 		}
154 		set_buffer_write_io_error(bh);
155 		clear_buffer_uptodate(bh);
156 	}
157 	unlock_buffer(bh);
158 	put_bh(bh);
159 }
160 
161 /*
162  * Write out and wait upon all the dirty data associated with a block
163  * device via its mapping.  Does not take the superblock lock.
164  */
165 int sync_blockdev(struct block_device *bdev)
166 {
167 	int ret = 0;
168 
169 	if (bdev)
170 		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
171 	return ret;
172 }
173 EXPORT_SYMBOL(sync_blockdev);
174 
175 /*
176  * Write out and wait upon all dirty data associated with this
177  * device.   Filesystem data as well as the underlying block
178  * device.  Takes the superblock lock.
179  */
180 int fsync_bdev(struct block_device *bdev)
181 {
182 	struct super_block *sb = get_super(bdev);
183 	if (sb) {
184 		int res = fsync_super(sb);
185 		drop_super(sb);
186 		return res;
187 	}
188 	return sync_blockdev(bdev);
189 }
190 
191 /**
192  * freeze_bdev  --  lock a filesystem and force it into a consistent state
193  * @bdev:	blockdevice to lock
194  *
195  * This takes the block device bd_mount_sem to make sure no new mounts
196  * happen on bdev until thaw_bdev() is called.
197  * If a superblock is found on this device, we take the s_umount semaphore
198  * on it to make sure nobody unmounts until the snapshot creation is done.
199  */
200 struct super_block *freeze_bdev(struct block_device *bdev)
201 {
202 	struct super_block *sb;
203 
204 	down(&bdev->bd_mount_sem);
205 	sb = get_super(bdev);
206 	if (sb && !(sb->s_flags & MS_RDONLY)) {
207 		sb->s_frozen = SB_FREEZE_WRITE;
208 		smp_wmb();
209 
210 		__fsync_super(sb);
211 
212 		sb->s_frozen = SB_FREEZE_TRANS;
213 		smp_wmb();
214 
215 		sync_blockdev(sb->s_bdev);
216 
217 		if (sb->s_op->write_super_lockfs)
218 			sb->s_op->write_super_lockfs(sb);
219 	}
220 
221 	sync_blockdev(bdev);
222 	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
223 }
224 EXPORT_SYMBOL(freeze_bdev);
225 
226 /**
227  * thaw_bdev  -- unlock filesystem
228  * @bdev:	blockdevice to unlock
229  * @sb:		associated superblock
230  *
231  * Unlocks the filesystem and marks it writeable again after freeze_bdev().
232  */
233 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
234 {
235 	if (sb) {
236 		BUG_ON(sb->s_bdev != bdev);
237 
238 		if (sb->s_op->unlockfs)
239 			sb->s_op->unlockfs(sb);
240 		sb->s_frozen = SB_UNFROZEN;
241 		smp_wmb();
242 		wake_up(&sb->s_wait_unfrozen);
243 		drop_super(sb);
244 	}
245 
246 	up(&bdev->bd_mount_sem);
247 }
248 EXPORT_SYMBOL(thaw_bdev);
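
/*
 * A minimal usage sketch (not part of this file): a snapshot or backup
 * driver brackets its work with freeze_bdev()/thaw_bdev() so that the
 * filesystem on the device is quiescent while it runs.  The device
 * pointer and the snapshot step below are hypothetical placeholders.
 */
#if 0	/* illustrative only */
static void example_snapshot(struct block_device *bdev)
{
	struct super_block *sb;

	sb = freeze_bdev(bdev);	/* blocks new writes and syncs the fs */
	/* ... take the snapshot while the device is quiescent ... */
	thaw_bdev(bdev, sb);	/* sb may be NULL if nothing was mounted */
}
#endif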
249 
250 /*
251  * Various filesystems appear to want __find_get_block to be non-blocking.
252  * But it's the page lock which protects the buffers.  To get around this,
253  * we get exclusion from try_to_free_buffers with the blockdev mapping's
254  * private_lock.
255  *
256  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
257  * may be quite high.  This code could TryLock the page, and if that
258  * succeeds, there is no need to take private_lock. (But if
259  * private_lock is contended then so is mapping->tree_lock).
260  */
261 static struct buffer_head *
262 __find_get_block_slow(struct block_device *bdev, sector_t block)
263 {
264 	struct inode *bd_inode = bdev->bd_inode;
265 	struct address_space *bd_mapping = bd_inode->i_mapping;
266 	struct buffer_head *ret = NULL;
267 	pgoff_t index;
268 	struct buffer_head *bh;
269 	struct buffer_head *head;
270 	struct page *page;
271 	int all_mapped = 1;
272 
273 	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
274 	page = find_get_page(bd_mapping, index);
275 	if (!page)
276 		goto out;
277 
278 	spin_lock(&bd_mapping->private_lock);
279 	if (!page_has_buffers(page))
280 		goto out_unlock;
281 	head = page_buffers(page);
282 	bh = head;
283 	do {
284 		if (bh->b_blocknr == block) {
285 			ret = bh;
286 			get_bh(bh);
287 			goto out_unlock;
288 		}
289 		if (!buffer_mapped(bh))
290 			all_mapped = 0;
291 		bh = bh->b_this_page;
292 	} while (bh != head);
293 
294 	/* We might be here because some of the buffers on this page are
295 	 * not mapped.  This is due to various races between
296 	 * file I/O on the block device and getblk.  It gets dealt with
297 	 * elsewhere, so don't report an error if we had some unmapped buffers.
298 	 */
299 	if (all_mapped) {
300 		printk("__find_get_block_slow() failed. "
301 			"block=%llu, b_blocknr=%llu\n",
302 			(unsigned long long)block,
303 			(unsigned long long)bh->b_blocknr);
304 		printk("b_state=0x%08lx, b_size=%zu\n",
305 			bh->b_state, bh->b_size);
306 		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
307 	}
308 out_unlock:
309 	spin_unlock(&bd_mapping->private_lock);
310 	page_cache_release(page);
311 out:
312 	return ret;
313 }
314 
315 /* If invalidate_buffers() is going to trash dirty buffers, some kind of
316    fs corruption is going on. Trashing dirty data always implies losing
317    information that the user expected to have already been stored on the
318    physical layer.
319 
320    Thus invalidate_buffers() in general usage is not allowed to trash
321    dirty buffers. For example, ioctl(BLKFLSBUF) expects dirty data to
322    be preserved.  Such buffers are simply skipped.
323 
324    We also skip buffers which are still in use.  For example, this can
325    happen if a userspace program is reading the block device.
326 
327    NOTE: if the user removes a removable-media disk while there is still
328    dirty data that has not been synced to disk (due to a bug in the device
329    driver or to a user error), then by not destroying the dirty buffers we
330    could also corrupt the next media inserted.  A parameter is therefore
331    needed to handle this case as safely as possible, i.e. to avoid writing
332    the old, now-corrupted disk's data onto the newly inserted disk.
333    Also, for a ramdisk, the natural way to release the ramdisk memory is
334    to destroy its dirty buffers.
335 
336    These are two special cases. Normal usage is for the device driver to
337    issue a sync on the device (without waiting for I/O completion) and
338    then an invalidate_buffers() call that doesn't trash dirty buffers.
339 
340    For handling cache coherency with the blkdev pagecache, the 'update'
341    case has been introduced. It is needed to re-read any pinned buffer
342    from disk. NOTE: re-reading from disk is destructive, so we can do it
343    only when we assume nobody is changing the buffercache under our I/O
344    and when we think the disk contains more recent information than the
345    buffercache.  The update == 1 pass marks the buffers needing update;
346    the update == 2 pass does the actual I/O. */
347 void invalidate_bdev(struct block_device *bdev)
348 {
349 	struct address_space *mapping = bdev->bd_inode->i_mapping;
350 
351 	if (mapping->nrpages == 0)
352 		return;
353 
354 	invalidate_bh_lrus();
355 	invalidate_mapping_pages(mapping, 0, -1);
356 }
357 
358 /*
359  * Kick pdflush then try to free up some ZONE_NORMAL memory.
360  */
361 static void free_more_memory(void)
362 {
363 	struct zone **zones;
364 	pg_data_t *pgdat;
365 
366 	wakeup_pdflush(1024);
367 	yield();
368 
369 	for_each_online_pgdat(pgdat) {
370 		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
371 		if (*zones)
372 			try_to_free_pages(zones, 0, GFP_NOFS);
373 	}
374 }
375 
376 /*
377  * I/O completion handler for block_read_full_page() - pages
378  * which come unlocked at the end of I/O.
379  */
380 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
381 {
382 	unsigned long flags;
383 	struct buffer_head *first;
384 	struct buffer_head *tmp;
385 	struct page *page;
386 	int page_uptodate = 1;
387 
388 	BUG_ON(!buffer_async_read(bh));
389 
390 	page = bh->b_page;
391 	if (uptodate) {
392 		set_buffer_uptodate(bh);
393 	} else {
394 		clear_buffer_uptodate(bh);
395 		if (printk_ratelimit())
396 			buffer_io_error(bh);
397 		SetPageError(page);
398 	}
399 
400 	/*
401 	 * Be _very_ careful from here on. Bad things can happen if
402 	 * two buffer heads end IO at almost the same time and both
403 	 * decide that the page is now completely done.
404 	 */
405 	first = page_buffers(page);
406 	local_irq_save(flags);
407 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
408 	clear_buffer_async_read(bh);
409 	unlock_buffer(bh);
410 	tmp = bh;
411 	do {
412 		if (!buffer_uptodate(tmp))
413 			page_uptodate = 0;
414 		if (buffer_async_read(tmp)) {
415 			BUG_ON(!buffer_locked(tmp));
416 			goto still_busy;
417 		}
418 		tmp = tmp->b_this_page;
419 	} while (tmp != bh);
420 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
421 	local_irq_restore(flags);
422 
423 	/*
424 	 * If none of the buffers had errors and they are all
425 	 * uptodate then we can set the page uptodate.
426 	 */
427 	if (page_uptodate && !PageError(page))
428 		SetPageUptodate(page);
429 	unlock_page(page);
430 	return;
431 
432 still_busy:
433 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
434 	local_irq_restore(flags);
435 	return;
436 }
437 
438 /*
439  * Completion handler for block_write_full_page() - pages which are unlocked
440  * during I/O, and which have PageWriteback cleared upon I/O completion.
441  */
442 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
443 {
444 	char b[BDEVNAME_SIZE];
445 	unsigned long flags;
446 	struct buffer_head *first;
447 	struct buffer_head *tmp;
448 	struct page *page;
449 
450 	BUG_ON(!buffer_async_write(bh));
451 
452 	page = bh->b_page;
453 	if (uptodate) {
454 		set_buffer_uptodate(bh);
455 	} else {
456 		if (printk_ratelimit()) {
457 			buffer_io_error(bh);
458 			printk(KERN_WARNING "lost page write due to "
459 					"I/O error on %s\n",
460 			       bdevname(bh->b_bdev, b));
461 		}
462 		set_bit(AS_EIO, &page->mapping->flags);
463 		set_buffer_write_io_error(bh);
464 		clear_buffer_uptodate(bh);
465 		SetPageError(page);
466 	}
467 
468 	first = page_buffers(page);
469 	local_irq_save(flags);
470 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
471 
472 	clear_buffer_async_write(bh);
473 	unlock_buffer(bh);
474 	tmp = bh->b_this_page;
475 	while (tmp != bh) {
476 		if (buffer_async_write(tmp)) {
477 			BUG_ON(!buffer_locked(tmp));
478 			goto still_busy;
479 		}
480 		tmp = tmp->b_this_page;
481 	}
482 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
483 	local_irq_restore(flags);
484 	end_page_writeback(page);
485 	return;
486 
487 still_busy:
488 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
489 	local_irq_restore(flags);
490 	return;
491 }
492 
493 /*
494  * If a page's buffers are under async read-in (end_buffer_async_read
495  * completion) then there is a possibility that another thread of
496  * control could lock one of the buffers after it has completed
497  * but while some of the other buffers have not completed.  This
498  * locked buffer would confuse end_buffer_async_read() into not unlocking
499  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
500  * that this buffer is not under async I/O.
501  *
502  * The page comes unlocked when it has no locked buffer_async buffers
503  * left.
504  *
505  * PageLocked prevents anyone from starting new async I/O against any
506  * of the buffers.
507  *
508  * PageWriteback is used to prevent simultaneous writeout of the same
509  * page.
510  *
511  * PageLocked prevents anyone from starting writeback of a page which is
512  * under read I/O (PageWriteback is only ever set against a locked page).
513  */
514 static void mark_buffer_async_read(struct buffer_head *bh)
515 {
516 	bh->b_end_io = end_buffer_async_read;
517 	set_buffer_async_read(bh);
518 }
519 
520 void mark_buffer_async_write(struct buffer_head *bh)
521 {
522 	bh->b_end_io = end_buffer_async_write;
523 	set_buffer_async_write(bh);
524 }
525 EXPORT_SYMBOL(mark_buffer_async_write);
526 
527 
528 /*
529  * fs/buffer.c contains helper functions for buffer-backed address space's
530  * fsync functions.  A common requirement for buffer-based filesystems is
531  * that certain data from the backing blockdev needs to be written out for
532  * a successful fsync().  For example, ext2 indirect blocks need to be
533  * written back and waited upon before fsync() returns.
534  *
535  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
536  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
537  * management of a list of dependent buffers at ->i_mapping->private_list.
538  *
539  * Locking is a little subtle: try_to_free_buffers() will remove buffers
540  * from their controlling inode's queue when they are being freed.  But
541  * try_to_free_buffers() will be operating against the *blockdev* mapping
542  * at the time, not against the S_ISREG file which depends on those buffers.
543  * So the locking for private_list is via the private_lock in the address_space
544  * which backs the buffers.  Which is different from the address_space
545  * against which the buffers are listed.  So for a particular address_space,
546  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
547  * mapping->private_list will always be protected by the backing blockdev's
548  * ->private_lock.
549  *
550  * Which introduces a requirement: all buffers on an address_space's
551  * ->private_list must be from the same address_space: the blockdev's.
552  *
553  * address_spaces which do not place buffers at ->private_list via these
554  * utility functions are free to use private_lock and private_list for
555  * whatever they want.  The only requirement is that list_empty(private_list)
556  * be true at clear_inode() time.
557  *
558  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
559  * filesystems should do that.  invalidate_inode_buffers() should just go
560  * BUG_ON(!list_empty).
561  *
562  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
563  * take an address_space, not an inode.  And it should be called
564  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
565  * queued up.
566  *
567  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
568  * list if it is already on a list.  Because if the buffer is on a list,
569  * it *must* already be on the right one.  If not, the filesystem is being
570  * silly.  This will save a ton of locking.  But first we have to ensure
571  * that buffers are taken *off* the old inode's list when they are freed
572  * (presumably in truncate).  That requires careful auditing of all
573  * filesystems (do it inside bforget()).  It could also be done by bringing
574  * b_inode back.
575  */
576 
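/*
 * A minimal usage sketch (not part of this file), loosely following what a
 * simple filesystem such as ext2 does: when a dependent block (e.g. an
 * indirect block) is modified it is queued on the inode's ->private_list
 * with mark_buffer_dirty_inode(), and the filesystem's ->fsync() later
 * writes and waits on that list via sync_mapping_buffers().  The helper
 * example_dirty_indirect() and its bh argument are hypothetical.
 */
#if 0	/* illustrative only */
static void example_dirty_indirect(struct inode *inode, struct buffer_head *bh)
{
	/* called from the write path when the indirect block changes */
	mark_buffer_dirty_inode(bh, inode);
}

static int example_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	/* write out and wait upon everything queued above */
	return sync_mapping_buffers(dentry->d_inode->i_mapping);
}
#endif
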
577 /*
578  * The buffer's backing address_space's private_lock must be held
579  */
580 static inline void __remove_assoc_queue(struct buffer_head *bh)
581 {
582 	list_del_init(&bh->b_assoc_buffers);
583 	WARN_ON(!bh->b_assoc_map);
584 	if (buffer_write_io_error(bh))
585 		set_bit(AS_EIO, &bh->b_assoc_map->flags);
586 	bh->b_assoc_map = NULL;
587 }
588 
589 int inode_has_buffers(struct inode *inode)
590 {
591 	return !list_empty(&inode->i_data.private_list);
592 }
593 
594 /*
595  * osync is designed to support O_SYNC I/O.  It waits synchronously for
596  * all already-submitted IO to complete, but does not queue any new
597  * writes to the disk.
598  *
599  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
600  * you dirty the buffers, and then use osync_inode_buffers to wait for
601  * completion.  Any other dirty buffers which are not yet queued for
602  * write will not be flushed to disk by the osync.
603  */
604 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
605 {
606 	struct buffer_head *bh;
607 	struct list_head *p;
608 	int err = 0;
609 
610 	spin_lock(lock);
611 repeat:
612 	list_for_each_prev(p, list) {
613 		bh = BH_ENTRY(p);
614 		if (buffer_locked(bh)) {
615 			get_bh(bh);
616 			spin_unlock(lock);
617 			wait_on_buffer(bh);
618 			if (!buffer_uptodate(bh))
619 				err = -EIO;
620 			brelse(bh);
621 			spin_lock(lock);
622 			goto repeat;
623 		}
624 	}
625 	spin_unlock(lock);
626 	return err;
627 }
628 
629 /**
630  * sync_mapping_buffers - write out and wait upon a mapping's "associated"
631  *                        buffers
632  * @mapping: the mapping which wants those buffers written
633  *
634  * Starts I/O against the buffers at mapping->private_list, and waits upon
635  * that I/O.
636  *
637  * Basically, this is a convenience function for fsync().
638  * @mapping is a file or directory which needs those buffers to be written for
639  * a successful fsync().
640  */
641 int sync_mapping_buffers(struct address_space *mapping)
642 {
643 	struct address_space *buffer_mapping = mapping->assoc_mapping;
644 
645 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
646 		return 0;
647 
648 	return fsync_buffers_list(&buffer_mapping->private_lock,
649 					&mapping->private_list);
650 }
651 EXPORT_SYMBOL(sync_mapping_buffers);
652 
653 /*
654  * Called when we've recently written block `bblock', and it is known that
655  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
656  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
657  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
658  */
659 void write_boundary_block(struct block_device *bdev,
660 			sector_t bblock, unsigned blocksize)
661 {
662 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
663 	if (bh) {
664 		if (buffer_dirty(bh))
665 			ll_rw_block(WRITE, 1, &bh);
666 		put_bh(bh);
667 	}
668 }
669 
670 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
671 {
672 	struct address_space *mapping = inode->i_mapping;
673 	struct address_space *buffer_mapping = bh->b_page->mapping;
674 
675 	mark_buffer_dirty(bh);
676 	if (!mapping->assoc_mapping) {
677 		mapping->assoc_mapping = buffer_mapping;
678 	} else {
679 		BUG_ON(mapping->assoc_mapping != buffer_mapping);
680 	}
681 	if (!bh->b_assoc_map) {
682 		spin_lock(&buffer_mapping->private_lock);
683 		list_move_tail(&bh->b_assoc_buffers,
684 				&mapping->private_list);
685 		bh->b_assoc_map = mapping;
686 		spin_unlock(&buffer_mapping->private_lock);
687 	}
688 }
689 EXPORT_SYMBOL(mark_buffer_dirty_inode);
690 
691 /*
692  * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
693  * dirty.
694  *
695  * If warn is true, then emit a warning if the page is not uptodate and has
696  * not been truncated.
697  */
698 static int __set_page_dirty(struct page *page,
699 		struct address_space *mapping, int warn)
700 {
701 	if (unlikely(!mapping))
702 		return !TestSetPageDirty(page);
703 
704 	if (TestSetPageDirty(page))
705 		return 0;
706 
707 	write_lock_irq(&mapping->tree_lock);
708 	if (page->mapping) {	/* Race with truncate? */
709 		WARN_ON_ONCE(warn && !PageUptodate(page));
710 
711 		if (mapping_cap_account_dirty(mapping)) {
712 			__inc_zone_page_state(page, NR_FILE_DIRTY);
713 			__inc_bdi_stat(mapping->backing_dev_info,
714 					BDI_RECLAIMABLE);
715 			task_io_account_write(PAGE_CACHE_SIZE);
716 		}
717 		radix_tree_tag_set(&mapping->page_tree,
718 				page_index(page), PAGECACHE_TAG_DIRTY);
719 	}
720 	write_unlock_irq(&mapping->tree_lock);
721 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
722 
723 	return 1;
724 }
725 
726 /*
727  * Add a page to the dirty page list.
728  *
729  * It is a sad fact of life that this function is called from several places
730  * deeply under spinlocking.  It may not sleep.
731  *
732  * If the page has buffers, the uptodate buffers are set dirty, to preserve
733  * dirty-state coherency between the page and the buffers.  If the page does
734  * not have buffers then when they are later attached they will all be set
735  * dirty.
736  *
737  * The buffers are dirtied before the page is dirtied.  There's a small race
738  * window in which a writepage caller may see the page cleanness but not the
739  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
740  * before the buffers, a concurrent writepage caller could clear the page dirty
741  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
742  * page on the dirty page list.
743  *
744  * We use private_lock to lock against try_to_free_buffers while using the
745  * page's buffer list.  Also use this to protect against clean buffers being
746  * added to the page after it was set dirty.
747  *
748  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
749  * address_space though.
750  */
751 int __set_page_dirty_buffers(struct page *page)
752 {
753 	struct address_space *mapping = page_mapping(page);
754 
755 	if (unlikely(!mapping))
756 		return !TestSetPageDirty(page);
757 
758 	spin_lock(&mapping->private_lock);
759 	if (page_has_buffers(page)) {
760 		struct buffer_head *head = page_buffers(page);
761 		struct buffer_head *bh = head;
762 
763 		do {
764 			set_buffer_dirty(bh);
765 			bh = bh->b_this_page;
766 		} while (bh != head);
767 	}
768 	spin_unlock(&mapping->private_lock);
769 
770 	return __set_page_dirty(page, mapping, 1);
771 }
772 EXPORT_SYMBOL(__set_page_dirty_buffers);
773 
774 /*
775  * Write out and wait upon a list of buffers.
776  *
777  * We have conflicting pressures: we want to make sure that all
778  * initially dirty buffers get waited on, but that any subsequently
779  * dirtied buffers don't.  After all, we don't want fsync to last
780  * forever if somebody is actively writing to the file.
781  *
782  * Do this in two main stages: first we copy dirty buffers to a
783  * temporary inode list, queueing the writes as we go.  Then we clean
784  * up, waiting for those writes to complete.
785  *
786  * During this second stage, any subsequent updates to the file may end
787  * up refiling the buffer on the original inode's dirty list again, so
788  * there is a chance we will end up with a buffer queued for write but
789  * not yet completed on that list.  So, as a final cleanup we go through
790  * the osync code to catch these locked, dirty buffers without requeuing
791  * any newly dirty buffers for write.
792  */
793 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
794 {
795 	struct buffer_head *bh;
796 	struct list_head tmp;
797 	struct address_space *mapping;
798 	int err = 0, err2;
799 
800 	INIT_LIST_HEAD(&tmp);
801 
802 	spin_lock(lock);
803 	while (!list_empty(list)) {
804 		bh = BH_ENTRY(list->next);
805 		mapping = bh->b_assoc_map;
806 		__remove_assoc_queue(bh);
807 		/* Avoid race with mark_buffer_dirty_inode() which does
808 		 * a lockless check and we rely on seeing the dirty bit */
809 		smp_mb();
810 		if (buffer_dirty(bh) || buffer_locked(bh)) {
811 			list_add(&bh->b_assoc_buffers, &tmp);
812 			bh->b_assoc_map = mapping;
813 			if (buffer_dirty(bh)) {
814 				get_bh(bh);
815 				spin_unlock(lock);
816 				/*
817 				 * Ensure any pending I/O completes so that
818 				 * ll_rw_block() actually writes the current
819 				 * contents - it is a noop if I/O is still in
820 				 * flight on potentially older contents.
821 				 */
822 				ll_rw_block(SWRITE, 1, &bh);
823 				brelse(bh);
824 				spin_lock(lock);
825 			}
826 		}
827 	}
828 
829 	while (!list_empty(&tmp)) {
830 		bh = BH_ENTRY(tmp.prev);
831 		get_bh(bh);
832 		mapping = bh->b_assoc_map;
833 		__remove_assoc_queue(bh);
834 		/* Avoid race with mark_buffer_dirty_inode() which does
835 		 * a lockless check and we rely on seeing the dirty bit */
836 		smp_mb();
837 		if (buffer_dirty(bh)) {
838 			list_add(&bh->b_assoc_buffers,
839 				 &bh->b_assoc_map->private_list);
840 			bh->b_assoc_map = mapping;
841 		}
842 		spin_unlock(lock);
843 		wait_on_buffer(bh);
844 		if (!buffer_uptodate(bh))
845 			err = -EIO;
846 		brelse(bh);
847 		spin_lock(lock);
848 	}
849 
850 	spin_unlock(lock);
851 	err2 = osync_buffers_list(lock, list);
852 	if (err)
853 		return err;
854 	else
855 		return err2;
856 }
857 
858 /*
859  * Invalidate any and all dirty buffers on a given inode.  We are
860  * probably unmounting the fs, but that doesn't mean we have already
861  * done a sync().  Just drop the buffers from the inode list.
862  *
863  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
864  * assumes that all the buffers are against the blockdev.  Not true
865  * for reiserfs.
866  */
867 void invalidate_inode_buffers(struct inode *inode)
868 {
869 	if (inode_has_buffers(inode)) {
870 		struct address_space *mapping = &inode->i_data;
871 		struct list_head *list = &mapping->private_list;
872 		struct address_space *buffer_mapping = mapping->assoc_mapping;
873 
874 		spin_lock(&buffer_mapping->private_lock);
875 		while (!list_empty(list))
876 			__remove_assoc_queue(BH_ENTRY(list->next));
877 		spin_unlock(&buffer_mapping->private_lock);
878 	}
879 }
880 
881 /*
882  * Remove any clean buffers from the inode's buffer list.  This is called
883  * when we're trying to free the inode itself.  Those buffers can pin it.
884  *
885  * Returns true if all buffers were removed.
886  */
887 int remove_inode_buffers(struct inode *inode)
888 {
889 	int ret = 1;
890 
891 	if (inode_has_buffers(inode)) {
892 		struct address_space *mapping = &inode->i_data;
893 		struct list_head *list = &mapping->private_list;
894 		struct address_space *buffer_mapping = mapping->assoc_mapping;
895 
896 		spin_lock(&buffer_mapping->private_lock);
897 		while (!list_empty(list)) {
898 			struct buffer_head *bh = BH_ENTRY(list->next);
899 			if (buffer_dirty(bh)) {
900 				ret = 0;
901 				break;
902 			}
903 			__remove_assoc_queue(bh);
904 		}
905 		spin_unlock(&buffer_mapping->private_lock);
906 	}
907 	return ret;
908 }
909 
910 /*
911  * Create the appropriate buffers when given a page for the data area and
912  * the size of each buffer.  Use the bh->b_this_page linked list to
913  * follow the buffers created.  Return NULL if unable to create more
914  * buffers.
915  *
916  * The retry flag is used to differentiate async I/O (paging, swapping),
917  * which may not fail, from ordinary buffer allocations.
918  */
919 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
920 		int retry)
921 {
922 	struct buffer_head *bh, *head;
923 	long offset;
924 
925 try_again:
926 	head = NULL;
927 	offset = PAGE_SIZE;
928 	while ((offset -= size) >= 0) {
929 		bh = alloc_buffer_head(GFP_NOFS);
930 		if (!bh)
931 			goto no_grow;
932 
933 		bh->b_bdev = NULL;
934 		bh->b_this_page = head;
935 		bh->b_blocknr = -1;
936 		head = bh;
937 
938 		bh->b_state = 0;
939 		atomic_set(&bh->b_count, 0);
940 		bh->b_private = NULL;
941 		bh->b_size = size;
942 
943 		/* Link the buffer to its page */
944 		set_bh_page(bh, page, offset);
945 
946 		init_buffer(bh, NULL, NULL);
947 	}
948 	return head;
949 /*
950  * In case anything failed, we just free everything we got.
951  */
952 no_grow:
953 	if (head) {
954 		do {
955 			bh = head;
956 			head = head->b_this_page;
957 			free_buffer_head(bh);
958 		} while (head);
959 	}
960 
961 	/*
962 	 * Return failure for non-async IO requests.  Async IO requests
963 	 * are not allowed to fail, so we have to wait until buffer heads
964 	 * become available.  But we don't want tasks sleeping with
965 	 * partially complete buffers, so all were released above.
966 	 */
967 	if (!retry)
968 		return NULL;
969 
970 	/* We're _really_ low on memory. Now we just
971 	 * wait for old buffer heads to become free due to
972 	 * finishing IO.  Since this is an async request and
973 	 * the reserve list is empty, we're sure there are
974 	 * async buffer heads in use.
975 	 */
976 	free_more_memory();
977 	goto try_again;
978 }
979 EXPORT_SYMBOL_GPL(alloc_page_buffers);
980 
981 static inline void
982 link_dev_buffers(struct page *page, struct buffer_head *head)
983 {
984 	struct buffer_head *bh, *tail;
985 
986 	bh = head;
987 	do {
988 		tail = bh;
989 		bh = bh->b_this_page;
990 	} while (bh);
991 	tail->b_this_page = head;
992 	attach_page_buffers(page, head);
993 }
994 
995 /*
996  * Initialise the state of a blockdev page's buffers.
997  */
998 static void
999 init_page_buffers(struct page *page, struct block_device *bdev,
1000 			sector_t block, int size)
1001 {
1002 	struct buffer_head *head = page_buffers(page);
1003 	struct buffer_head *bh = head;
1004 	int uptodate = PageUptodate(page);
1005 
1006 	do {
1007 		if (!buffer_mapped(bh)) {
1008 			init_buffer(bh, NULL, NULL);
1009 			bh->b_bdev = bdev;
1010 			bh->b_blocknr = block;
1011 			if (uptodate)
1012 				set_buffer_uptodate(bh);
1013 			set_buffer_mapped(bh);
1014 		}
1015 		block++;
1016 		bh = bh->b_this_page;
1017 	} while (bh != head);
1018 }
1019 
1020 /*
1021  * Create the page-cache page that contains the requested block.
1022  *
1023  * This is used purely for blockdev mappings.
1024  */
1025 static struct page *
1026 grow_dev_page(struct block_device *bdev, sector_t block,
1027 		pgoff_t index, int size)
1028 {
1029 	struct inode *inode = bdev->bd_inode;
1030 	struct page *page;
1031 	struct buffer_head *bh;
1032 
1033 	page = find_or_create_page(inode->i_mapping, index,
1034 		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1035 	if (!page)
1036 		return NULL;
1037 
1038 	BUG_ON(!PageLocked(page));
1039 
1040 	if (page_has_buffers(page)) {
1041 		bh = page_buffers(page);
1042 		if (bh->b_size == size) {
1043 			init_page_buffers(page, bdev, block, size);
1044 			return page;
1045 		}
1046 		if (!try_to_free_buffers(page))
1047 			goto failed;
1048 	}
1049 
1050 	/*
1051 	 * Allocate some buffers for this page
1052 	 */
1053 	bh = alloc_page_buffers(page, size, 0);
1054 	if (!bh)
1055 		goto failed;
1056 
1057 	/*
1058 	 * Link the page to the buffers and initialise them.  Take the
1059 	 * lock to be atomic wrt __find_get_block(), which does not
1060 	 * run under the page lock.
1061 	 */
1062 	spin_lock(&inode->i_mapping->private_lock);
1063 	link_dev_buffers(page, bh);
1064 	init_page_buffers(page, bdev, block, size);
1065 	spin_unlock(&inode->i_mapping->private_lock);
1066 	return page;
1067 
1068 failed:
1069 	BUG();
1070 	unlock_page(page);
1071 	page_cache_release(page);
1072 	return NULL;
1073 }
1074 
1075 /*
1076  * Create buffers for the specified block device block's page.  If
1077  * that page was dirty, the buffers are set dirty also.
1078  */
1079 static int
1080 grow_buffers(struct block_device *bdev, sector_t block, int size)
1081 {
1082 	struct page *page;
1083 	pgoff_t index;
1084 	int sizebits;
1085 
1086 	sizebits = -1;
1087 	do {
1088 		sizebits++;
1089 	} while ((size << sizebits) < PAGE_SIZE);
1090 
1091 	index = block >> sizebits;
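	/*
	 * Worked example (illustrative): with PAGE_SIZE == 4096 and
	 * size == 1024, the loop above yields sizebits == 2, so a page
	 * holds 4 blocks.  For block 13, index = 13 >> 2 = 3, and the
	 * "block = index << sizebits" below rounds back down to 12, the
	 * first block covered by that page.
	 */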
1092 
1093 	/*
1094 	 * Check for a block which wants to lie outside our maximum possible
1095 	 * pagecache index.  (this comparison is done using sector_t types).
1096 	 */
1097 	if (unlikely(index != block >> sizebits)) {
1098 		char b[BDEVNAME_SIZE];
1099 
1100 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1101 			"device %s\n",
1102 			__FUNCTION__, (unsigned long long)block,
1103 			bdevname(bdev, b));
1104 		return -EIO;
1105 	}
1106 	block = index << sizebits;
1107 	/* Create a page with the proper size buffers.. */
1108 	page = grow_dev_page(bdev, block, index, size);
1109 	if (!page)
1110 		return 0;
1111 	unlock_page(page);
1112 	page_cache_release(page);
1113 	return 1;
1114 }
1115 
1116 static struct buffer_head *
1117 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1118 {
1119 	/* Size must be multiple of hard sectorsize */
1120 	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1121 			(size < 512 || size > PAGE_SIZE))) {
1122 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1123 					size);
1124 		printk(KERN_ERR "hardsect size: %d\n",
1125 					bdev_hardsect_size(bdev));
1126 
1127 		dump_stack();
1128 		return NULL;
1129 	}
1130 
1131 	for (;;) {
1132 		struct buffer_head * bh;
1133 		int ret;
1134 
1135 		bh = __find_get_block(bdev, block, size);
1136 		if (bh)
1137 			return bh;
1138 
1139 		ret = grow_buffers(bdev, block, size);
1140 		if (ret < 0)
1141 			return NULL;
1142 		if (ret == 0)
1143 			free_more_memory();
1144 	}
1145 }
1146 
1147 /*
1148  * The relationship between dirty buffers and dirty pages:
1149  *
1150  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1151  * the page is tagged dirty in its radix tree.
1152  *
1153  * At all times, the dirtiness of the buffers represents the dirtiness of
1154  * subsections of the page.  If the page has buffers, the page dirty bit is
1155  * merely a hint about the true dirty state.
1156  *
1157  * When a page is set dirty in its entirety, all its buffers are marked dirty
1158  * (if the page has buffers).
1159  *
1160  * When a buffer is marked dirty, its page is dirtied, but the page's other
1161  * buffers are not.
1162  *
1163  * Also.  When blockdev buffers are explicitly read with bread(), they
1164  * individually become uptodate.  But their backing page remains not
1165  * uptodate - even if all of its buffers are uptodate.  A subsequent
1166  * block_read_full_page() against that page will discover all the uptodate
1167  * buffers, will set the page uptodate and will perform no I/O.
1168  */
1169 
1170 /**
1171  * mark_buffer_dirty - mark a buffer_head as needing writeout
1172  * @bh: the buffer_head to mark dirty
1173  *
1174  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1175  * backing page dirty, then tag the page as dirty in its address_space's radix
1176  * tree and then attach the address_space's inode to its superblock's dirty
1177  * inode list.
1178  *
1179  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1180  * mapping->tree_lock and the global inode_lock.
1181  */
1182 void mark_buffer_dirty(struct buffer_head *bh)
1183 {
1184 	WARN_ON_ONCE(!buffer_uptodate(bh));
1185 	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1186 		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
1187 }
1188 
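/*
 * A minimal sketch (not part of this file) of the usual pattern for
 * modifying a metadata block: read it, change the contents, then let
 * mark_buffer_dirty() propagate dirtiness to the page and the inode.
 * The superblock pointer and block number are hypothetical, and the
 * memset() stands in for a real metadata update.
 */
#if 0	/* illustrative only */
static int example_update_block(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh = sb_bread(sb, blocknr);

	if (!bh)
		return -EIO;
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);	/* modify the contents */
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);		/* buffer, page and inode all go dirty */
	brelse(bh);
	return 0;
}
#endif
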
1189 /*
1190  * Decrement a buffer_head's reference count.  If all buffers against a page
1191  * have zero reference count, are clean and unlocked, and if the page is clean
1192  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1193  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1194  * a page but it ends up not being freed, and buffers may later be reattached).
1195  */
1196 void __brelse(struct buffer_head * buf)
1197 {
1198 	if (atomic_read(&buf->b_count)) {
1199 		put_bh(buf);
1200 		return;
1201 	}
1202 	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1203 	WARN_ON(1);
1204 }
1205 
1206 /*
1207  * bforget() is like brelse(), except it discards any
1208  * potentially dirty data.
1209  */
1210 void __bforget(struct buffer_head *bh)
1211 {
1212 	clear_buffer_dirty(bh);
1213 	if (bh->b_assoc_map) {
1214 		struct address_space *buffer_mapping = bh->b_page->mapping;
1215 
1216 		spin_lock(&buffer_mapping->private_lock);
1217 		list_del_init(&bh->b_assoc_buffers);
1218 		bh->b_assoc_map = NULL;
1219 		spin_unlock(&buffer_mapping->private_lock);
1220 	}
1221 	__brelse(bh);
1222 }
1223 
1224 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1225 {
1226 	lock_buffer(bh);
1227 	if (buffer_uptodate(bh)) {
1228 		unlock_buffer(bh);
1229 		return bh;
1230 	} else {
1231 		get_bh(bh);
1232 		bh->b_end_io = end_buffer_read_sync;
1233 		submit_bh(READ, bh);
1234 		wait_on_buffer(bh);
1235 		if (buffer_uptodate(bh))
1236 			return bh;
1237 	}
1238 	brelse(bh);
1239 	return NULL;
1240 }
1241 
1242 /*
1243  * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
1244  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1245  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1246  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1247  * CPU's LRUs at the same time.
1248  *
1249  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1250  * sb_find_get_block().
1251  *
1252  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1253  * a local interrupt disable for that.
1254  */
1255 
1256 #define BH_LRU_SIZE	8
1257 
1258 struct bh_lru {
1259 	struct buffer_head *bhs[BH_LRU_SIZE];
1260 };
1261 
1262 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1263 
1264 #ifdef CONFIG_SMP
1265 #define bh_lru_lock()	local_irq_disable()
1266 #define bh_lru_unlock()	local_irq_enable()
1267 #else
1268 #define bh_lru_lock()	preempt_disable()
1269 #define bh_lru_unlock()	preempt_enable()
1270 #endif
1271 
1272 static inline void check_irqs_on(void)
1273 {
1274 #ifdef irqs_disabled
1275 	BUG_ON(irqs_disabled());
1276 #endif
1277 }
1278 
1279 /*
1280  * The LRU management algorithm is dopey-but-simple.  Sorry.
1281  */
1282 static void bh_lru_install(struct buffer_head *bh)
1283 {
1284 	struct buffer_head *evictee = NULL;
1285 	struct bh_lru *lru;
1286 
1287 	check_irqs_on();
1288 	bh_lru_lock();
1289 	lru = &__get_cpu_var(bh_lrus);
1290 	if (lru->bhs[0] != bh) {
1291 		struct buffer_head *bhs[BH_LRU_SIZE];
1292 		int in;
1293 		int out = 0;
1294 
1295 		get_bh(bh);
1296 		bhs[out++] = bh;
1297 		for (in = 0; in < BH_LRU_SIZE; in++) {
1298 			struct buffer_head *bh2 = lru->bhs[in];
1299 
1300 			if (bh2 == bh) {
1301 				__brelse(bh2);
1302 			} else {
1303 				if (out >= BH_LRU_SIZE) {
1304 					BUG_ON(evictee != NULL);
1305 					evictee = bh2;
1306 				} else {
1307 					bhs[out++] = bh2;
1308 				}
1309 			}
1310 		}
1311 		while (out < BH_LRU_SIZE)
1312 			bhs[out++] = NULL;
1313 		memcpy(lru->bhs, bhs, sizeof(bhs));
1314 	}
1315 	bh_lru_unlock();
1316 
1317 	if (evictee)
1318 		__brelse(evictee);
1319 }
1320 
1321 /*
1322  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1323  */
1324 static struct buffer_head *
1325 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1326 {
1327 	struct buffer_head *ret = NULL;
1328 	struct bh_lru *lru;
1329 	unsigned int i;
1330 
1331 	check_irqs_on();
1332 	bh_lru_lock();
1333 	lru = &__get_cpu_var(bh_lrus);
1334 	for (i = 0; i < BH_LRU_SIZE; i++) {
1335 		struct buffer_head *bh = lru->bhs[i];
1336 
1337 		if (bh && bh->b_bdev == bdev &&
1338 				bh->b_blocknr == block && bh->b_size == size) {
1339 			if (i) {
1340 				while (i) {
1341 					lru->bhs[i] = lru->bhs[i - 1];
1342 					i--;
1343 				}
1344 				lru->bhs[0] = bh;
1345 			}
1346 			get_bh(bh);
1347 			ret = bh;
1348 			break;
1349 		}
1350 	}
1351 	bh_lru_unlock();
1352 	return ret;
1353 }
1354 
1355 /*
1356  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1357  * it in the LRU and mark it as accessed.  If it is not present then return
1358  * NULL
1359  */
1360 struct buffer_head *
1361 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1362 {
1363 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1364 
1365 	if (bh == NULL) {
1366 		bh = __find_get_block_slow(bdev, block);
1367 		if (bh)
1368 			bh_lru_install(bh);
1369 	}
1370 	if (bh)
1371 		touch_buffer(bh);
1372 	return bh;
1373 }
1374 EXPORT_SYMBOL(__find_get_block);
1375 
1376 /*
1377  * __getblk will locate (and, if necessary, create) the buffer_head
1378  * which corresponds to the passed block_device, block and size. The
1379  * returned buffer has its reference count incremented.
1380  *
1381  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1382  * illegal block number, __getblk() will happily return a buffer_head
1383  * which represents the non-existent block.  Very weird.
1384  *
1385  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1386  * attempt is failing.  FIXME, perhaps?
1387  */
1388 struct buffer_head *
1389 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1390 {
1391 	struct buffer_head *bh = __find_get_block(bdev, block, size);
1392 
1393 	might_sleep();
1394 	if (bh == NULL)
1395 		bh = __getblk_slow(bdev, block, size);
1396 	return bh;
1397 }
1398 EXPORT_SYMBOL(__getblk);
1399 
1400 /*
1401  * Do async read-ahead on a buffer..
1402  */
1403 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1404 {
1405 	struct buffer_head *bh = __getblk(bdev, block, size);
1406 	if (likely(bh)) {
1407 		ll_rw_block(READA, 1, &bh);
1408 		brelse(bh);
1409 	}
1410 }
1411 EXPORT_SYMBOL(__breadahead);
1412 
1413 /**
1414  *  __bread() - reads a specified block and returns the bh
1415  *  @bdev: the block_device to read from
1416  *  @block: number of block
1417  *  @size: size (in bytes) to read
1418  *
1419  *  Reads a specified block, and returns buffer head that contains it.
1420  *  It returns NULL if the block was unreadable.
1421  */
1422 struct buffer_head *
1423 __bread(struct block_device *bdev, sector_t block, unsigned size)
1424 {
1425 	struct buffer_head *bh = __getblk(bdev, block, size);
1426 
1427 	if (likely(bh) && !buffer_uptodate(bh))
1428 		bh = __bread_slow(bh);
1429 	return bh;
1430 }
1431 EXPORT_SYMBOL(__bread);
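
/*
 * A minimal sketch (not part of this file): typical read-only use of the
 * buffer cache.  The device, block number and 512-byte block size are
 * hypothetical; callers that already hold a superblock normally go
 * through sb_bread() instead.
 */
#if 0	/* illustrative only */
static int example_read_block(struct block_device *bdev, sector_t blocknr)
{
	struct buffer_head *bh = __bread(bdev, blocknr, 512);

	if (!bh)
		return -EIO;		/* the block was unreadable */
	/* ... examine bh->b_data ... */
	brelse(bh);			/* drop the reference taken by __getblk() */
	return 0;
}
#endif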
1432 
1433 /*
1434  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1435  * This doesn't race because it runs in each cpu either in irq
1436  * or with preempt disabled.
1437  */
1438 static void invalidate_bh_lru(void *arg)
1439 {
1440 	struct bh_lru *b = &get_cpu_var(bh_lrus);
1441 	int i;
1442 
1443 	for (i = 0; i < BH_LRU_SIZE; i++) {
1444 		brelse(b->bhs[i]);
1445 		b->bhs[i] = NULL;
1446 	}
1447 	put_cpu_var(bh_lrus);
1448 }
1449 
1450 void invalidate_bh_lrus(void)
1451 {
1452 	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1453 }
1454 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1455 
1456 void set_bh_page(struct buffer_head *bh,
1457 		struct page *page, unsigned long offset)
1458 {
1459 	bh->b_page = page;
1460 	BUG_ON(offset >= PAGE_SIZE);
1461 	if (PageHighMem(page))
1462 		/*
1463 		 * This catches illegal uses and preserves the offset:
1464 		 */
1465 		bh->b_data = (char *)(0 + offset);
1466 	else
1467 		bh->b_data = page_address(page) + offset;
1468 }
1469 EXPORT_SYMBOL(set_bh_page);
1470 
1471 /*
1472  * Called when truncating a buffer on a page completely.
1473  */
1474 static void discard_buffer(struct buffer_head * bh)
1475 {
1476 	lock_buffer(bh);
1477 	clear_buffer_dirty(bh);
1478 	bh->b_bdev = NULL;
1479 	clear_buffer_mapped(bh);
1480 	clear_buffer_req(bh);
1481 	clear_buffer_new(bh);
1482 	clear_buffer_delay(bh);
1483 	clear_buffer_unwritten(bh);
1484 	unlock_buffer(bh);
1485 }
1486 
1487 /**
1488  * block_invalidatepage - invalidate part or all of a buffer-backed page
1489  *
1490  * @page: the page which is affected
1491  * @offset: the index of the truncation point
1492  *
1493  * block_invalidatepage() is called when all or part of the page has become
1494  * invalidated by a truncate operation.
1495  *
1496  * block_invalidatepage() does not have to release all buffers, but it must
1497  * ensure that no dirty buffer is left outside @offset and that no I/O
1498  * is underway against any of the blocks which are outside the truncation
1499  * point.  Because the caller is about to free (and possibly reuse) those
1500  * blocks on-disk.
1501  */
1502 void block_invalidatepage(struct page *page, unsigned long offset)
1503 {
1504 	struct buffer_head *head, *bh, *next;
1505 	unsigned int curr_off = 0;
1506 
1507 	BUG_ON(!PageLocked(page));
1508 	if (!page_has_buffers(page))
1509 		goto out;
1510 
1511 	head = page_buffers(page);
1512 	bh = head;
1513 	do {
1514 		unsigned int next_off = curr_off + bh->b_size;
1515 		next = bh->b_this_page;
1516 
1517 		/*
1518 		 * is this block fully invalidated?
1519 		 */
1520 		if (offset <= curr_off)
1521 			discard_buffer(bh);
1522 		curr_off = next_off;
1523 		bh = next;
1524 	} while (bh != head);
1525 
1526 	/*
1527 	 * We release buffers only if the entire page is being invalidated.
1528 	 * The get_block cached value has been unconditionally invalidated,
1529 	 * so real IO is not possible anymore.
1530 	 */
1531 	if (offset == 0)
1532 		try_to_release_page(page, 0);
1533 out:
1534 	return;
1535 }
1536 EXPORT_SYMBOL(block_invalidatepage);
1537 
1538 /*
1539  * We attach and possibly dirty the buffers atomically wrt
1540  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1541  * is already excluded via the page lock.
1542  */
1543 void create_empty_buffers(struct page *page,
1544 			unsigned long blocksize, unsigned long b_state)
1545 {
1546 	struct buffer_head *bh, *head, *tail;
1547 
1548 	head = alloc_page_buffers(page, blocksize, 1);
1549 	bh = head;
1550 	do {
1551 		bh->b_state |= b_state;
1552 		tail = bh;
1553 		bh = bh->b_this_page;
1554 	} while (bh);
1555 	tail->b_this_page = head;
1556 
1557 	spin_lock(&page->mapping->private_lock);
1558 	if (PageUptodate(page) || PageDirty(page)) {
1559 		bh = head;
1560 		do {
1561 			if (PageDirty(page))
1562 				set_buffer_dirty(bh);
1563 			if (PageUptodate(page))
1564 				set_buffer_uptodate(bh);
1565 			bh = bh->b_this_page;
1566 		} while (bh != head);
1567 	}
1568 	attach_page_buffers(page, head);
1569 	spin_unlock(&page->mapping->private_lock);
1570 }
1571 EXPORT_SYMBOL(create_empty_buffers);
1572 
1573 /*
1574  * We are taking a block for data and we don't want any output from any
1575  * buffer-cache aliases, starting from the return of this function and
1576  * until the moment when something explicitly marks the buffer
1577  * dirty (hopefully that will not happen until we free that block ;-)
1578  * We don't even need to mark it not-uptodate - nobody can expect
1579  * anything from a newly allocated buffer anyway. We used to use
1580  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1581  * don't want to mark the alias unmapped, for example - it would confuse
1582  * anyone who might pick it with bread() afterwards...
1583  *
1584  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1585  * be writeout I/O going on against recently-freed buffers.  We don't
1586  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1587  * only if we really need to.  That happens here.
1588  */
1589 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1590 {
1591 	struct buffer_head *old_bh;
1592 
1593 	might_sleep();
1594 
1595 	old_bh = __find_get_block_slow(bdev, block);
1596 	if (old_bh) {
1597 		clear_buffer_dirty(old_bh);
1598 		wait_on_buffer(old_bh);
1599 		clear_buffer_req(old_bh);
1600 		__brelse(old_bh);
1601 	}
1602 }
1603 EXPORT_SYMBOL(unmap_underlying_metadata);
1604 
1605 /*
1606  * NOTE! All mapped/uptodate combinations are valid:
1607  *
1608  *	Mapped	Uptodate	Meaning
1609  *
1610  *	No	No		"unknown" - must do get_block()
1611  *	No	Yes		"hole" - zero-filled
1612  *	Yes	No		"allocated" - allocated on disk, not read in
1613  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1614  *
1615  * "Dirty" is valid only with the last case (mapped+uptodate).
1616  */
1617 
1618 /*
1619  * While block_write_full_page is writing back the dirty buffers under
1620  * the page lock, whoever dirtied the buffers may decide to clean them
1621  * again at any time.  We handle that by only looking at the buffer
1622  * state inside lock_buffer().
1623  *
1624  * If block_write_full_page() is called for regular writeback
1625  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1626  * locked buffer.   This only can happen if someone has written the buffer
1627  * directly, with submit_bh().  At the address_space level PageWriteback
1628  * prevents this contention from occurring.
1629  */
1630 static int __block_write_full_page(struct inode *inode, struct page *page,
1631 			get_block_t *get_block, struct writeback_control *wbc)
1632 {
1633 	int err;
1634 	sector_t block;
1635 	sector_t last_block;
1636 	struct buffer_head *bh, *head;
1637 	const unsigned blocksize = 1 << inode->i_blkbits;
1638 	int nr_underway = 0;
1639 
1640 	BUG_ON(!PageLocked(page));
1641 
1642 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1643 
1644 	if (!page_has_buffers(page)) {
1645 		create_empty_buffers(page, blocksize,
1646 					(1 << BH_Dirty)|(1 << BH_Uptodate));
1647 	}
1648 
1649 	/*
1650 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1651 	 * here, and the (potentially unmapped) buffers may become dirty at
1652 	 * any time.  If a buffer becomes dirty here after we've inspected it
1653 	 * then we just miss that fact, and the page stays dirty.
1654 	 *
1655 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1656 	 * handle that here by just cleaning them.
1657 	 */
1658 
1659 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1660 	head = page_buffers(page);
1661 	bh = head;
1662 
1663 	/*
1664 	 * Get all the dirty buffers mapped to disk addresses and
1665 	 * handle any aliases from the underlying blockdev's mapping.
1666 	 */
1667 	do {
1668 		if (block > last_block) {
1669 			/*
1670 			 * mapped buffers outside i_size will occur, because
1671 			 * this page can be outside i_size when there is a
1672 			 * truncate in progress.
1673 			 */
1674 			/*
1675 			 * The buffer was zeroed by block_write_full_page()
1676 			 */
1677 			clear_buffer_dirty(bh);
1678 			set_buffer_uptodate(bh);
1679 		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1680 			WARN_ON(bh->b_size != blocksize);
1681 			err = get_block(inode, block, bh, 1);
1682 			if (err)
1683 				goto recover;
1684 			if (buffer_new(bh)) {
1685 				/* blockdev mappings never come here */
1686 				clear_buffer_new(bh);
1687 				unmap_underlying_metadata(bh->b_bdev,
1688 							bh->b_blocknr);
1689 			}
1690 		}
1691 		bh = bh->b_this_page;
1692 		block++;
1693 	} while (bh != head);
1694 
1695 	do {
1696 		if (!buffer_mapped(bh))
1697 			continue;
1698 		/*
1699 		 * If it's a fully non-blocking write attempt and we cannot
1700 		 * lock the buffer then redirty the page.  Note that this can
1701 		 * potentially cause a busy-wait loop from pdflush and kswapd
1702 		 * activity, but those code paths have their own higher-level
1703 		 * throttling.
1704 		 */
1705 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1706 			lock_buffer(bh);
1707 		} else if (test_set_buffer_locked(bh)) {
1708 			redirty_page_for_writepage(wbc, page);
1709 			continue;
1710 		}
1711 		if (test_clear_buffer_dirty(bh)) {
1712 			mark_buffer_async_write(bh);
1713 		} else {
1714 			unlock_buffer(bh);
1715 		}
1716 	} while ((bh = bh->b_this_page) != head);
1717 
1718 	/*
1719 	 * The page and its buffers are protected by PageWriteback(), so we can
1720 	 * drop the bh refcounts early.
1721 	 */
1722 	BUG_ON(PageWriteback(page));
1723 	set_page_writeback(page);
1724 
1725 	do {
1726 		struct buffer_head *next = bh->b_this_page;
1727 		if (buffer_async_write(bh)) {
1728 			submit_bh(WRITE, bh);
1729 			nr_underway++;
1730 		}
1731 		bh = next;
1732 	} while (bh != head);
1733 	unlock_page(page);
1734 
1735 	err = 0;
1736 done:
1737 	if (nr_underway == 0) {
1738 		/*
1739 		 * The page was marked dirty, but the buffers were
1740 		 * clean.  Someone wrote them back by hand with
1741 		 * ll_rw_block/submit_bh.  A rare case.
1742 		 */
1743 		end_page_writeback(page);
1744 
1745 		/*
1746 		 * The page and buffer_heads can be released at any time from
1747 		 * here on.
1748 		 */
1749 	}
1750 	return err;
1751 
1752 recover:
1753 	/*
1754 	 * ENOSPC, or some other error.  We may already have added some
1755 	 * blocks to the file, so we need to write these out to avoid
1756 	 * exposing stale data.
1757 	 * The page is currently locked and not marked for writeback
1758 	 */
1759 	bh = head;
1760 	/* Recovery: lock and submit the mapped buffers */
1761 	do {
1762 		if (buffer_mapped(bh) && buffer_dirty(bh)) {
1763 			lock_buffer(bh);
1764 			mark_buffer_async_write(bh);
1765 		} else {
1766 			/*
1767 			 * The buffer may have been set dirty during
1768 			 * attachment to a dirty page.
1769 			 */
1770 			clear_buffer_dirty(bh);
1771 		}
1772 	} while ((bh = bh->b_this_page) != head);
1773 	SetPageError(page);
1774 	BUG_ON(PageWriteback(page));
1775 	mapping_set_error(page->mapping, err);
1776 	set_page_writeback(page);
1777 	do {
1778 		struct buffer_head *next = bh->b_this_page;
1779 		if (buffer_async_write(bh)) {
1780 			clear_buffer_dirty(bh);
1781 			submit_bh(WRITE, bh);
1782 			nr_underway++;
1783 		}
1784 		bh = next;
1785 	} while (bh != head);
1786 	unlock_page(page);
1787 	goto done;
1788 }
1789 
1790 /*
1791  * If a page has any new buffers, zero them out here, and mark them uptodate
1792  * and dirty so they'll be written out (in order to prevent uninitialised
1793  * block data from leaking). And clear the new bit.
1794  */
1795 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1796 {
1797 	unsigned int block_start, block_end;
1798 	struct buffer_head *head, *bh;
1799 
1800 	BUG_ON(!PageLocked(page));
1801 	if (!page_has_buffers(page))
1802 		return;
1803 
1804 	bh = head = page_buffers(page);
1805 	block_start = 0;
1806 	do {
1807 		block_end = block_start + bh->b_size;
1808 
1809 		if (buffer_new(bh)) {
1810 			if (block_end > from && block_start < to) {
1811 				if (!PageUptodate(page)) {
1812 					unsigned start, size;
1813 
1814 					start = max(from, block_start);
1815 					size = min(to, block_end) - start;
1816 
1817 					zero_user(page, start, size);
1818 					set_buffer_uptodate(bh);
1819 				}
1820 
1821 				clear_buffer_new(bh);
1822 				mark_buffer_dirty(bh);
1823 			}
1824 		}
1825 
1826 		block_start = block_end;
1827 		bh = bh->b_this_page;
1828 	} while (bh != head);
1829 }
1830 EXPORT_SYMBOL(page_zero_new_buffers);
1831 
1832 static int __block_prepare_write(struct inode *inode, struct page *page,
1833 		unsigned from, unsigned to, get_block_t *get_block)
1834 {
1835 	unsigned block_start, block_end;
1836 	sector_t block;
1837 	int err = 0;
1838 	unsigned blocksize, bbits;
1839 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1840 
1841 	BUG_ON(!PageLocked(page));
1842 	BUG_ON(from > PAGE_CACHE_SIZE);
1843 	BUG_ON(to > PAGE_CACHE_SIZE);
1844 	BUG_ON(from > to);
1845 
1846 	blocksize = 1 << inode->i_blkbits;
1847 	if (!page_has_buffers(page))
1848 		create_empty_buffers(page, blocksize, 0);
1849 	head = page_buffers(page);
1850 
1851 	bbits = inode->i_blkbits;
1852 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1853 
1854 	for (bh = head, block_start = 0; bh != head || !block_start;
1855 	    block++, block_start = block_end, bh = bh->b_this_page) {
1856 		block_end = block_start + blocksize;
1857 		if (block_end <= from || block_start >= to) {
1858 			if (PageUptodate(page)) {
1859 				if (!buffer_uptodate(bh))
1860 					set_buffer_uptodate(bh);
1861 			}
1862 			continue;
1863 		}
1864 		if (buffer_new(bh))
1865 			clear_buffer_new(bh);
1866 		if (!buffer_mapped(bh)) {
1867 			WARN_ON(bh->b_size != blocksize);
1868 			err = get_block(inode, block, bh, 1);
1869 			if (err)
1870 				break;
1871 			if (buffer_new(bh)) {
1872 				unmap_underlying_metadata(bh->b_bdev,
1873 							bh->b_blocknr);
1874 				if (PageUptodate(page)) {
1875 					clear_buffer_new(bh);
1876 					set_buffer_uptodate(bh);
1877 					mark_buffer_dirty(bh);
1878 					continue;
1879 				}
1880 				if (block_end > to || block_start < from)
1881 					zero_user_segments(page,
1882 						to, block_end,
1883 						block_start, from);
1884 				continue;
1885 			}
1886 		}
1887 		if (PageUptodate(page)) {
1888 			if (!buffer_uptodate(bh))
1889 				set_buffer_uptodate(bh);
1890 			continue;
1891 		}
1892 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1893 		    !buffer_unwritten(bh) &&
1894 		     (block_start < from || block_end > to)) {
1895 			ll_rw_block(READ, 1, &bh);
1896 			*wait_bh++=bh;
1897 		}
1898 	}
1899 	/*
1900 	 * If we issued read requests - let them complete.
1901 	 */
1902 	while (wait_bh > wait) {
1903 		wait_on_buffer(*--wait_bh);
1904 		if (!buffer_uptodate(*wait_bh))
1905 			err = -EIO;
1906 	}
1907 	if (unlikely(err))
1908 		page_zero_new_buffers(page, from, to);
1909 	return err;
1910 }
1911 
1912 static int __block_commit_write(struct inode *inode, struct page *page,
1913 		unsigned from, unsigned to)
1914 {
1915 	unsigned block_start, block_end;
1916 	int partial = 0;
1917 	unsigned blocksize;
1918 	struct buffer_head *bh, *head;
1919 
1920 	blocksize = 1 << inode->i_blkbits;
1921 
1922 	for (bh = head = page_buffers(page), block_start = 0;
1923 	    bh != head || !block_start;
1924 	    block_start = block_end, bh = bh->b_this_page) {
1925 		block_end = block_start + blocksize;
1926 		if (block_end <= from || block_start >= to) {
1927 			if (!buffer_uptodate(bh))
1928 				partial = 1;
1929 		} else {
1930 			set_buffer_uptodate(bh);
1931 			mark_buffer_dirty(bh);
1932 		}
1933 		clear_buffer_new(bh);
1934 	}
1935 
1936 	/*
1937 	 * If this is a partial write which happened to make all buffers
1938 	 * uptodate then we can optimize away a bogus readpage() for
1939 	 * the next read(). Here we 'discover' whether the page went
1940 	 * uptodate as a result of this (potentially partial) write.
1941 	 */
1942 	if (!partial)
1943 		SetPageUptodate(page);
1944 	return 0;
1945 }
1946 
1947 /*
1948  * block_write_begin takes care of the basic task of block allocation and
1949  * bringing partial write blocks uptodate first.
1950  *
1951  * If *pagep is not NULL, then block_write_begin uses the locked page
1952  * at *pagep rather than allocating its own. In this case, the page will
1953  * not be unlocked or deallocated on failure.
1954  */
1955 int block_write_begin(struct file *file, struct address_space *mapping,
1956 			loff_t pos, unsigned len, unsigned flags,
1957 			struct page **pagep, void **fsdata,
1958 			get_block_t *get_block)
1959 {
1960 	struct inode *inode = mapping->host;
1961 	int status = 0;
1962 	struct page *page;
1963 	pgoff_t index;
1964 	unsigned start, end;
1965 	int ownpage = 0;
1966 
1967 	index = pos >> PAGE_CACHE_SHIFT;
1968 	start = pos & (PAGE_CACHE_SIZE - 1);
1969 	end = start + len;
1970 
1971 	page = *pagep;
1972 	if (page == NULL) {
1973 		ownpage = 1;
1974 		page = __grab_cache_page(mapping, index);
1975 		if (!page) {
1976 			status = -ENOMEM;
1977 			goto out;
1978 		}
1979 		*pagep = page;
1980 	} else
1981 		BUG_ON(!PageLocked(page));
1982 
1983 	status = __block_prepare_write(inode, page, start, end, get_block);
1984 	if (unlikely(status)) {
1985 		ClearPageUptodate(page);
1986 
1987 		if (ownpage) {
1988 			unlock_page(page);
1989 			page_cache_release(page);
1990 			*pagep = NULL;
1991 
1992 			/*
1993 			 * prepare_write() may have instantiated a few blocks
1994 			 * outside i_size.  Trim these off again. Don't need
1995 			 * i_size_read because we hold i_mutex.
1996 			 */
1997 			if (pos + len > inode->i_size)
1998 				vmtruncate(inode, inode->i_size);
1999 		}
2000 		goto out;
2001 	}
2002 
2003 out:
2004 	return status;
2005 }
2006 EXPORT_SYMBOL(block_write_begin);
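
/*
 * Example (illustrative sketch, not part of this file): a simple
 * block-backed filesystem's ->write_begin() is typically a thin wrapper
 * around block_write_begin(), passing its own get_block callback.  The
 * "myfs_" names below are hypothetical.
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		*pagep = NULL;
 *		return block_write_begin(file, mapping, pos, len, flags,
 *					 pagep, fsdata, myfs_get_block);
 *	}
 */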
2007 
2008 int block_write_end(struct file *file, struct address_space *mapping,
2009 			loff_t pos, unsigned len, unsigned copied,
2010 			struct page *page, void *fsdata)
2011 {
2012 	struct inode *inode = mapping->host;
2013 	unsigned start;
2014 
2015 	start = pos & (PAGE_CACHE_SIZE - 1);
2016 
2017 	if (unlikely(copied < len)) {
2018 		/*
2019 		 * The buffers that were written will now be uptodate, so we
2020 		 * don't have to worry about a readpage reading them and
2021 		 * overwriting a partial write. However if we have encountered
2022 		 * a short write and only partially written into a buffer, it
2023 		 * will not be marked uptodate, so a readpage might come in and
2024 		 * destroy our partial write.
2025 		 *
2026 		 * Do the simplest thing, and just treat any short write to a
2027 		 * non uptodate page as a zero-length write, and force the
2028 		 * caller to redo the whole thing.
2029 		 */
2030 		if (!PageUptodate(page))
2031 			copied = 0;
2032 
2033 		page_zero_new_buffers(page, start+copied, start+len);
2034 	}
2035 	flush_dcache_page(page);
2036 
2037 	/* This could be a short (even 0-length) commit */
2038 	__block_commit_write(inode, page, start, start+copied);
2039 
2040 	return copied;
2041 }
2042 EXPORT_SYMBOL(block_write_end);
2043 
2044 int generic_write_end(struct file *file, struct address_space *mapping,
2045 			loff_t pos, unsigned len, unsigned copied,
2046 			struct page *page, void *fsdata)
2047 {
2048 	struct inode *inode = mapping->host;
2049 
2050 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2051 
2052 	/*
2053 	 * No need to use i_size_read() here, the i_size
2054 	 * cannot change under us because we hold i_mutex.
2055 	 *
2056 	 * But it's important to update i_size while still holding page lock:
2057 	 * page writeout could otherwise come in and zero beyond i_size.
2058 	 */
2059 	if (pos+copied > inode->i_size) {
2060 		i_size_write(inode, pos+copied);
2061 		mark_inode_dirty(inode);
2062 	}
2063 
2064 	unlock_page(page);
2065 	page_cache_release(page);
2066 
2067 	return copied;
2068 }
2069 EXPORT_SYMBOL(generic_write_end);
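
/*
 * Example (sketch, hypothetical "myfs_" names): the write_begin/write_end
 * helpers above are normally wired up together in a filesystem's
 * address_space_operations, with generic_write_end() usable directly as
 * the ->write_end() method:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readpage	= myfs_readpage,
 *		.writepage	= myfs_writepage,
 *		.write_begin	= myfs_write_begin,
 *		.write_end	= generic_write_end,
 *		.bmap		= myfs_bmap,
 *	};
 */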
2070 
2071 /*
2072  * Generic "read page" function for block devices that have the normal
2073  * get_block functionality. This is most of the block device filesystems.
2074  * Reads the page asynchronously --- the unlock_buffer() and
2075  * set/clear_buffer_uptodate() functions propagate buffer state into the
2076  * page struct once IO has completed.
2077  */
2078 int block_read_full_page(struct page *page, get_block_t *get_block)
2079 {
2080 	struct inode *inode = page->mapping->host;
2081 	sector_t iblock, lblock;
2082 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2083 	unsigned int blocksize;
2084 	int nr, i;
2085 	int fully_mapped = 1;
2086 
2087 	BUG_ON(!PageLocked(page));
2088 	blocksize = 1 << inode->i_blkbits;
2089 	if (!page_has_buffers(page))
2090 		create_empty_buffers(page, blocksize, 0);
2091 	head = page_buffers(page);
2092 
2093 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2094 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2095 	bh = head;
2096 	nr = 0;
2097 	i = 0;
2098 
2099 	do {
2100 		if (buffer_uptodate(bh))
2101 			continue;
2102 
2103 		if (!buffer_mapped(bh)) {
2104 			int err = 0;
2105 
2106 			fully_mapped = 0;
2107 			if (iblock < lblock) {
2108 				WARN_ON(bh->b_size != blocksize);
2109 				err = get_block(inode, iblock, bh, 0);
2110 				if (err)
2111 					SetPageError(page);
2112 			}
2113 			if (!buffer_mapped(bh)) {
2114 				zero_user(page, i * blocksize, blocksize);
2115 				if (!err)
2116 					set_buffer_uptodate(bh);
2117 				continue;
2118 			}
2119 			/*
2120 			 * get_block() might have updated the buffer
2121 			 * synchronously
2122 			 */
2123 			if (buffer_uptodate(bh))
2124 				continue;
2125 		}
2126 		arr[nr++] = bh;
2127 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2128 
2129 	if (fully_mapped)
2130 		SetPageMappedToDisk(page);
2131 
2132 	if (!nr) {
2133 		/*
2134 		 * All buffers are uptodate - we can set the page uptodate
2135 		 * as well. But not if get_block() returned an error.
2136 		 */
2137 		if (!PageError(page))
2138 			SetPageUptodate(page);
2139 		unlock_page(page);
2140 		return 0;
2141 	}
2142 
2143 	/* Stage two: lock the buffers */
2144 	for (i = 0; i < nr; i++) {
2145 		bh = arr[i];
2146 		lock_buffer(bh);
2147 		mark_buffer_async_read(bh);
2148 	}
2149 
2150 	/*
2151 	 * Stage 3: start the IO.  Check for uptodateness
2152 	 * inside the buffer lock in case another process reading
2153 	 * the underlying blockdev brought it uptodate (the sct fix).
2154 	 */
2155 	for (i = 0; i < nr; i++) {
2156 		bh = arr[i];
2157 		if (buffer_uptodate(bh))
2158 			end_buffer_async_read(bh, 1);
2159 		else
2160 			submit_bh(READ, bh);
2161 	}
2162 	return 0;
2163 }
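
/*
 * Example (sketch): a block-backed filesystem's ->readpage() usually just
 * forwards to block_read_full_page() with its get_block callback
 * ("myfs_get_block" is hypothetical):
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, myfs_get_block);
 *	}
 */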
2164 
2165 /* utility function for filesystems that need to do work on expanding
2166  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2167  * deal with the hole.
2168  */
2169 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2170 {
2171 	struct address_space *mapping = inode->i_mapping;
2172 	struct page *page;
2173 	void *fsdata;
2174 	unsigned long limit;
2175 	int err;
2176 
2177 	err = -EFBIG;
2178 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2179 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2180 		send_sig(SIGXFSZ, current, 0);
2181 		goto out;
2182 	}
2183 	if (size > inode->i_sb->s_maxbytes)
2184 		goto out;
2185 
2186 	err = pagecache_write_begin(NULL, mapping, size, 0,
2187 				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2188 				&page, &fsdata);
2189 	if (err)
2190 		goto out;
2191 
2192 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2193 	BUG_ON(err > 0);
2194 
2195 out:
2196 	return err;
2197 }
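
/*
 * Example (sketch, assuming a ->setattr() that handles size changes
 * itself): an expanding truncate can be implemented as a zero-length
 * pagecache write at the new size, letting the filesystem's write path
 * fill the gap:
 *
 *	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
 *		error = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (error)
 *			return error;
 *	}
 */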
2198 
2199 int cont_expand_zero(struct file *file, struct address_space *mapping,
2200 			loff_t pos, loff_t *bytes)
2201 {
2202 	struct inode *inode = mapping->host;
2203 	unsigned blocksize = 1 << inode->i_blkbits;
2204 	struct page *page;
2205 	void *fsdata;
2206 	pgoff_t index, curidx;
2207 	loff_t curpos;
2208 	unsigned zerofrom, offset, len;
2209 	int err = 0;
2210 
2211 	index = pos >> PAGE_CACHE_SHIFT;
2212 	offset = pos & ~PAGE_CACHE_MASK;
2213 
2214 	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2215 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2216 		if (zerofrom & (blocksize-1)) {
2217 			*bytes |= (blocksize-1);
2218 			(*bytes)++;
2219 		}
2220 		len = PAGE_CACHE_SIZE - zerofrom;
2221 
2222 		err = pagecache_write_begin(file, mapping, curpos, len,
2223 						AOP_FLAG_UNINTERRUPTIBLE,
2224 						&page, &fsdata);
2225 		if (err)
2226 			goto out;
2227 		zero_user(page, zerofrom, len);
2228 		err = pagecache_write_end(file, mapping, curpos, len, len,
2229 						page, fsdata);
2230 		if (err < 0)
2231 			goto out;
2232 		BUG_ON(err != len);
2233 		err = 0;
2234 	}
2235 
2236 	/* page covers the boundary, find the boundary offset */
2237 	if (index == curidx) {
2238 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2239 		/* if we will expand the file, the last block will be filled */
2240 		if (offset <= zerofrom) {
2241 			goto out;
2242 		}
2243 		if (zerofrom & (blocksize-1)) {
2244 			*bytes |= (blocksize-1);
2245 			(*bytes)++;
2246 		}
2247 		len = offset - zerofrom;
2248 
2249 		err = pagecache_write_begin(file, mapping, curpos, len,
2250 						AOP_FLAG_UNINTERRUPTIBLE,
2251 						&page, &fsdata);
2252 		if (err)
2253 			goto out;
2254 		zero_user(page, zerofrom, len);
2255 		err = pagecache_write_end(file, mapping, curpos, len, len,
2256 						page, fsdata);
2257 		if (err < 0)
2258 			goto out;
2259 		BUG_ON(err != len);
2260 		err = 0;
2261 	}
2262 out:
2263 	return err;
2264 }
2265 
2266 /*
2267  * For moronic filesystems that do not allow holes in files.
2268  * We may have to extend the file.
2269  */
2270 int cont_write_begin(struct file *file, struct address_space *mapping,
2271 			loff_t pos, unsigned len, unsigned flags,
2272 			struct page **pagep, void **fsdata,
2273 			get_block_t *get_block, loff_t *bytes)
2274 {
2275 	struct inode *inode = mapping->host;
2276 	unsigned blocksize = 1 << inode->i_blkbits;
2277 	unsigned zerofrom;
2278 	int err;
2279 
2280 	err = cont_expand_zero(file, mapping, pos, bytes);
2281 	if (err)
2282 		goto out;
2283 
2284 	zerofrom = *bytes & ~PAGE_CACHE_MASK;
2285 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2286 		*bytes |= (blocksize-1);
2287 		(*bytes)++;
2288 	}
2289 
2290 	*pagep = NULL;
2291 	err = block_write_begin(file, mapping, pos, len,
2292 				flags, pagep, fsdata, get_block);
2293 out:
2294 	return err;
2295 }
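
/*
 * Example (sketch, modelled on how FAT-like filesystems use this; the
 * "myfs_" names and the per-inode "mmu_private" field are hypothetical):
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		*pagep = NULL;
 *		return cont_write_begin(file, mapping, pos, len, flags,
 *				pagep, fsdata, myfs_get_block,
 *				&MYFS_I(mapping->host)->mmu_private);
 *	}
 */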
2296 
2297 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2298 			get_block_t *get_block)
2299 {
2300 	struct inode *inode = page->mapping->host;
2301 	int err = __block_prepare_write(inode, page, from, to, get_block);
2302 	if (err)
2303 		ClearPageUptodate(page);
2304 	return err;
2305 }
2306 
2307 int block_commit_write(struct page *page, unsigned from, unsigned to)
2308 {
2309 	struct inode *inode = page->mapping->host;
2310 	__block_commit_write(inode, page, from, to);
2311 	return 0;
2312 }
2313 
2314 int generic_commit_write(struct file *file, struct page *page,
2315 		unsigned from, unsigned to)
2316 {
2317 	struct inode *inode = page->mapping->host;
2318 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2319 	__block_commit_write(inode, page, from, to);
2320 	/*
2321 	 * No need to use i_size_read() here, the i_size
2322 	 * cannot change under us because we hold i_mutex.
2323 	 */
2324 	if (pos > inode->i_size) {
2325 		i_size_write(inode, pos);
2326 		mark_inode_dirty(inode);
2327 	}
2328 	return 0;
2329 }
2330 
2331 /*
2332  * block_page_mkwrite() is not allowed to change the file size as it gets
2333  * called from a page fault handler when a page is first dirtied. Hence we must
2334  * be careful to check for EOF conditions here. We set the page up correctly
2335  * for a written page which means we get ENOSPC checking when writing into
2336  * holes and correct delalloc and unwritten extent mapping on filesystems that
2337  * support these features.
2338  *
2339  * We are not allowed to take the i_mutex here so we have to play games to
2340  * protect against truncate races as the page could now be beyond EOF.  Because
2341  * vmtruncate() writes the inode size before removing pages, once we have the
2342  * page lock we can determine safely if the page is beyond EOF. If it is not
2343  * beyond EOF, then the page is guaranteed safe against truncation until we
2344  * unlock the page.
2345  */
2346 int
2347 block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
2348 		   get_block_t get_block)
2349 {
2350 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2351 	unsigned long end;
2352 	loff_t size;
2353 	int ret = -EINVAL;
2354 
2355 	lock_page(page);
2356 	size = i_size_read(inode);
2357 	if ((page->mapping != inode->i_mapping) ||
2358 	    (page_offset(page) > size)) {
2359 		/* page got truncated out from underneath us */
2360 		goto out_unlock;
2361 	}
2362 
2363 	/* page is wholly or partially inside EOF */
2364 	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2365 		end = size & ~PAGE_CACHE_MASK;
2366 	else
2367 		end = PAGE_CACHE_SIZE;
2368 
2369 	ret = block_prepare_write(page, 0, end, get_block);
2370 	if (!ret)
2371 		ret = block_commit_write(page, 0, end);
2372 
2373 out_unlock:
2374 	unlock_page(page);
2375 	return ret;
2376 }
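
/*
 * Example (sketch): a filesystem exposes this via its vm_operations_struct,
 * usually as a one-line wrapper ("myfs_get_block" is hypothetical):
 *
 *	static int myfs_page_mkwrite(struct vm_area_struct *vma,
 *					struct page *page)
 *	{
 *		return block_page_mkwrite(vma, page, myfs_get_block);
 *	}
 *
 *	static struct vm_operations_struct myfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 */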
2377 
2378 /*
2379  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2380  * immediately, while under the page lock.  So it needs a special end_io
2381  * handler which does not touch the bh after unlocking it.
2382  */
2383 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2384 {
2385 	__end_buffer_read_notouch(bh, uptodate);
2386 }
2387 
2388 /*
2389  * Attach the singly-linked list of buffers created by nobh_write_begin to
2390  * the page (converting it to a circular linked list and taking care of page
2391  * dirty races).
2392  */
2393 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2394 {
2395 	struct buffer_head *bh;
2396 
2397 	BUG_ON(!PageLocked(page));
2398 
2399 	spin_lock(&page->mapping->private_lock);
2400 	bh = head;
2401 	do {
2402 		if (PageDirty(page))
2403 			set_buffer_dirty(bh);
2404 		if (!bh->b_this_page)
2405 			bh->b_this_page = head;
2406 		bh = bh->b_this_page;
2407 	} while (bh != head);
2408 	attach_page_buffers(page, head);
2409 	spin_unlock(&page->mapping->private_lock);
2410 }
2411 
2412 /*
2413  * On entry, the page is not uptodate at all.
2414  * On exit, the page is fully uptodate in the areas outside (from,to).
2415  */
2416 int nobh_write_begin(struct file *file, struct address_space *mapping,
2417 			loff_t pos, unsigned len, unsigned flags,
2418 			struct page **pagep, void **fsdata,
2419 			get_block_t *get_block)
2420 {
2421 	struct inode *inode = mapping->host;
2422 	const unsigned blkbits = inode->i_blkbits;
2423 	const unsigned blocksize = 1 << blkbits;
2424 	struct buffer_head *head, *bh;
2425 	struct page *page;
2426 	pgoff_t index;
2427 	unsigned from, to;
2428 	unsigned block_in_page;
2429 	unsigned block_start, block_end;
2430 	sector_t block_in_file;
2431 	int nr_reads = 0;
2432 	int ret = 0;
2433 	int is_mapped_to_disk = 1;
2434 
2435 	index = pos >> PAGE_CACHE_SHIFT;
2436 	from = pos & (PAGE_CACHE_SIZE - 1);
2437 	to = from + len;
2438 
2439 	page = __grab_cache_page(mapping, index);
2440 	if (!page)
2441 		return -ENOMEM;
2442 	*pagep = page;
2443 	*fsdata = NULL;
2444 
2445 	if (page_has_buffers(page)) {
2446 		unlock_page(page);
2447 		page_cache_release(page);
2448 		*pagep = NULL;
2449 		return block_write_begin(file, mapping, pos, len, flags, pagep,
2450 					fsdata, get_block);
2451 	}
2452 
2453 	if (PageMappedToDisk(page))
2454 		return 0;
2455 
2456 	/*
2457 	 * Allocate buffers so that we can keep track of state, and potentially
2458 	 * attach them to the page if an error occurs. In the common case of
2459 	 * no error, they will just be freed again without ever being attached
2460 	 * to the page (which is all OK, because we're under the page lock).
2461 	 *
2462 	 * Be careful: the buffer linked list is a NULL terminated one, rather
2463 	 * than the circular one we're used to.
2464 	 */
2465 	head = alloc_page_buffers(page, blocksize, 0);
2466 	if (!head) {
2467 		ret = -ENOMEM;
2468 		goto out_release;
2469 	}
2470 
2471 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2472 
2473 	/*
2474 	 * We loop across all blocks in the page, whether or not they are
2475 	 * part of the affected region.  This is so we can discover if the
2476 	 * page is fully mapped-to-disk.
2477 	 */
2478 	for (block_start = 0, block_in_page = 0, bh = head;
2479 		  block_start < PAGE_CACHE_SIZE;
2480 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2481 		int create;
2482 
2483 		block_end = block_start + blocksize;
2484 		bh->b_state = 0;
2485 		create = 1;
2486 		if (block_start >= to)
2487 			create = 0;
2488 		ret = get_block(inode, block_in_file + block_in_page,
2489 					bh, create);
2490 		if (ret)
2491 			goto failed;
2492 		if (!buffer_mapped(bh))
2493 			is_mapped_to_disk = 0;
2494 		if (buffer_new(bh))
2495 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2496 		if (PageUptodate(page)) {
2497 			set_buffer_uptodate(bh);
2498 			continue;
2499 		}
2500 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2501 			zero_user_segments(page, block_start, from,
2502 							to, block_end);
2503 			continue;
2504 		}
2505 		if (buffer_uptodate(bh))
2506 			continue;	/* reiserfs does this */
2507 		if (block_start < from || block_end > to) {
2508 			lock_buffer(bh);
2509 			bh->b_end_io = end_buffer_read_nobh;
2510 			submit_bh(READ, bh);
2511 			nr_reads++;
2512 		}
2513 	}
2514 
2515 	if (nr_reads) {
2516 		/*
2517 		 * The page is locked, so these buffers are protected from
2518 		 * any VM or truncate activity.  Hence we don't need to care
2519 		 * for the buffer_head refcounts.
2520 		 */
2521 		for (bh = head; bh; bh = bh->b_this_page) {
2522 			wait_on_buffer(bh);
2523 			if (!buffer_uptodate(bh))
2524 				ret = -EIO;
2525 		}
2526 		if (ret)
2527 			goto failed;
2528 	}
2529 
2530 	if (is_mapped_to_disk)
2531 		SetPageMappedToDisk(page);
2532 
2533 	*fsdata = head; /* to be released by nobh_write_end */
2534 
2535 	return 0;
2536 
2537 failed:
2538 	BUG_ON(!ret);
2539 	/*
2540 	 * Error recovery is a bit difficult. We need to zero out blocks that
2541 	 * were newly allocated, and dirty them to ensure they get written out.
2542 	 * Buffers need to be attached to the page at this point, otherwise
2543 	 * the handling of potential IO errors during writeout would be hard
2544 	 * (could try doing synchronous writeout, but what if that fails too?)
2545 	 */
2546 	attach_nobh_buffers(page, head);
2547 	page_zero_new_buffers(page, from, to);
2548 
2549 out_release:
2550 	unlock_page(page);
2551 	page_cache_release(page);
2552 	*pagep = NULL;
2553 
2554 	if (pos + len > inode->i_size)
2555 		vmtruncate(inode, inode->i_size);
2556 
2557 	return ret;
2558 }
2559 EXPORT_SYMBOL(nobh_write_begin);
2560 
2561 int nobh_write_end(struct file *file, struct address_space *mapping,
2562 			loff_t pos, unsigned len, unsigned copied,
2563 			struct page *page, void *fsdata)
2564 {
2565 	struct inode *inode = page->mapping->host;
2566 	struct buffer_head *head = fsdata;
2567 	struct buffer_head *bh;
2568 
2569 	if (!PageMappedToDisk(page)) {
2570 		if (unlikely(copied < len) && !page_has_buffers(page))
2571 			attach_nobh_buffers(page, head);
2572 		if (page_has_buffers(page))
2573 			return generic_write_end(file, mapping, pos, len,
2574 						copied, page, fsdata);
2575 	}
2576 
2577 	SetPageUptodate(page);
2578 	set_page_dirty(page);
2579 	if (pos+copied > inode->i_size) {
2580 		i_size_write(inode, pos+copied);
2581 		mark_inode_dirty(inode);
2582 	}
2583 
2584 	unlock_page(page);
2585 	page_cache_release(page);
2586 
2587 	while (head) {
2588 		bh = head;
2589 		head = head->b_this_page;
2590 		free_buffer_head(bh);
2591 	}
2592 
2593 	return copied;
2594 }
2595 EXPORT_SYMBOL(nobh_write_end);
2596 
2597 /*
2598  * nobh_writepage() - based on block_write_full_page() except
2599  * that it tries to operate without attaching bufferheads to
2600  * the page.
2601  */
2602 int nobh_writepage(struct page *page, get_block_t *get_block,
2603 			struct writeback_control *wbc)
2604 {
2605 	struct inode * const inode = page->mapping->host;
2606 	loff_t i_size = i_size_read(inode);
2607 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2608 	unsigned offset;
2609 	int ret;
2610 
2611 	/* Is the page fully inside i_size? */
2612 	if (page->index < end_index)
2613 		goto out;
2614 
2615 	/* Is the page fully outside i_size? (truncate in progress) */
2616 	offset = i_size & (PAGE_CACHE_SIZE-1);
2617 	if (page->index >= end_index+1 || !offset) {
2618 		/*
2619 		 * The page may have dirty, unmapped buffers.  For example,
2620 		 * they may have been added in ext3_writepage().  Make them
2621 		 * freeable here, so the page does not leak.
2622 		 */
2623 #if 0
2624 		/* Not really sure about this  - do we need this ? */
2625 		if (page->mapping->a_ops->invalidatepage)
2626 			page->mapping->a_ops->invalidatepage(page, offset);
2627 #endif
2628 		unlock_page(page);
2629 		return 0; /* don't care */
2630 	}
2631 
2632 	/*
2633 	 * The page straddles i_size.  It must be zeroed out on each and every
2634 	 * writepage invocation because it may be mmapped.  "A file is mapped
2635 	 * in multiples of the page size.  For a file that is not a multiple of
2636 	 * the  page size, the remaining memory is zeroed when mapped, and
2637 	 * writes to that region are not written out to the file."
2638 	 */
2639 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2640 out:
2641 	ret = mpage_writepage(page, get_block, wbc);
2642 	if (ret == -EAGAIN)
2643 		ret = __block_write_full_page(inode, page, get_block, wbc);
2644 	return ret;
2645 }
2646 EXPORT_SYMBOL(nobh_writepage);
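
/*
 * Example (sketch, hypothetical "myfs_" names): the nobh_* helpers are
 * meant to be used together, e.g. for an ext2-style "nobh" mount option:
 *
 *	static int myfs_nobh_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return nobh_write_begin(file, mapping, pos, len, flags,
 *					pagep, fsdata, myfs_get_block);
 *	}
 *
 *	static int myfs_nobh_writepage(struct page *page,
 *			struct writeback_control *wbc)
 *	{
 *		return nobh_writepage(page, myfs_get_block, wbc);
 *	}
 *
 * with .write_end = nobh_write_end in the corresponding
 * address_space_operations.
 */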
2647 
2648 int nobh_truncate_page(struct address_space *mapping,
2649 			loff_t from, get_block_t *get_block)
2650 {
2651 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2652 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2653 	unsigned blocksize;
2654 	sector_t iblock;
2655 	unsigned length, pos;
2656 	struct inode *inode = mapping->host;
2657 	struct page *page;
2658 	struct buffer_head map_bh;
2659 	int err;
2660 
2661 	blocksize = 1 << inode->i_blkbits;
2662 	length = offset & (blocksize - 1);
2663 
2664 	/* Block boundary? Nothing to do */
2665 	if (!length)
2666 		return 0;
2667 
2668 	length = blocksize - length;
2669 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2670 
2671 	page = grab_cache_page(mapping, index);
2672 	err = -ENOMEM;
2673 	if (!page)
2674 		goto out;
2675 
2676 	if (page_has_buffers(page)) {
2677 has_buffers:
2678 		unlock_page(page);
2679 		page_cache_release(page);
2680 		return block_truncate_page(mapping, from, get_block);
2681 	}
2682 
2683 	/* Find the buffer that contains "offset" */
2684 	pos = blocksize;
2685 	while (offset >= pos) {
2686 		iblock++;
2687 		pos += blocksize;
2688 	}
2689 
2690 	err = get_block(inode, iblock, &map_bh, 0);
2691 	if (err)
2692 		goto unlock;
2693 	/* unmapped? It's a hole - nothing to do */
2694 	if (!buffer_mapped(&map_bh))
2695 		goto unlock;
2696 
2697 	/* Ok, it's mapped. Make sure it's up-to-date */
2698 	if (!PageUptodate(page)) {
2699 		err = mapping->a_ops->readpage(NULL, page);
2700 		if (err) {
2701 			page_cache_release(page);
2702 			goto out;
2703 		}
2704 		lock_page(page);
2705 		if (!PageUptodate(page)) {
2706 			err = -EIO;
2707 			goto unlock;
2708 		}
2709 		if (page_has_buffers(page))
2710 			goto has_buffers;
2711 	}
2712 	zero_user(page, offset, length);
2713 	set_page_dirty(page);
2714 	err = 0;
2715 
2716 unlock:
2717 	unlock_page(page);
2718 	page_cache_release(page);
2719 out:
2720 	return err;
2721 }
2722 EXPORT_SYMBOL(nobh_truncate_page);
2723 
2724 int block_truncate_page(struct address_space *mapping,
2725 			loff_t from, get_block_t *get_block)
2726 {
2727 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2728 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2729 	unsigned blocksize;
2730 	sector_t iblock;
2731 	unsigned length, pos;
2732 	struct inode *inode = mapping->host;
2733 	struct page *page;
2734 	struct buffer_head *bh;
2735 	int err;
2736 
2737 	blocksize = 1 << inode->i_blkbits;
2738 	length = offset & (blocksize - 1);
2739 
2740 	/* Block boundary? Nothing to do */
2741 	if (!length)
2742 		return 0;
2743 
2744 	length = blocksize - length;
2745 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2746 
2747 	page = grab_cache_page(mapping, index);
2748 	err = -ENOMEM;
2749 	if (!page)
2750 		goto out;
2751 
2752 	if (!page_has_buffers(page))
2753 		create_empty_buffers(page, blocksize, 0);
2754 
2755 	/* Find the buffer that contains "offset" */
2756 	bh = page_buffers(page);
2757 	pos = blocksize;
2758 	while (offset >= pos) {
2759 		bh = bh->b_this_page;
2760 		iblock++;
2761 		pos += blocksize;
2762 	}
2763 
2764 	err = 0;
2765 	if (!buffer_mapped(bh)) {
2766 		WARN_ON(bh->b_size != blocksize);
2767 		err = get_block(inode, iblock, bh, 0);
2768 		if (err)
2769 			goto unlock;
2770 		/* unmapped? It's a hole - nothing to do */
2771 		if (!buffer_mapped(bh))
2772 			goto unlock;
2773 	}
2774 
2775 	/* Ok, it's mapped. Make sure it's up-to-date */
2776 	if (PageUptodate(page))
2777 		set_buffer_uptodate(bh);
2778 
2779 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2780 		err = -EIO;
2781 		ll_rw_block(READ, 1, &bh);
2782 		wait_on_buffer(bh);
2783 		/* Uhhuh. Read error. Complain and punt. */
2784 		if (!buffer_uptodate(bh))
2785 			goto unlock;
2786 	}
2787 
2788 	zero_user(page, offset, length);
2789 	mark_buffer_dirty(bh);
2790 	err = 0;
2791 
2792 unlock:
2793 	unlock_page(page);
2794 	page_cache_release(page);
2795 out:
2796 	return err;
2797 }
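
/*
 * Example (sketch): a filesystem's truncate path typically zeroes the
 * partial block at the new end-of-file before trimming its block mappings
 * ("myfs_get_block" is hypothetical):
 *
 *	err = block_truncate_page(inode->i_mapping, inode->i_size,
 *				  myfs_get_block);
 */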
2798 
2799 /*
2800  * The generic ->writepage function for buffer-backed address_spaces
2801  */
2802 int block_write_full_page(struct page *page, get_block_t *get_block,
2803 			struct writeback_control *wbc)
2804 {
2805 	struct inode * const inode = page->mapping->host;
2806 	loff_t i_size = i_size_read(inode);
2807 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2808 	unsigned offset;
2809 
2810 	/* Is the page fully inside i_size? */
2811 	if (page->index < end_index)
2812 		return __block_write_full_page(inode, page, get_block, wbc);
2813 
2814 	/* Is the page fully outside i_size? (truncate in progress) */
2815 	offset = i_size & (PAGE_CACHE_SIZE-1);
2816 	if (page->index >= end_index+1 || !offset) {
2817 		/*
2818 		 * The page may have dirty, unmapped buffers.  For example,
2819 		 * they may have been added in ext3_writepage().  Make them
2820 		 * freeable here, so the page does not leak.
2821 		 */
2822 		do_invalidatepage(page, 0);
2823 		unlock_page(page);
2824 		return 0; /* don't care */
2825 	}
2826 
2827 	/*
2828 	 * The page straddles i_size.  It must be zeroed out on each and every
2829 	 * writepage invocation because it may be mmapped.  "A file is mapped
2830 	 * in multiples of the page size.  For a file that is not a multiple of
2831 	 * the  page size, the remaining memory is zeroed when mapped, and
2832 	 * writes to that region are not written out to the file."
2833 	 */
2834 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2835 	return __block_write_full_page(inode, page, get_block, wbc);
2836 }
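
/*
 * Example (sketch): the usual ->writepage() wrapper
 * ("myfs_get_block" is hypothetical):
 *
 *	static int myfs_writepage(struct page *page,
 *				struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, myfs_get_block, wbc);
 *	}
 */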
2837 
2838 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2839 			    get_block_t *get_block)
2840 {
2841 	struct buffer_head tmp;
2842 	struct inode *inode = mapping->host;
2843 	tmp.b_state = 0;
2844 	tmp.b_blocknr = 0;
2845 	tmp.b_size = 1 << inode->i_blkbits;
2846 	get_block(inode, block, &tmp, 0);
2847 	return tmp.b_blocknr;
2848 }
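
/*
 * Example (sketch): the matching ->bmap() wrapper
 * ("myfs_get_block" is hypothetical):
 *
 *	static sector_t myfs_bmap(struct address_space *mapping,
 *					sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */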
2849 
2850 static void end_bio_bh_io_sync(struct bio *bio, int err)
2851 {
2852 	struct buffer_head *bh = bio->bi_private;
2853 
2854 	if (err == -EOPNOTSUPP) {
2855 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2856 		set_bit(BH_Eopnotsupp, &bh->b_state);
2857 	}
2858 
2859 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2860 	bio_put(bio);
2861 }
2862 
2863 int submit_bh(int rw, struct buffer_head * bh)
2864 {
2865 	struct bio *bio;
2866 	int ret = 0;
2867 
2868 	BUG_ON(!buffer_locked(bh));
2869 	BUG_ON(!buffer_mapped(bh));
2870 	BUG_ON(!bh->b_end_io);
2871 
2872 	if (buffer_ordered(bh) && (rw == WRITE))
2873 		rw = WRITE_BARRIER;
2874 
2875 	/*
2876 	 * Only clear out a write error when rewriting, should this
2877 	 * include WRITE_SYNC as well?
2878 	 */
2879 	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2880 		clear_buffer_write_io_error(bh);
2881 
2882 	/*
2883 	 * from here on down, it's all bio -- do the initial mapping,
2884 	 * submit_bio -> generic_make_request may further map this bio around
2885 	 */
2886 	bio = bio_alloc(GFP_NOIO, 1);
2887 
2888 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2889 	bio->bi_bdev = bh->b_bdev;
2890 	bio->bi_io_vec[0].bv_page = bh->b_page;
2891 	bio->bi_io_vec[0].bv_len = bh->b_size;
2892 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2893 
2894 	bio->bi_vcnt = 1;
2895 	bio->bi_idx = 0;
2896 	bio->bi_size = bh->b_size;
2897 
2898 	bio->bi_end_io = end_bio_bh_io_sync;
2899 	bio->bi_private = bh;
2900 
2901 	bio_get(bio);
2902 	submit_bio(rw, bio);
2903 
2904 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
2905 		ret = -EOPNOTSUPP;
2906 
2907 	bio_put(bio);
2908 	return ret;
2909 }
2910 
2911 /**
2912  * ll_rw_block: low-level access to block devices (DEPRECATED)
2913  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2914  * @nr: number of &struct buffer_heads in the array
2915  * @bhs: array of pointers to &struct buffer_head
2916  *
2917  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2918  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2919  * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
2920  * are sent to disk. The fourth %READA option is described in the documentation
2921  * for generic_make_request() which ll_rw_block() calls.
2922  *
2923  * This function drops any buffer that it cannot get a lock on (with the
2924  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2925  * clean when doing a write request, and any buffer that appears to be
2926  * up-to-date when doing a read request.  Further, it marks as clean the buffers that
2927  * are processed for writing (the buffer cache won't assume that they are
2928  * actually clean until the buffer gets unlocked).
2929  *
2930  * ll_rw_block sets b_end_io to a simple completion handler that marks
2931  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2932  * any waiters.
2933  *
2934  * All of the buffers must be for the same device, and must also be a
2935  * multiple of the current approved size for the device.
2936  */
2937 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2938 {
2939 	int i;
2940 
2941 	for (i = 0; i < nr; i++) {
2942 		struct buffer_head *bh = bhs[i];
2943 
2944 		if (rw == SWRITE)
2945 			lock_buffer(bh);
2946 		else if (test_set_buffer_locked(bh))
2947 			continue;
2948 
2949 		if (rw == WRITE || rw == SWRITE) {
2950 			if (test_clear_buffer_dirty(bh)) {
2951 				bh->b_end_io = end_buffer_write_sync;
2952 				get_bh(bh);
2953 				submit_bh(WRITE, bh);
2954 				continue;
2955 			}
2956 		} else {
2957 			if (!buffer_uptodate(bh)) {
2958 				bh->b_end_io = end_buffer_read_sync;
2959 				get_bh(bh);
2960 				submit_bh(rw, bh);
2961 				continue;
2962 			}
2963 		}
2964 		unlock_buffer(bh);
2965 	}
2966 }
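
/*
 * Example (sketch): a common caller pattern is to kick off reads on a
 * batch of metadata buffers and then wait only for the one that is needed
 * immediately ("bh" and "bh2" are hypothetical, e.g. obtained from
 * sb_getblk()):
 *
 *	struct buffer_head *bhs[2] = { bh, bh2 };
 *
 *	ll_rw_block(READ, 2, bhs);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		return -EIO;
 */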
2967 
2968 /*
2969  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2970  * and then start new I/O and then wait upon it.  The caller must have a ref on
2971  * the buffer_head.
2972  */
2973 int sync_dirty_buffer(struct buffer_head *bh)
2974 {
2975 	int ret = 0;
2976 
2977 	WARN_ON(atomic_read(&bh->b_count) < 1);
2978 	lock_buffer(bh);
2979 	if (test_clear_buffer_dirty(bh)) {
2980 		get_bh(bh);
2981 		bh->b_end_io = end_buffer_write_sync;
2982 		ret = submit_bh(WRITE, bh);
2983 		wait_on_buffer(bh);
2984 		if (buffer_eopnotsupp(bh)) {
2985 			clear_buffer_eopnotsupp(bh);
2986 			ret = -EOPNOTSUPP;
2987 		}
2988 		if (!ret && !buffer_uptodate(bh))
2989 			ret = -EIO;
2990 	} else {
2991 		unlock_buffer(bh);
2992 	}
2993 	return ret;
2994 }
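
/*
 * Example (sketch): forcing a single piece of metadata to stable storage,
 * e.g. an updated on-disk superblock ("sbh" is hypothetical):
 *
 *	mark_buffer_dirty(sbh);
 *	err = sync_dirty_buffer(sbh);
 *	if (err)
 *		printk(KERN_ERR "myfs: superblock write failed\n");
 */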
2995 
2996 /*
2997  * try_to_free_buffers() checks if all the buffers on this particular page
2998  * are unused, and releases them if so.
2999  *
3000  * Exclusion against try_to_free_buffers may be obtained by either
3001  * locking the page or by holding its mapping's private_lock.
3002  *
3003  * If the page is dirty but all the buffers are clean then we need to
3004  * be sure to mark the page clean as well.  This is because the page
3005  * may be against a block device, and a later reattachment of buffers
3006  * to a dirty page will set *all* buffers dirty.  Which would corrupt
3007  * filesystem data on the same device.
3008  *
3009  * The same applies to regular filesystem pages: if all the buffers are
3010  * clean then we set the page clean and proceed.  To do that, we require
3011  * total exclusion from __set_page_dirty_buffers().  That is obtained with
3012  * private_lock.
3013  *
3014  * try_to_free_buffers() is non-blocking.
3015  */
3016 static inline int buffer_busy(struct buffer_head *bh)
3017 {
3018 	return atomic_read(&bh->b_count) |
3019 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3020 }
3021 
3022 static int
3023 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3024 {
3025 	struct buffer_head *head = page_buffers(page);
3026 	struct buffer_head *bh;
3027 
3028 	bh = head;
3029 	do {
3030 		if (buffer_write_io_error(bh) && page->mapping)
3031 			set_bit(AS_EIO, &page->mapping->flags);
3032 		if (buffer_busy(bh))
3033 			goto failed;
3034 		bh = bh->b_this_page;
3035 	} while (bh != head);
3036 
3037 	do {
3038 		struct buffer_head *next = bh->b_this_page;
3039 
3040 		if (bh->b_assoc_map)
3041 			__remove_assoc_queue(bh);
3042 		bh = next;
3043 	} while (bh != head);
3044 	*buffers_to_free = head;
3045 	__clear_page_buffers(page);
3046 	return 1;
3047 failed:
3048 	return 0;
3049 }
3050 
3051 int try_to_free_buffers(struct page *page)
3052 {
3053 	struct address_space * const mapping = page->mapping;
3054 	struct buffer_head *buffers_to_free = NULL;
3055 	int ret = 0;
3056 
3057 	BUG_ON(!PageLocked(page));
3058 	if (PageWriteback(page))
3059 		return 0;
3060 
3061 	if (mapping == NULL) {		/* can this still happen? */
3062 		ret = drop_buffers(page, &buffers_to_free);
3063 		goto out;
3064 	}
3065 
3066 	spin_lock(&mapping->private_lock);
3067 	ret = drop_buffers(page, &buffers_to_free);
3068 
3069 	/*
3070 	 * If the filesystem writes its buffers by hand (eg ext3)
3071 	 * then we can have clean buffers against a dirty page.  We
3072 	 * clean the page here; otherwise the VM will never notice
3073 	 * that the filesystem did any IO at all.
3074 	 *
3075 	 * Also, during truncate, discard_buffer will have marked all
3076 	 * the page's buffers clean.  We discover that here and clean
3077 	 * the page also.
3078 	 *
3079 	 * private_lock must be held over this entire operation in order
3080 	 * to synchronise against __set_page_dirty_buffers and prevent the
3081 	 * dirty bit from being lost.
3082 	 */
3083 	if (ret)
3084 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
3085 	spin_unlock(&mapping->private_lock);
3086 out:
3087 	if (buffers_to_free) {
3088 		struct buffer_head *bh = buffers_to_free;
3089 
3090 		do {
3091 			struct buffer_head *next = bh->b_this_page;
3092 			free_buffer_head(bh);
3093 			bh = next;
3094 		} while (bh != buffers_to_free);
3095 	}
3096 	return ret;
3097 }
3098 EXPORT_SYMBOL(try_to_free_buffers);
3099 
3100 void block_sync_page(struct page *page)
3101 {
3102 	struct address_space *mapping;
3103 
3104 	smp_mb();
3105 	mapping = page_mapping(page);
3106 	if (mapping)
3107 		blk_run_backing_dev(mapping->backing_dev_info, page);
3108 }
3109 
3110 /*
3111  * There are no bdflush tunables left.  But distributions are
3112  * still running obsolete flush daemons, so we terminate them here.
3113  *
3114  * Use of bdflush() is deprecated and will be removed in a future kernel.
3115  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3116  */
3117 asmlinkage long sys_bdflush(int func, long data)
3118 {
3119 	static int msg_count;
3120 
3121 	if (!capable(CAP_SYS_ADMIN))
3122 		return -EPERM;
3123 
3124 	if (msg_count < 5) {
3125 		msg_count++;
3126 		printk(KERN_INFO
3127 			"warning: process `%s' used the obsolete bdflush"
3128 			" system call\n", current->comm);
3129 		printk(KERN_INFO "Fix your initscripts?\n");
3130 	}
3131 
3132 	if (func == 1)
3133 		do_exit(0);
3134 	return 0;
3135 }
3136 
3137 /*
3138  * Buffer-head allocation
3139  */
3140 static struct kmem_cache *bh_cachep;
3141 
3142 /*
3143  * Once the number of bh's in the machine exceeds this level, we start
3144  * stripping them in writeback.
3145  */
3146 static int max_buffer_heads;
3147 
3148 int buffer_heads_over_limit;
3149 
3150 struct bh_accounting {
3151 	int nr;			/* Number of live bh's */
3152 	int ratelimit;		/* Limit cacheline bouncing */
3153 };
3154 
3155 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3156 
3157 static void recalc_bh_state(void)
3158 {
3159 	int i;
3160 	int tot = 0;
3161 
3162 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3163 		return;
3164 	__get_cpu_var(bh_accounting).ratelimit = 0;
3165 	for_each_online_cpu(i)
3166 		tot += per_cpu(bh_accounting, i).nr;
3167 	buffer_heads_over_limit = (tot > max_buffer_heads);
3168 }
3169 
3170 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3171 {
3172 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep,
3173 				set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
3174 	if (ret) {
3175 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3176 		get_cpu_var(bh_accounting).nr++;
3177 		recalc_bh_state();
3178 		put_cpu_var(bh_accounting);
3179 	}
3180 	return ret;
3181 }
3182 EXPORT_SYMBOL(alloc_buffer_head);
3183 
3184 void free_buffer_head(struct buffer_head *bh)
3185 {
3186 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
3187 	kmem_cache_free(bh_cachep, bh);
3188 	get_cpu_var(bh_accounting).nr--;
3189 	recalc_bh_state();
3190 	put_cpu_var(bh_accounting);
3191 }
3192 EXPORT_SYMBOL(free_buffer_head);
3193 
3194 static void buffer_exit_cpu(int cpu)
3195 {
3196 	int i;
3197 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3198 
3199 	for (i = 0; i < BH_LRU_SIZE; i++) {
3200 		brelse(b->bhs[i]);
3201 		b->bhs[i] = NULL;
3202 	}
3203 	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3204 	per_cpu(bh_accounting, cpu).nr = 0;
3205 	put_cpu_var(bh_accounting);
3206 }
3207 
3208 static int buffer_cpu_notify(struct notifier_block *self,
3209 			      unsigned long action, void *hcpu)
3210 {
3211 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3212 		buffer_exit_cpu((unsigned long)hcpu);
3213 	return NOTIFY_OK;
3214 }
3215 
3216 /**
3217  * bh_uptodate_or_lock: Test whether the buffer is uptodate
3218  * @bh: struct buffer_head
3219  *
3220  * Return true if the buffer is up-to-date; otherwise return false
3221  * with the buffer locked.
3222  */
3223 int bh_uptodate_or_lock(struct buffer_head *bh)
3224 {
3225 	if (!buffer_uptodate(bh)) {
3226 		lock_buffer(bh);
3227 		if (!buffer_uptodate(bh))
3228 			return 0;
3229 		unlock_buffer(bh);
3230 	}
3231 	return 1;
3232 }
3233 EXPORT_SYMBOL(bh_uptodate_or_lock);
3234 
3235 /**
3236  * bh_submit_read: Submit a locked buffer for reading
3237  * @bh: struct buffer_head
3238  *
3239  * Returns zero on success and -EIO on error.
3240  */
3241 int bh_submit_read(struct buffer_head *bh)
3242 {
3243 	BUG_ON(!buffer_locked(bh));
3244 
3245 	if (buffer_uptodate(bh)) {
3246 		unlock_buffer(bh);
3247 		return 0;
3248 	}
3249 
3250 	get_bh(bh);
3251 	bh->b_end_io = end_buffer_read_sync;
3252 	submit_bh(READ, bh);
3253 	wait_on_buffer(bh);
3254 	if (buffer_uptodate(bh))
3255 		return 0;
3256 	return -EIO;
3257 }
3258 EXPORT_SYMBOL(bh_submit_read);
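
/*
 * Example (sketch): bh_uptodate_or_lock() and bh_submit_read() pair up so
 * callers only do I/O when the buffer really needs it ("bh" is
 * hypothetical, e.g. from sb_getblk()):
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		if (bh_submit_read(bh))
 *			return -EIO;
 *	}
 */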
3259 
3260 static void
3261 init_buffer_head(struct kmem_cache *cachep, void *data)
3262 {
3263 	struct buffer_head *bh = data;
3264 
3265 	memset(bh, 0, sizeof(*bh));
3266 	INIT_LIST_HEAD(&bh->b_assoc_buffers);
3267 }
3268 
3269 void __init buffer_init(void)
3270 {
3271 	int nrpages;
3272 
3273 	bh_cachep = kmem_cache_create("buffer_head",
3274 			sizeof(struct buffer_head), 0,
3275 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3276 				SLAB_MEM_SPREAD),
3277 				init_buffer_head);
3278 
3279 	/*
3280 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3281 	 */
3282 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3283 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3284 	hotcpu_notifier(buffer_cpu_notify, 0);
3285 }
3286 
3287 EXPORT_SYMBOL(__bforget);
3288 EXPORT_SYMBOL(__brelse);
3289 EXPORT_SYMBOL(__wait_on_buffer);
3290 EXPORT_SYMBOL(block_commit_write);
3291 EXPORT_SYMBOL(block_prepare_write);
3292 EXPORT_SYMBOL(block_page_mkwrite);
3293 EXPORT_SYMBOL(block_read_full_page);
3294 EXPORT_SYMBOL(block_sync_page);
3295 EXPORT_SYMBOL(block_truncate_page);
3296 EXPORT_SYMBOL(block_write_full_page);
3297 EXPORT_SYMBOL(cont_write_begin);
3298 EXPORT_SYMBOL(end_buffer_read_sync);
3299 EXPORT_SYMBOL(end_buffer_write_sync);
3300 EXPORT_SYMBOL(file_fsync);
3301 EXPORT_SYMBOL(fsync_bdev);
3302 EXPORT_SYMBOL(generic_block_bmap);
3303 EXPORT_SYMBOL(generic_commit_write);
3304 EXPORT_SYMBOL(generic_cont_expand_simple);
3305 EXPORT_SYMBOL(init_buffer);
3306 EXPORT_SYMBOL(invalidate_bdev);
3307 EXPORT_SYMBOL(ll_rw_block);
3308 EXPORT_SYMBOL(mark_buffer_dirty);
3309 EXPORT_SYMBOL(submit_bh);
3310 EXPORT_SYMBOL(sync_dirty_buffer);
3311 EXPORT_SYMBOL(unlock_buffer);
3312