xref: /linux/fs/buffer.c (revision 5e8d780d745c1619aba81fe7166c5a4b5cad2b84)
1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6 
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20 
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/syscalls.h>
24 #include <linux/fs.h>
25 #include <linux/mm.h>
26 #include <linux/percpu.h>
27 #include <linux/slab.h>
28 #include <linux/smp_lock.h>
29 #include <linux/capability.h>
30 #include <linux/blkdev.h>
31 #include <linux/file.h>
32 #include <linux/quotaops.h>
33 #include <linux/highmem.h>
34 #include <linux/module.h>
35 #include <linux/writeback.h>
36 #include <linux/hash.h>
37 #include <linux/suspend.h>
38 #include <linux/buffer_head.h>
39 #include <linux/bio.h>
40 #include <linux/notifier.h>
41 #include <linux/cpu.h>
42 #include <linux/bitops.h>
43 #include <linux/mpage.h>
44 #include <linux/bit_spinlock.h>
45 
46 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47 static void invalidate_bh_lrus(void);
48 
49 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
50 
51 inline void
52 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
53 {
54 	bh->b_end_io = handler;
55 	bh->b_private = private;
56 }
57 
58 static int sync_buffer(void *word)
59 {
60 	struct block_device *bd;
61 	struct buffer_head *bh
62 		= container_of(word, struct buffer_head, b_state);
63 
64 	smp_mb();
65 	bd = bh->b_bdev;
66 	if (bd)
67 		blk_run_address_space(bd->bd_inode->i_mapping);
68 	io_schedule();
69 	return 0;
70 }
71 
72 void fastcall __lock_buffer(struct buffer_head *bh)
73 {
74 	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
75 							TASK_UNINTERRUPTIBLE);
76 }
77 EXPORT_SYMBOL(__lock_buffer);
78 
79 void fastcall unlock_buffer(struct buffer_head *bh)
80 {
81 	clear_buffer_locked(bh);
82 	smp_mb__after_clear_bit();
83 	wake_up_bit(&bh->b_state, BH_Lock);
84 }
85 
86 /*
87  * Block until a buffer comes unlocked.  This doesn't stop it
88  * from becoming locked again - you have to lock it yourself
89  * if you want to preserve its state.
90  */
91 void __wait_on_buffer(struct buffer_head * bh)
92 {
93 	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
94 }
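
/*
 * Illustrative sketch (not part of this file): since a buffer can be
 * re-locked the instant __wait_on_buffer() returns, callers needing a
 * stable view should take the lock themselves rather than just wait;
 * lock_buffer() both waits and acquires BH_Lock:
 *
 *	lock_buffer(bh);
 *	if (buffer_uptodate(bh))
 *		use_buffer(bh);		(use_buffer is hypothetical)
 *	unlock_buffer(bh);
 */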
95 
96 static void
97 __clear_page_buffers(struct page *page)
98 {
99 	ClearPagePrivate(page);
100 	set_page_private(page, 0);
101 	page_cache_release(page);
102 }
103 
104 static void buffer_io_error(struct buffer_head *bh)
105 {
106 	char b[BDEVNAME_SIZE];
107 
108 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
109 			bdevname(bh->b_bdev, b),
110 			(unsigned long long)bh->b_blocknr);
111 }
112 
113 /*
114  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
115  * unlock the buffer. This is what ll_rw_block uses too.
116  */
117 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
118 {
119 	if (uptodate) {
120 		set_buffer_uptodate(bh);
121 	} else {
122 		/* This happens due to failed READA attempts. */
123 		clear_buffer_uptodate(bh);
124 	}
125 	unlock_buffer(bh);
126 	put_bh(bh);
127 }
128 
129 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
130 {
131 	char b[BDEVNAME_SIZE];
132 
133 	if (uptodate) {
134 		set_buffer_uptodate(bh);
135 	} else {
136 		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
137 			buffer_io_error(bh);
138 			printk(KERN_WARNING "lost page write due to "
139 					"I/O error on %s\n",
140 				       bdevname(bh->b_bdev, b));
141 		}
142 		set_buffer_write_io_error(bh);
143 		clear_buffer_uptodate(bh);
144 	}
145 	unlock_buffer(bh);
146 	put_bh(bh);
147 }
148 
149 /*
150  * Write out and wait upon all the dirty data associated with a block
151  * device via its mapping.  Does not take the superblock lock.
152  */
153 int sync_blockdev(struct block_device *bdev)
154 {
155 	int ret = 0;
156 
157 	if (bdev)
158 		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
159 	return ret;
160 }
161 EXPORT_SYMBOL(sync_blockdev);
162 
163 static void __fsync_super(struct super_block *sb)
164 {
165 	sync_inodes_sb(sb, 0);
166 	DQUOT_SYNC(sb);
167 	lock_super(sb);
168 	if (sb->s_dirt && sb->s_op->write_super)
169 		sb->s_op->write_super(sb);
170 	unlock_super(sb);
171 	if (sb->s_op->sync_fs)
172 		sb->s_op->sync_fs(sb, 1);
173 	sync_blockdev(sb->s_bdev);
174 	sync_inodes_sb(sb, 1);
175 }
176 
177 /*
178  * Write out and wait upon all dirty data associated with this
179  * superblock.  Filesystem data as well as the underlying block
180  * device.  Takes the superblock lock.
181  */
182 int fsync_super(struct super_block *sb)
183 {
184 	__fsync_super(sb);
185 	return sync_blockdev(sb->s_bdev);
186 }
187 
188 /*
189  * Write out and wait upon all dirty data associated with this
190  * device.   Filesystem data as well as the underlying block
191  * device.  Takes the superblock lock.
192  */
193 int fsync_bdev(struct block_device *bdev)
194 {
195 	struct super_block *sb = get_super(bdev);
196 	if (sb) {
197 		int res = fsync_super(sb);
198 		drop_super(sb);
199 		return res;
200 	}
201 	return sync_blockdev(bdev);
202 }
203 
204 /**
205  * freeze_bdev  --  lock a filesystem and force it into a consistent state
206  * @bdev:	blockdevice to lock
207  *
208  * This takes the block device bd_mount_mutex to make sure no new mounts
209  * happen on bdev until thaw_bdev() is called.
210  * If a superblock is found on this device, we take the s_umount semaphore
211  * on it to make sure nobody unmounts until the snapshot creation is done.
212  */
213 struct super_block *freeze_bdev(struct block_device *bdev)
214 {
215 	struct super_block *sb;
216 
217 	mutex_lock(&bdev->bd_mount_mutex);
218 	sb = get_super(bdev);
219 	if (sb && !(sb->s_flags & MS_RDONLY)) {
220 		sb->s_frozen = SB_FREEZE_WRITE;
221 		smp_wmb();
222 
223 		__fsync_super(sb);
224 
225 		sb->s_frozen = SB_FREEZE_TRANS;
226 		smp_wmb();
227 
228 		sync_blockdev(sb->s_bdev);
229 
230 		if (sb->s_op->write_super_lockfs)
231 			sb->s_op->write_super_lockfs(sb);
232 	}
233 
234 	sync_blockdev(bdev);
235 	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_mutex */
236 }
237 EXPORT_SYMBOL(freeze_bdev);
238 
239 /**
240  * thaw_bdev  -- unlock filesystem
241  * @bdev:	blockdevice to unlock
242  * @sb:		associated superblock
243  *
244  * Unlocks the filesystem and marks it writeable again after freeze_bdev().
245  */
246 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
247 {
248 	if (sb) {
249 		BUG_ON(sb->s_bdev != bdev);
250 
251 		if (sb->s_op->unlockfs)
252 			sb->s_op->unlockfs(sb);
253 		sb->s_frozen = SB_UNFROZEN;
254 		smp_wmb();
255 		wake_up(&sb->s_wait_unfrozen);
256 		drop_super(sb);
257 	}
258 
259 	mutex_unlock(&bdev->bd_mount_mutex);
260 }
261 EXPORT_SYMBOL(thaw_bdev);
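
/*
 * Typical pairing, as a sketch (take_snapshot() is a hypothetical
 * driver hook, not a kernel API):
 *
 *	struct super_block *sb = freeze_bdev(bdev);
 *	take_snapshot(bdev);
 *	thaw_bdev(bdev, sb);
 *
 * freeze_bdev() may return NULL if no superblock is mounted;
 * thaw_bdev() copes with that and still drops bd_mount_mutex.
 */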
262 
263 /*
264  * sync everything.  Start out by waking pdflush, because that writes back
265  * all queues in parallel.
266  */
267 static void do_sync(unsigned long wait)
268 {
269 	wakeup_pdflush(0);
270 	sync_inodes(0);		/* All mappings, inodes and their blockdevs */
271 	DQUOT_SYNC(NULL);
272 	sync_supers();		/* Write the superblocks */
273 	sync_filesystems(0);	/* Start syncing the filesystems */
274 	sync_filesystems(wait);	/* Waitingly sync the filesystems */
275 	sync_inodes(wait);	/* Mappings, inodes and blockdevs, again. */
276 	if (!wait)
277 		printk("Emergency Sync complete\n");
278 	if (unlikely(laptop_mode))
279 		laptop_sync_completion();
280 }
281 
282 asmlinkage long sys_sync(void)
283 {
284 	do_sync(1);
285 	return 0;
286 }
287 
288 void emergency_sync(void)
289 {
290 	pdflush_operation(do_sync, 0);
291 }
292 
293 /*
294  * Generic function to fsync a file.
295  *
296  * filp may be NULL if called via the msync of a vma.
297  */
298 
299 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
300 {
301 	struct inode * inode = dentry->d_inode;
302 	struct super_block * sb;
303 	int ret, err;
304 
305 	/* sync the inode to buffers */
306 	ret = write_inode_now(inode, 0);
307 
308 	/* sync the superblock to buffers */
309 	sb = inode->i_sb;
310 	lock_super(sb);
311 	if (sb->s_op->write_super)
312 		sb->s_op->write_super(sb);
313 	unlock_super(sb);
314 
315 	/* .. finally sync the buffers to disk */
316 	err = sync_blockdev(sb->s_bdev);
317 	if (!ret)
318 		ret = err;
319 	return ret;
320 }
321 
322 long do_fsync(struct file *file, int datasync)
323 {
324 	int ret;
325 	int err;
326 	struct address_space *mapping = file->f_mapping;
327 
328 	if (!file->f_op || !file->f_op->fsync) {
329 		/* Why?  We can still call filemap_fdatawrite */
330 		ret = -EINVAL;
331 		goto out;
332 	}
333 
334 	ret = filemap_fdatawrite(mapping);
335 
336 	/*
337 	 * We need to protect against concurrent writers, which could cause
338 	 * livelocks in fsync_buffers_list().
339 	 */
340 	mutex_lock(&mapping->host->i_mutex);
341 	err = file->f_op->fsync(file, file->f_dentry, datasync);
342 	if (!ret)
343 		ret = err;
344 	mutex_unlock(&mapping->host->i_mutex);
345 	err = filemap_fdatawait(mapping);
346 	if (!ret)
347 		ret = err;
348 out:
349 	return ret;
350 }
351 
352 static long __do_fsync(unsigned int fd, int datasync)
353 {
354 	struct file *file;
355 	int ret = -EBADF;
356 
357 	file = fget(fd);
358 	if (file) {
359 		ret = do_fsync(file, datasync);
360 		fput(file);
361 	}
362 	return ret;
363 }
364 
365 asmlinkage long sys_fsync(unsigned int fd)
366 {
367 	return __do_fsync(fd, 0);
368 }
369 
370 asmlinkage long sys_fdatasync(unsigned int fd)
371 {
372 	return __do_fsync(fd, 1);
373 }
374 
375 /*
376  * Various filesystems appear to want __find_get_block to be non-blocking.
377  * But it's the page lock which protects the buffers.  To get around this,
378  * we get exclusion from try_to_free_buffers with the blockdev mapping's
379  * private_lock.
380  *
381  * Hack idea: for the blockdev mapping, private_lock contention
382  * may be quite high.  This code could TryLock the page, and if that
383  * succeeds, there is no need to take private_lock. (But if
384  * private_lock is contended then so is mapping->tree_lock).
385  */
386 static struct buffer_head *
387 __find_get_block_slow(struct block_device *bdev, sector_t block)
388 {
389 	struct inode *bd_inode = bdev->bd_inode;
390 	struct address_space *bd_mapping = bd_inode->i_mapping;
391 	struct buffer_head *ret = NULL;
392 	pgoff_t index;
393 	struct buffer_head *bh;
394 	struct buffer_head *head;
395 	struct page *page;
396 	int all_mapped = 1;
397 
398 	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
399 	page = find_get_page(bd_mapping, index);
400 	if (!page)
401 		goto out;
402 
403 	spin_lock(&bd_mapping->private_lock);
404 	if (!page_has_buffers(page))
405 		goto out_unlock;
406 	head = page_buffers(page);
407 	bh = head;
408 	do {
409 		if (bh->b_blocknr == block) {
410 			ret = bh;
411 			get_bh(bh);
412 			goto out_unlock;
413 		}
414 		if (!buffer_mapped(bh))
415 			all_mapped = 0;
416 		bh = bh->b_this_page;
417 	} while (bh != head);
418 
419 	/* we might be here because some of the buffers on this page are
420 	 * not mapped.  This is due to various races between
421 	 * file I/O on the block device and getblk.  It gets dealt with
422 	 * elsewhere, so don't complain if we see some unmapped buffers
423 	 */
424 	if (all_mapped) {
425 		printk("__find_get_block_slow() failed. "
426 			"block=%llu, b_blocknr=%llu\n",
427 			(unsigned long long)block,
428 			(unsigned long long)bh->b_blocknr);
429 		printk("b_state=0x%08lx, b_size=%zu\n",
430 			bh->b_state, bh->b_size);
431 		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
432 	}
433 out_unlock:
434 	spin_unlock(&bd_mapping->private_lock);
435 	page_cache_release(page);
436 out:
437 	return ret;
438 }
439 
440 /* If invalidate_buffers() will trash dirty buffers, it means some kind
441    of fs corruption is going on. Trashing dirty data always implies losing
442    information that the user expected to be stored on the physical
443    medium.
444 
445    Thus invalidate_buffers in general usage is not allowed to trash
446    dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
447    be preserved.  These buffers are simply skipped.
448 
449    We also skip buffers which are still in use.  For example this can
450    happen if a userspace program is reading the block device.
451 
452    NOTE: if the user removed a removable-media disk while there was
453    still dirty data not synced to disk (due to a bug in the device driver
454    or to a user error), then by not destroying the dirty buffers we could
455    generate corruption also on the next media inserted; thus a parameter is
456    necessary to handle this case in the safest way possible (trying
457    not to corrupt the newly inserted disk with data belonging to
458    the old, now-corrupted disk). Also, for a ramdisk the natural way
459    to release the ramdisk memory is to destroy its dirty buffers.
460 
461    These are two special cases. Normal usage implies that the device driver
462    issues a sync on the device (without waiting for I/O completion) and
463    then an invalidate_buffers call that doesn't trash dirty buffers.
464 
465    For handling cache coherency with the blkdev pagecache the 'update' case
466    has been introduced. It is needed to re-read from disk any pinned
467    buffer. NOTE: re-reading from disk is destructive so we can do it only
468    when we assume nobody is changing the buffercache under our I/O and when
469    we think the disk contains more recent information than the buffercache.
470    The update == 1 pass marks the buffers we need to update, the update == 2
471    pass does the actual I/O. */
472 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
473 {
474 	invalidate_bh_lrus();
475 	/*
476 	 * FIXME: what about destroy_dirty_buffers?
477 	 * We really want to use invalidate_inode_pages2() for
478 	 * that, but not until that's cleaned up.
479 	 */
480 	invalidate_inode_pages(bdev->bd_inode->i_mapping);
481 }
482 
483 /*
484  * Kick pdflush then try to free up some ZONE_NORMAL memory.
485  */
486 static void free_more_memory(void)
487 {
488 	struct zone **zones;
489 	pg_data_t *pgdat;
490 
491 	wakeup_pdflush(1024);
492 	yield();
493 
494 	for_each_online_pgdat(pgdat) {
495 		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
496 		if (*zones)
497 			try_to_free_pages(zones, GFP_NOFS);
498 	}
499 }
500 
501 /*
502  * I/O completion handler for block_read_full_page() - pages
503  * which come unlocked at the end of I/O.
504  */
505 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
506 {
507 	unsigned long flags;
508 	struct buffer_head *first;
509 	struct buffer_head *tmp;
510 	struct page *page;
511 	int page_uptodate = 1;
512 
513 	BUG_ON(!buffer_async_read(bh));
514 
515 	page = bh->b_page;
516 	if (uptodate) {
517 		set_buffer_uptodate(bh);
518 	} else {
519 		clear_buffer_uptodate(bh);
520 		if (printk_ratelimit())
521 			buffer_io_error(bh);
522 		SetPageError(page);
523 	}
524 
525 	/*
526 	 * Be _very_ careful from here on. Bad things can happen if
527 	 * two buffer heads end IO at almost the same time and both
528 	 * decide that the page is now completely done.
529 	 */
530 	first = page_buffers(page);
531 	local_irq_save(flags);
532 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
533 	clear_buffer_async_read(bh);
534 	unlock_buffer(bh);
535 	tmp = bh;
536 	do {
537 		if (!buffer_uptodate(tmp))
538 			page_uptodate = 0;
539 		if (buffer_async_read(tmp)) {
540 			BUG_ON(!buffer_locked(tmp));
541 			goto still_busy;
542 		}
543 		tmp = tmp->b_this_page;
544 	} while (tmp != bh);
545 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
546 	local_irq_restore(flags);
547 
548 	/*
549 	 * If none of the buffers had errors and they are all
550 	 * uptodate then we can set the page uptodate.
551 	 */
552 	if (page_uptodate && !PageError(page))
553 		SetPageUptodate(page);
554 	unlock_page(page);
555 	return;
556 
557 still_busy:
558 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
559 	local_irq_restore(flags);
560 	return;
561 }
562 
563 /*
564  * Completion handler for block_write_full_page() - pages which are unlocked
565  * during I/O, and which have PageWriteback cleared upon I/O completion.
566  */
567 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
568 {
569 	char b[BDEVNAME_SIZE];
570 	unsigned long flags;
571 	struct buffer_head *first;
572 	struct buffer_head *tmp;
573 	struct page *page;
574 
575 	BUG_ON(!buffer_async_write(bh));
576 
577 	page = bh->b_page;
578 	if (uptodate) {
579 		set_buffer_uptodate(bh);
580 	} else {
581 		if (printk_ratelimit()) {
582 			buffer_io_error(bh);
583 			printk(KERN_WARNING "lost page write due to "
584 					"I/O error on %s\n",
585 			       bdevname(bh->b_bdev, b));
586 		}
587 		set_bit(AS_EIO, &page->mapping->flags);
588 		clear_buffer_uptodate(bh);
589 		SetPageError(page);
590 	}
591 
592 	first = page_buffers(page);
593 	local_irq_save(flags);
594 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
595 
596 	clear_buffer_async_write(bh);
597 	unlock_buffer(bh);
598 	tmp = bh->b_this_page;
599 	while (tmp != bh) {
600 		if (buffer_async_write(tmp)) {
601 			BUG_ON(!buffer_locked(tmp));
602 			goto still_busy;
603 		}
604 		tmp = tmp->b_this_page;
605 	}
606 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
607 	local_irq_restore(flags);
608 	end_page_writeback(page);
609 	return;
610 
611 still_busy:
612 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
613 	local_irq_restore(flags);
614 	return;
615 }
616 
617 /*
618  * If a page's buffers are under async read-in (end_buffer_async_read
619  * completion) then there is a possibility that another thread of
620  * control could lock one of the buffers after it has completed
621  * but while some of the other buffers have not completed.  This
622  * locked buffer would confuse end_buffer_async_read() into not unlocking
623  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
624  * that this buffer is not under async I/O.
625  *
626  * The page comes unlocked when it has no locked buffer_async buffers
627  * left.
628  *
629  * PageLocked prevents anyone starting new async I/O reads any of
630  * the buffers.
631  *
632  * PageWriteback is used to prevent simultaneous writeout of the same
633  * page.
634  *
635  * PageLocked prevents anyone from starting writeback of a page which is
636  * under read I/O (PageWriteback is only ever set against a locked page).
637  */
638 static void mark_buffer_async_read(struct buffer_head *bh)
639 {
640 	bh->b_end_io = end_buffer_async_read;
641 	set_buffer_async_read(bh);
642 }
643 
644 void mark_buffer_async_write(struct buffer_head *bh)
645 {
646 	bh->b_end_io = end_buffer_async_write;
647 	set_buffer_async_write(bh);
648 }
649 EXPORT_SYMBOL(mark_buffer_async_write);
650 
651 
652 /*
653  * fs/buffer.c contains helper functions for buffer-backed address space's
654  * fsync functions.  A common requirement for buffer-based filesystems is
655  * that certain data from the backing blockdev needs to be written out for
656  * a successful fsync().  For example, ext2 indirect blocks need to be
657  * written back and waited upon before fsync() returns.
658  *
659  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
660  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
661  * management of a list of dependent buffers at ->i_mapping->private_list.
662  *
663  * Locking is a little subtle: try_to_free_buffers() will remove buffers
664  * from their controlling inode's queue when they are being freed.  But
665  * try_to_free_buffers() will be operating against the *blockdev* mapping
666  * at the time, not against the S_ISREG file which depends on those buffers.
667  * So the locking for private_list is via the private_lock in the address_space
668  * which backs the buffers.  Which is different from the address_space
669  * against which the buffers are listed.  So for a particular address_space,
670  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
671  * mapping->private_list will always be protected by the backing blockdev's
672  * ->private_lock.
673  *
674  * Which introduces a requirement: all buffers on an address_space's
675  * ->private_list must be from the same address_space: the blockdev's.
676  *
677  * address_spaces which do not place buffers at ->private_list via these
678  * utility functions are free to use private_lock and private_list for
679  * whatever they want.  The only requirement is that list_empty(private_list)
680  * be true at clear_inode() time.
681  *
682  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
683  * filesystems should do that.  invalidate_inode_buffers() should just go
684  * BUG_ON(!list_empty).
685  *
686  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
687  * take an address_space, not an inode.  And it should be called
688  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
689  * queued up.
690  *
691  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
692  * list if it is already on a list.  Because if the buffer is on a list,
693  * it *must* already be on the right one.  If not, the filesystem is being
694  * silly.  This will save a ton of locking.  But first we have to ensure
695  * that buffers are taken *off* the old inode's list when they are freed
696  * (presumably in truncate).  That requires careful auditing of all
697  * filesystems (do it inside bforget()).  It could also be done by bringing
698  * b_inode back.
699  */
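
/*
 * The locking rule above, condensed into a sketch (this is essentially
 * what mark_buffer_dirty_inode() does below): note which mapping
 * supplies the lock and which supplies the list:
 *
 *	struct address_space *buffer_mapping = bh->b_page->mapping;
 *
 *	spin_lock(&buffer_mapping->private_lock);
 *	list_move_tail(&bh->b_assoc_buffers, &mapping->private_list);
 *	spin_unlock(&buffer_mapping->private_lock);
 */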
700 
701 /*
702  * The buffer's backing address_space's private_lock must be held
703  */
704 static inline void __remove_assoc_queue(struct buffer_head *bh)
705 {
706 	list_del_init(&bh->b_assoc_buffers);
707 }
708 
709 int inode_has_buffers(struct inode *inode)
710 {
711 	return !list_empty(&inode->i_data.private_list);
712 }
713 
714 /*
715  * osync is designed to support O_SYNC io.  It waits synchronously for
716  * all already-submitted IO to complete, but does not queue any new
717  * writes to the disk.
718  *
719  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
720  * you dirty the buffers, and then use osync_inode_buffers to wait for
721  * completion.  Any other dirty buffers which are not yet queued for
722  * write will not be flushed to disk by the osync.
723  */
724 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
725 {
726 	struct buffer_head *bh;
727 	struct list_head *p;
728 	int err = 0;
729 
730 	spin_lock(lock);
731 repeat:
732 	list_for_each_prev(p, list) {
733 		bh = BH_ENTRY(p);
734 		if (buffer_locked(bh)) {
735 			get_bh(bh);
736 			spin_unlock(lock);
737 			wait_on_buffer(bh);
738 			if (!buffer_uptodate(bh))
739 				err = -EIO;
740 			brelse(bh);
741 			spin_lock(lock);
742 			goto repeat;
743 		}
744 	}
745 	spin_unlock(lock);
746 	return err;
747 }
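
/*
 * The O_SYNC pattern described above, as a sketch (assuming the caller
 * holds a reference on each buffer it queues): start each write as the
 * buffer is dirtied, then wait only for what was queued:
 *
 *	mark_buffer_dirty(bh);
 *	ll_rw_block(WRITE, 1, &bh);
 *	...
 *	err = osync_buffers_list(lock, list);
 */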
748 
749 /**
750  * sync_mapping_buffers - write out and wait upon a mapping's "associated"
751  *                        buffers
752  * @mapping: the mapping which wants those buffers written
753  *
754  * Starts I/O against the buffers at mapping->private_list, and waits upon
755  * that I/O.
756  *
757  * Basically, this is a convenience function for fsync().
758  * @mapping is a file or directory which needs those buffers to be written for
759  * a successful fsync().
760  */
761 int sync_mapping_buffers(struct address_space *mapping)
762 {
763 	struct address_space *buffer_mapping = mapping->assoc_mapping;
764 
765 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
766 		return 0;
767 
768 	return fsync_buffers_list(&buffer_mapping->private_lock,
769 					&mapping->private_list);
770 }
771 EXPORT_SYMBOL(sync_mapping_buffers);
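
/*
 * Sketch of a filesystem fsync method built on this helper (myfs_fsync
 * is hypothetical; real implementations usually write the inode too):
 *
 *	static int myfs_fsync(struct file *file, struct dentry *dentry,
 *				int datasync)
 *	{
 *		return sync_mapping_buffers(dentry->d_inode->i_mapping);
 *	}
 */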
772 
773 /*
774  * Called when we've recently written block `bblock', and it is known that
775  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
776  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
777  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
778  */
779 void write_boundary_block(struct block_device *bdev,
780 			sector_t bblock, unsigned blocksize)
781 {
782 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
783 	if (bh) {
784 		if (buffer_dirty(bh))
785 			ll_rw_block(WRITE, 1, &bh);
786 		put_bh(bh);
787 	}
788 }
789 
790 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
791 {
792 	struct address_space *mapping = inode->i_mapping;
793 	struct address_space *buffer_mapping = bh->b_page->mapping;
794 
795 	mark_buffer_dirty(bh);
796 	if (!mapping->assoc_mapping) {
797 		mapping->assoc_mapping = buffer_mapping;
798 	} else {
799 		BUG_ON(mapping->assoc_mapping != buffer_mapping);
800 	}
801 	if (list_empty(&bh->b_assoc_buffers)) {
802 		spin_lock(&buffer_mapping->private_lock);
803 		list_move_tail(&bh->b_assoc_buffers,
804 				&mapping->private_list);
805 		spin_unlock(&buffer_mapping->private_lock);
806 	}
807 }
808 EXPORT_SYMBOL(mark_buffer_dirty_inode);
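
/*
 * Intended use, as a sketch: after modifying a metadata block (say, an
 * indirect block) on behalf of an inode, associate it with the inode
 * so a later fsync() of that inode also writes this buffer:
 *
 *	struct buffer_head *bh = sb_getblk(inode->i_sb, blocknr);
 *	... modify bh->b_data ...
 *	mark_buffer_dirty_inode(bh, inode);
 *	brelse(bh);
 */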
809 
810 /*
811  * Add a page to the dirty page list.
812  *
813  * It is a sad fact of life that this function is called from several places
814  * deeply under spinlocking.  It may not sleep.
815  *
816  * If the page has buffers, the uptodate buffers are set dirty, to preserve
817  * dirty-state coherency between the page and the buffers.  If the page does
818  * not have buffers then when they are later attached they will all be set
819  * dirty.
820  *
821  * The buffers are dirtied before the page is dirtied.  There's a small race
822  * window in which a writepage caller may see the page cleanness but not the
823  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
824  * before the buffers, a concurrent writepage caller could clear the page dirty
825  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
826  * page on the dirty page list.
827  *
828  * We use private_lock to lock against try_to_free_buffers while using the
829  * page's buffer list.  Also use this to protect against clean buffers being
830  * added to the page after it was set dirty.
831  *
832  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
833  * address_space though.
834  */
835 int __set_page_dirty_buffers(struct page *page)
836 {
837 	struct address_space * const mapping = page->mapping;
838 
839 	spin_lock(&mapping->private_lock);
840 	if (page_has_buffers(page)) {
841 		struct buffer_head *head = page_buffers(page);
842 		struct buffer_head *bh = head;
843 
844 		do {
845 			set_buffer_dirty(bh);
846 			bh = bh->b_this_page;
847 		} while (bh != head);
848 	}
849 	spin_unlock(&mapping->private_lock);
850 
851 	if (!TestSetPageDirty(page)) {
852 		write_lock_irq(&mapping->tree_lock);
853 		if (page->mapping) {	/* Race with truncate? */
854 			if (mapping_cap_account_dirty(mapping))
855 				__inc_zone_page_state(page, NR_FILE_DIRTY);
856 			radix_tree_tag_set(&mapping->page_tree,
857 						page_index(page),
858 						PAGECACHE_TAG_DIRTY);
859 		}
860 		write_unlock_irq(&mapping->tree_lock);
861 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
862 		return 1;
863 	}
864 	return 0;
865 }
866 EXPORT_SYMBOL(__set_page_dirty_buffers);
867 
868 /*
869  * Write out and wait upon a list of buffers.
870  *
871  * We have conflicting pressures: we want to make sure that all
872  * initially dirty buffers get waited on, but that any subsequently
873  * dirtied buffers don't.  After all, we don't want fsync to last
874  * forever if somebody is actively writing to the file.
875  *
876  * Do this in two main stages: first we copy dirty buffers to a
877  * temporary inode list, queueing the writes as we go.  Then we clean
878  * up, waiting for those writes to complete.
879  *
880  * During this second stage, any subsequent updates to the file may end
881  * up refiling the buffer on the original inode's dirty list again, so
882  * there is a chance we will end up with a buffer queued for write but
883  * not yet completed on that list.  So, as a final cleanup we go through
884  * the osync code to catch these locked, dirty buffers without requeuing
885  * any newly dirty buffers for write.
886  */
887 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
888 {
889 	struct buffer_head *bh;
890 	struct list_head tmp;
891 	int err = 0, err2;
892 
893 	INIT_LIST_HEAD(&tmp);
894 
895 	spin_lock(lock);
896 	while (!list_empty(list)) {
897 		bh = BH_ENTRY(list->next);
898 		list_del_init(&bh->b_assoc_buffers);
899 		if (buffer_dirty(bh) || buffer_locked(bh)) {
900 			list_add(&bh->b_assoc_buffers, &tmp);
901 			if (buffer_dirty(bh)) {
902 				get_bh(bh);
903 				spin_unlock(lock);
904 				/*
905 				 * Ensure any pending I/O completes so that
906 				 * ll_rw_block() actually writes the current
907 				 * contents - it is a noop if I/O is still in
908 				 * flight on potentially older contents.
909 				 */
910 				ll_rw_block(SWRITE, 1, &bh);
911 				brelse(bh);
912 				spin_lock(lock);
913 			}
914 		}
915 	}
916 
917 	while (!list_empty(&tmp)) {
918 		bh = BH_ENTRY(tmp.prev);
919 		__remove_assoc_queue(bh);
920 		get_bh(bh);
921 		spin_unlock(lock);
922 		wait_on_buffer(bh);
923 		if (!buffer_uptodate(bh))
924 			err = -EIO;
925 		brelse(bh);
926 		spin_lock(lock);
927 	}
928 
929 	spin_unlock(lock);
930 	err2 = osync_buffers_list(lock, list);
931 	if (err)
932 		return err;
933 	else
934 		return err2;
935 }
936 
937 /*
938  * Invalidate any and all dirty buffers on a given inode.  We are
939  * probably unmounting the fs, but that doesn't mean we have already
940  * done a sync().  Just drop the buffers from the inode list.
941  *
942  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
943  * assumes that all the buffers are against the blockdev.  Not true
944  * for reiserfs.
945  */
946 void invalidate_inode_buffers(struct inode *inode)
947 {
948 	if (inode_has_buffers(inode)) {
949 		struct address_space *mapping = &inode->i_data;
950 		struct list_head *list = &mapping->private_list;
951 		struct address_space *buffer_mapping = mapping->assoc_mapping;
952 
953 		spin_lock(&buffer_mapping->private_lock);
954 		while (!list_empty(list))
955 			__remove_assoc_queue(BH_ENTRY(list->next));
956 		spin_unlock(&buffer_mapping->private_lock);
957 	}
958 }
959 
960 /*
961  * Remove any clean buffers from the inode's buffer list.  This is called
962  * when we're trying to free the inode itself.  Those buffers can pin it.
963  *
964  * Returns true if all buffers were removed.
965  */
966 int remove_inode_buffers(struct inode *inode)
967 {
968 	int ret = 1;
969 
970 	if (inode_has_buffers(inode)) {
971 		struct address_space *mapping = &inode->i_data;
972 		struct list_head *list = &mapping->private_list;
973 		struct address_space *buffer_mapping = mapping->assoc_mapping;
974 
975 		spin_lock(&buffer_mapping->private_lock);
976 		while (!list_empty(list)) {
977 			struct buffer_head *bh = BH_ENTRY(list->next);
978 			if (buffer_dirty(bh)) {
979 				ret = 0;
980 				break;
981 			}
982 			__remove_assoc_queue(bh);
983 		}
984 		spin_unlock(&buffer_mapping->private_lock);
985 	}
986 	return ret;
987 }
988 
989 /*
990  * Create the appropriate buffers when given a page for data area and
991  * the size of each buffer.. Use the bh->b_this_page linked list to
992  * follow the buffers created.  Return NULL if unable to create more
993  * buffers.
994  *
995  * The retry flag is used to distinguish async IO (paging, swapping),
996  * which may not fail, from ordinary buffer allocations, which may.
997  */
998 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
999 		int retry)
1000 {
1001 	struct buffer_head *bh, *head;
1002 	long offset;
1003 
1004 try_again:
1005 	head = NULL;
1006 	offset = PAGE_SIZE;
1007 	while ((offset -= size) >= 0) {
1008 		bh = alloc_buffer_head(GFP_NOFS);
1009 		if (!bh)
1010 			goto no_grow;
1011 
1012 		bh->b_bdev = NULL;
1013 		bh->b_this_page = head;
1014 		bh->b_blocknr = -1;
1015 		head = bh;
1016 
1017 		bh->b_state = 0;
1018 		atomic_set(&bh->b_count, 0);
1019 		bh->b_private = NULL;
1020 		bh->b_size = size;
1021 
1022 		/* Link the buffer to its page */
1023 		set_bh_page(bh, page, offset);
1024 
1025 		init_buffer(bh, NULL, NULL);
1026 	}
1027 	return head;
1028 /*
1029  * In case anything failed, we just free everything we got.
1030  */
1031 no_grow:
1032 	if (head) {
1033 		do {
1034 			bh = head;
1035 			head = head->b_this_page;
1036 			free_buffer_head(bh);
1037 		} while (head);
1038 	}
1039 
1040 	/*
1041 	 * Return failure for non-async IO requests.  Async IO requests
1042 	 * are not allowed to fail, so we have to wait until buffer heads
1043 	 * become available.  But we don't want tasks sleeping with
1044 	 * partially complete buffers, so all were released above.
1045 	 */
1046 	if (!retry)
1047 		return NULL;
1048 
1049 	/* We're _really_ low on memory. Now we just
1050 	 * wait for old buffer heads to become free due to
1051 	 * finishing IO.  Since this is an async request and
1052 	 * the reserve list is empty, we're sure there are
1053 	 * async buffer heads in use.
1054 	 */
1055 	free_more_memory();
1056 	goto try_again;
1057 }
1058 EXPORT_SYMBOL_GPL(alloc_page_buffers);
1059 
1060 static inline void
1061 link_dev_buffers(struct page *page, struct buffer_head *head)
1062 {
1063 	struct buffer_head *bh, *tail;
1064 
1065 	bh = head;
1066 	do {
1067 		tail = bh;
1068 		bh = bh->b_this_page;
1069 	} while (bh);
1070 	tail->b_this_page = head;
1071 	attach_page_buffers(page, head);
1072 }
1073 
1074 /*
1075  * Initialise the state of a blockdev page's buffers.
1076  */
1077 static void
1078 init_page_buffers(struct page *page, struct block_device *bdev,
1079 			sector_t block, int size)
1080 {
1081 	struct buffer_head *head = page_buffers(page);
1082 	struct buffer_head *bh = head;
1083 	int uptodate = PageUptodate(page);
1084 
1085 	do {
1086 		if (!buffer_mapped(bh)) {
1087 			init_buffer(bh, NULL, NULL);
1088 			bh->b_bdev = bdev;
1089 			bh->b_blocknr = block;
1090 			if (uptodate)
1091 				set_buffer_uptodate(bh);
1092 			set_buffer_mapped(bh);
1093 		}
1094 		block++;
1095 		bh = bh->b_this_page;
1096 	} while (bh != head);
1097 }
1098 
1099 /*
1100  * Create the page-cache page that contains the requested block.
1101  *
1102  * This is used purely for blockdev mappings.
1103  */
1104 static struct page *
1105 grow_dev_page(struct block_device *bdev, sector_t block,
1106 		pgoff_t index, int size)
1107 {
1108 	struct inode *inode = bdev->bd_inode;
1109 	struct page *page;
1110 	struct buffer_head *bh;
1111 
1112 	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1113 	if (!page)
1114 		return NULL;
1115 
1116 	BUG_ON(!PageLocked(page));
1117 
1118 	if (page_has_buffers(page)) {
1119 		bh = page_buffers(page);
1120 		if (bh->b_size == size) {
1121 			init_page_buffers(page, bdev, block, size);
1122 			return page;
1123 		}
1124 		if (!try_to_free_buffers(page))
1125 			goto failed;
1126 	}
1127 
1128 	/*
1129 	 * Allocate some buffers for this page
1130 	 */
1131 	bh = alloc_page_buffers(page, size, 0);
1132 	if (!bh)
1133 		goto failed;
1134 
1135 	/*
1136 	 * Link the page to the buffers and initialise them.  Take the
1137 	 * lock to be atomic wrt __find_get_block(), which does not
1138 	 * run under the page lock.
1139 	 */
1140 	spin_lock(&inode->i_mapping->private_lock);
1141 	link_dev_buffers(page, bh);
1142 	init_page_buffers(page, bdev, block, size);
1143 	spin_unlock(&inode->i_mapping->private_lock);
1144 	return page;
1145 
1146 failed:
1147 	BUG();
1148 	unlock_page(page);
1149 	page_cache_release(page);
1150 	return NULL;
1151 }
1152 
1153 /*
1154  * Create buffers for the specified block device block's page.  If
1155  * that page was dirty, the buffers are set dirty also.
1156  *
1157  * Except that's a bug.  Attaching dirty buffers to a dirty
1158  * blockdev's page can result in filesystem corruption, because
1159  * some of those buffers may be aliases of filesystem data.
1160  * grow_dev_page() will go BUG() if this happens.
1161  */
1162 static int
1163 grow_buffers(struct block_device *bdev, sector_t block, int size)
1164 {
1165 	struct page *page;
1166 	pgoff_t index;
1167 	int sizebits;
1168 
1169 	sizebits = -1;
1170 	do {
1171 		sizebits++;
1172 	} while ((size << sizebits) < PAGE_SIZE);
1173 
1174 	index = block >> sizebits;
1175 	block = index << sizebits;
1176 
1177 	/* Create a page with the proper size buffers.. */
1178 	page = grow_dev_page(bdev, block, index, size);
1179 	if (!page)
1180 		return 0;
1181 	unlock_page(page);
1182 	page_cache_release(page);
1183 	return 1;
1184 }
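
/*
 * Worked example for the arithmetic above (assuming PAGE_SIZE == 4096):
 * with size == 1024, sizebits ends up as 2, so block 4103 lives in
 * page index 4103 >> 2 == 1025, and that page's first block is
 * 1025 << 2 == 4100.
 */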
1185 
1186 static struct buffer_head *
1187 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1188 {
1189 	/* Size must be multiple of hard sectorsize */
1190 	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1191 			(size < 512 || size > PAGE_SIZE))) {
1192 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1193 					size);
1194 		printk(KERN_ERR "hardsect size: %d\n",
1195 					bdev_hardsect_size(bdev));
1196 
1197 		dump_stack();
1198 		return NULL;
1199 	}
1200 
1201 	for (;;) {
1202 		struct buffer_head * bh;
1203 
1204 		bh = __find_get_block(bdev, block, size);
1205 		if (bh)
1206 			return bh;
1207 
1208 		if (!grow_buffers(bdev, block, size))
1209 			free_more_memory();
1210 	}
1211 }
1212 
1213 /*
1214  * The relationship between dirty buffers and dirty pages:
1215  *
1216  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1217  * the page is tagged dirty in its radix tree.
1218  *
1219  * At all times, the dirtiness of the buffers represents the dirtiness of
1220  * subsections of the page.  If the page has buffers, the page dirty bit is
1221  * merely a hint about the true dirty state.
1222  *
1223  * When a page is set dirty in its entirety, all its buffers are marked dirty
1224  * (if the page has buffers).
1225  *
1226  * When a buffer is marked dirty, its page is dirtied, but the page's other
1227  * buffers are not.
1228  *
1229  * Also.  When blockdev buffers are explicitly read with bread(), they
1230  * individually become uptodate.  But their backing page remains not
1231  * uptodate - even if all of its buffers are uptodate.  A subsequent
1232  * block_read_full_page() against that page will discover all the uptodate
1233  * buffers, will set the page uptodate and will perform no I/O.
1234  */
1235 
1236 /**
1237  * mark_buffer_dirty - mark a buffer_head as needing writeout
1238  * @bh: the buffer_head to mark dirty
1239  *
1240  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1241  * backing page dirty, then tag the page as dirty in its address_space's radix
1242  * tree and then attach the address_space's inode to its superblock's dirty
1243  * inode list.
1244  *
1245  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1246  * mapping->tree_lock and the global inode_lock.
1247  */
1248 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1249 {
1250 	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1251 		__set_page_dirty_nobuffers(bh->b_page);
1252 }
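
/*
 * Typical read-modify-dirty sequence, as a sketch ('src' is a
 * hypothetical source of the new contents):
 *
 *	struct buffer_head *bh = __bread(bdev, block, size);
 *	if (bh) {
 *		memcpy(bh->b_data, src, bh->b_size);
 *		mark_buffer_dirty(bh);
 *		brelse(bh);
 *	}
 *
 * Writeback then happens via the normal dirty-page paths.
 */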
1253 
1254 /*
1255  * Decrement a buffer_head's reference count.  If all buffers against a page
1256  * have zero reference count, are clean and unlocked, and if the page is clean
1257  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1258  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1259  * a page but it ends up not being freed, and buffers may later be reattached).
1260  */
1261 void __brelse(struct buffer_head * buf)
1262 {
1263 	if (atomic_read(&buf->b_count)) {
1264 		put_bh(buf);
1265 		return;
1266 	}
1267 	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1268 	WARN_ON(1);
1269 }
1270 
1271 /*
1272  * bforget() is like brelse(), except it discards any
1273  * potentially dirty data.
1274  */
1275 void __bforget(struct buffer_head *bh)
1276 {
1277 	clear_buffer_dirty(bh);
1278 	if (!list_empty(&bh->b_assoc_buffers)) {
1279 		struct address_space *buffer_mapping = bh->b_page->mapping;
1280 
1281 		spin_lock(&buffer_mapping->private_lock);
1282 		list_del_init(&bh->b_assoc_buffers);
1283 		spin_unlock(&buffer_mapping->private_lock);
1284 	}
1285 	__brelse(bh);
1286 }
1287 
1288 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1289 {
1290 	lock_buffer(bh);
1291 	if (buffer_uptodate(bh)) {
1292 		unlock_buffer(bh);
1293 		return bh;
1294 	} else {
1295 		get_bh(bh);
1296 		bh->b_end_io = end_buffer_read_sync;
1297 		submit_bh(READ, bh);
1298 		wait_on_buffer(bh);
1299 		if (buffer_uptodate(bh))
1300 			return bh;
1301 	}
1302 	brelse(bh);
1303 	return NULL;
1304 }
1305 
1306 /*
1307  * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
1308  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1309  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1310  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1311  * CPU's LRUs at the same time.
1312  *
1313  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1314  * sb_find_get_block().
1315  *
1316  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1317  * a local interrupt disable for that.
1318  */
1319 
1320 #define BH_LRU_SIZE	8
1321 
1322 struct bh_lru {
1323 	struct buffer_head *bhs[BH_LRU_SIZE];
1324 };
1325 
1326 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1327 
1328 #ifdef CONFIG_SMP
1329 #define bh_lru_lock()	local_irq_disable()
1330 #define bh_lru_unlock()	local_irq_enable()
1331 #else
1332 #define bh_lru_lock()	preempt_disable()
1333 #define bh_lru_unlock()	preempt_enable()
1334 #endif
1335 
1336 static inline void check_irqs_on(void)
1337 {
1338 #ifdef irqs_disabled
1339 	BUG_ON(irqs_disabled());
1340 #endif
1341 }
1342 
1343 /*
1344  * The LRU management algorithm is dopey-but-simple.  Sorry.
1345  */
1346 static void bh_lru_install(struct buffer_head *bh)
1347 {
1348 	struct buffer_head *evictee = NULL;
1349 	struct bh_lru *lru;
1350 
1351 	check_irqs_on();
1352 	bh_lru_lock();
1353 	lru = &__get_cpu_var(bh_lrus);
1354 	if (lru->bhs[0] != bh) {
1355 		struct buffer_head *bhs[BH_LRU_SIZE];
1356 		int in;
1357 		int out = 0;
1358 
1359 		get_bh(bh);
1360 		bhs[out++] = bh;
1361 		for (in = 0; in < BH_LRU_SIZE; in++) {
1362 			struct buffer_head *bh2 = lru->bhs[in];
1363 
1364 			if (bh2 == bh) {
1365 				__brelse(bh2);
1366 			} else {
1367 				if (out >= BH_LRU_SIZE) {
1368 					BUG_ON(evictee != NULL);
1369 					evictee = bh2;
1370 				} else {
1371 					bhs[out++] = bh2;
1372 				}
1373 			}
1374 		}
1375 		while (out < BH_LRU_SIZE)
1376 			bhs[out++] = NULL;
1377 		memcpy(lru->bhs, bhs, sizeof(bhs));
1378 	}
1379 	bh_lru_unlock();
1380 
1381 	if (evictee)
1382 		__brelse(evictee);
1383 }
1384 
1385 /*
1386  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1387  */
1388 static struct buffer_head *
1389 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1390 {
1391 	struct buffer_head *ret = NULL;
1392 	struct bh_lru *lru;
1393 	int i;
1394 
1395 	check_irqs_on();
1396 	bh_lru_lock();
1397 	lru = &__get_cpu_var(bh_lrus);
1398 	for (i = 0; i < BH_LRU_SIZE; i++) {
1399 		struct buffer_head *bh = lru->bhs[i];
1400 
1401 		if (bh && bh->b_bdev == bdev &&
1402 				bh->b_blocknr == block && bh->b_size == size) {
1403 			if (i) {
1404 				while (i) {
1405 					lru->bhs[i] = lru->bhs[i - 1];
1406 					i--;
1407 				}
1408 				lru->bhs[0] = bh;
1409 			}
1410 			get_bh(bh);
1411 			ret = bh;
1412 			break;
1413 		}
1414 	}
1415 	bh_lru_unlock();
1416 	return ret;
1417 }
1418 
1419 /*
1420  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1421  * it in the LRU and mark it as accessed.  If it is not present then return
1422  * NULL
1423  */
1424 struct buffer_head *
1425 __find_get_block(struct block_device *bdev, sector_t block, int size)
1426 {
1427 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1428 
1429 	if (bh == NULL) {
1430 		bh = __find_get_block_slow(bdev, block);
1431 		if (bh)
1432 			bh_lru_install(bh);
1433 	}
1434 	if (bh)
1435 		touch_buffer(bh);
1436 	return bh;
1437 }
1438 EXPORT_SYMBOL(__find_get_block);
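
/*
 * Usage sketch: __find_get_block() is a non-blocking cache lookup and
 * never reads from disk; NULL just means the block is not cached:
 *
 *	struct buffer_head *bh = __find_get_block(bdev, block, size);
 *	if (bh) {
 *		... inspect bh ...
 *		brelse(bh);
 *	}
 */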
1439 
1440 /*
1441  * __getblk will locate (and, if necessary, create) the buffer_head
1442  * which corresponds to the passed block_device, block and size. The
1443  * returned buffer has its reference count incremented.
1444  *
1445  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1446  * illegal block number, __getblk() will happily return a buffer_head
1447  * which represents the non-existent block.  Very weird.
1448  *
1449  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1450  * attempt is failing.  FIXME, perhaps?
1451  */
1452 struct buffer_head *
1453 __getblk(struct block_device *bdev, sector_t block, int size)
1454 {
1455 	struct buffer_head *bh = __find_get_block(bdev, block, size);
1456 
1457 	might_sleep();
1458 	if (bh == NULL)
1459 		bh = __getblk_slow(bdev, block, size);
1460 	return bh;
1461 }
1462 EXPORT_SYMBOL(__getblk);
1463 
1464 /*
1465  * Do async read-ahead on a buffer..
1466  */
1467 void __breadahead(struct block_device *bdev, sector_t block, int size)
1468 {
1469 	struct buffer_head *bh = __getblk(bdev, block, size);
1470 	if (likely(bh)) {
1471 		ll_rw_block(READA, 1, &bh);
1472 		brelse(bh);
1473 	}
1474 }
1475 EXPORT_SYMBOL(__breadahead);
1476 
1477 /**
1478  *  __bread() - reads a specified block and returns the bh
1479  *  @bdev: the block_device to read from
1480  *  @block: number of block
1481  *  @size: size (in bytes) to read
1482  *
1483  *  Reads a specified block, and returns buffer head that contains it.
1484  *  It returns NULL if the block was unreadable.
1485  */
1486 struct buffer_head *
1487 __bread(struct block_device *bdev, sector_t block, int size)
1488 {
1489 	struct buffer_head *bh = __getblk(bdev, block, size);
1490 
1491 	if (likely(bh) && !buffer_uptodate(bh))
1492 		bh = __bread_slow(bh);
1493 	return bh;
1494 }
1495 EXPORT_SYMBOL(__bread);
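
/*
 * Usage sketch: synchronous metadata read.  __bread() returns an
 * uptodate buffer, or NULL if the read failed:
 *
 *	struct buffer_head *bh = __bread(bdev, blocknr, 512);
 *	if (!bh)
 *		return -EIO;
 *	... examine bh->b_data ...
 *	brelse(bh);
 */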
1496 
1497 /*
1498  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1499  * This doesn't race because it runs in each cpu either in irq
1500  * or with preempt disabled.
1501  */
1502 static void invalidate_bh_lru(void *arg)
1503 {
1504 	struct bh_lru *b = &get_cpu_var(bh_lrus);
1505 	int i;
1506 
1507 	for (i = 0; i < BH_LRU_SIZE; i++) {
1508 		brelse(b->bhs[i]);
1509 		b->bhs[i] = NULL;
1510 	}
1511 	put_cpu_var(bh_lrus);
1512 }
1513 
1514 static void invalidate_bh_lrus(void)
1515 {
1516 	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1517 }
1518 
1519 void set_bh_page(struct buffer_head *bh,
1520 		struct page *page, unsigned long offset)
1521 {
1522 	bh->b_page = page;
1523 	BUG_ON(offset >= PAGE_SIZE);
1524 	if (PageHighMem(page))
1525 		/*
1526 		 * This catches illegal uses and preserves the offset:
1527 		 */
1528 		bh->b_data = (char *)(0 + offset);
1529 	else
1530 		bh->b_data = page_address(page) + offset;
1531 }
1532 EXPORT_SYMBOL(set_bh_page);
1533 
1534 /*
1535  * Called when truncating a buffer on a page completely.
1536  */
1537 static void discard_buffer(struct buffer_head * bh)
1538 {
1539 	lock_buffer(bh);
1540 	clear_buffer_dirty(bh);
1541 	bh->b_bdev = NULL;
1542 	clear_buffer_mapped(bh);
1543 	clear_buffer_req(bh);
1544 	clear_buffer_new(bh);
1545 	clear_buffer_delay(bh);
1546 	unlock_buffer(bh);
1547 }
1548 
1549 /**
1550  * try_to_release_page() - release old fs-specific metadata on a page
1551  *
1552  * @page: the page which the kernel is trying to free
1553  * @gfp_mask: memory allocation flags (and I/O mode)
1554  *
1555  * The address_space is asked to try to release any data against the page
1556  * (presumably at page->private).  If the release was successful, return `1'.
1557  * Otherwise return zero.
1558  *
1559  * The @gfp_mask argument specifies whether I/O may be performed to release
1560  * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1561  *
1562  * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1563  */
1564 int try_to_release_page(struct page *page, gfp_t gfp_mask)
1565 {
1566 	struct address_space * const mapping = page->mapping;
1567 
1568 	BUG_ON(!PageLocked(page));
1569 	if (PageWriteback(page))
1570 		return 0;
1571 
1572 	if (mapping && mapping->a_ops->releasepage)
1573 		return mapping->a_ops->releasepage(page, gfp_mask);
1574 	return try_to_free_buffers(page);
1575 }
1576 EXPORT_SYMBOL(try_to_release_page);
1577 
1578 /**
1579  * block_invalidatepage - invalidate part or all of a buffer-backed page
1580  *
1581  * @page: the page which is affected
1582  * @offset: the index of the truncation point
1583  *
1584  * block_invalidatepage() is called when all or part of the page has become
1585  * invalidated by a truncate operation.
1586  *
1587  * block_invalidatepage() does not have to release all buffers, but it must
1588  * ensure that no dirty buffer is left outside @offset and that no I/O
1589  * is underway against any of the blocks which are outside the truncation
1590  * point.  Because the caller is about to free (and possibly reuse) those
1591  * blocks on-disk.
1592  */
1593 void block_invalidatepage(struct page *page, unsigned long offset)
1594 {
1595 	struct buffer_head *head, *bh, *next;
1596 	unsigned int curr_off = 0;
1597 
1598 	BUG_ON(!PageLocked(page));
1599 	if (!page_has_buffers(page))
1600 		goto out;
1601 
1602 	head = page_buffers(page);
1603 	bh = head;
1604 	do {
1605 		unsigned int next_off = curr_off + bh->b_size;
1606 		next = bh->b_this_page;
1607 
1608 		/*
1609 		 * is this block fully invalidated?
1610 		 */
1611 		if (offset <= curr_off)
1612 			discard_buffer(bh);
1613 		curr_off = next_off;
1614 		bh = next;
1615 	} while (bh != head);
1616 
1617 	/*
1618 	 * We release buffers only if the entire page is being invalidated.
1619 	 * The get_block cached value has been unconditionally invalidated,
1620 	 * so real IO is not possible anymore.
1621 	 */
1622 	if (offset == 0)
1623 		try_to_release_page(page, 0);
1624 out:
1625 	return;
1626 }
1627 EXPORT_SYMBOL(block_invalidatepage);
1628 
1629 void do_invalidatepage(struct page *page, unsigned long offset)
1630 {
1631 	void (*invalidatepage)(struct page *, unsigned long);
1632 	invalidatepage = page->mapping->a_ops->invalidatepage ? :
1633 		block_invalidatepage;
1634 	(*invalidatepage)(page, offset);
1635 }
1636 
1637 /*
1638  * We attach and possibly dirty the buffers atomically wrt
1639  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1640  * is already excluded via the page lock.
1641  */
1642 void create_empty_buffers(struct page *page,
1643 			unsigned long blocksize, unsigned long b_state)
1644 {
1645 	struct buffer_head *bh, *head, *tail;
1646 
1647 	head = alloc_page_buffers(page, blocksize, 1);
1648 	bh = head;
1649 	do {
1650 		bh->b_state |= b_state;
1651 		tail = bh;
1652 		bh = bh->b_this_page;
1653 	} while (bh);
1654 	tail->b_this_page = head;
1655 
1656 	spin_lock(&page->mapping->private_lock);
1657 	if (PageUptodate(page) || PageDirty(page)) {
1658 		bh = head;
1659 		do {
1660 			if (PageDirty(page))
1661 				set_buffer_dirty(bh);
1662 			if (PageUptodate(page))
1663 				set_buffer_uptodate(bh);
1664 			bh = bh->b_this_page;
1665 		} while (bh != head);
1666 	}
1667 	attach_page_buffers(page, head);
1668 	spin_unlock(&page->mapping->private_lock);
1669 }
1670 EXPORT_SYMBOL(create_empty_buffers);
1671 
1672 /*
1673  * We are taking a block for data and we don't want any output from any
1674  * buffer-cache aliases starting from the return of this function and
1675  * until the moment when something explicitly marks the buffer
1676  * dirty (hopefully that will not happen until we free that block ;-)
1677  * We don't even need to mark it not-uptodate - nobody can expect
1678  * anything from a newly allocated buffer anyway. We used to use
1679  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1680  * don't want to mark the alias unmapped, for example - it would confuse
1681  * anyone who might pick it up with bread() afterwards...
1682  *
1683  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1684  * be writeout I/O going on against recently-freed buffers.  We don't
1685  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1686  * only if we really need to.  That happens here.
1687  */
1688 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1689 {
1690 	struct buffer_head *old_bh;
1691 
1692 	might_sleep();
1693 
1694 	old_bh = __find_get_block_slow(bdev, block);
1695 	if (old_bh) {
1696 		clear_buffer_dirty(old_bh);
1697 		wait_on_buffer(old_bh);
1698 		clear_buffer_req(old_bh);
1699 		__brelse(old_bh);
1700 	}
1701 }
1702 EXPORT_SYMBOL(unmap_underlying_metadata);
1703 
1704 /*
1705  * NOTE! All mapped/uptodate combinations are valid:
1706  *
1707  *	Mapped	Uptodate	Meaning
1708  *
1709  *	No	No		"unknown" - must do get_block()
1710  *	No	Yes		"hole" - zero-filled
1711  *	Yes	No		"allocated" - allocated on disk, not read in
1712  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1713  *
1714  * "Dirty" is valid only with the last case (mapped+uptodate).
1715  */
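
/*
 * Sketch of how a get_block-based reader applies the table above
 * (simplified; buffer locking and I/O completion handling omitted):
 *
 *	if (!buffer_mapped(bh))
 *		get_block(inode, iblock, bh, 0);
 *	if (buffer_mapped(bh) && !buffer_uptodate(bh))
 *		submit_bh(READ, bh);
 *	else if (!buffer_mapped(bh))
 *		memset(bh->b_data, 0, bh->b_size);	(a hole)
 */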
1716 
1717 /*
1718  * While block_write_full_page is writing back the dirty buffers under
1719  * the page lock, whoever dirtied the buffers may decide to clean them
1720  * again at any time.  We handle that by only looking at the buffer
1721  * state inside lock_buffer().
1722  *
1723  * If block_write_full_page() is called for regular writeback
1724  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1725  * locked buffer.   This only can happen if someone has written the buffer
1726  * directly, with submit_bh().  At the address_space level PageWriteback
1727  * prevents this contention from occurring.
1728  */
1729 static int __block_write_full_page(struct inode *inode, struct page *page,
1730 			get_block_t *get_block, struct writeback_control *wbc)
1731 {
1732 	int err;
1733 	sector_t block;
1734 	sector_t last_block;
1735 	struct buffer_head *bh, *head;
1736 	const unsigned blocksize = 1 << inode->i_blkbits;
1737 	int nr_underway = 0;
1738 
1739 	BUG_ON(!PageLocked(page));
1740 
1741 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1742 
1743 	if (!page_has_buffers(page)) {
1744 		create_empty_buffers(page, blocksize,
1745 					(1 << BH_Dirty)|(1 << BH_Uptodate));
1746 	}
1747 
1748 	/*
1749 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1750 	 * here, and the (potentially unmapped) buffers may become dirty at
1751 	 * any time.  If a buffer becomes dirty here after we've inspected it
1752 	 * then we just miss that fact, and the page stays dirty.
1753 	 *
1754 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1755 	 * handle that here by just cleaning them.
1756 	 */
1757 
1758 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1759 	head = page_buffers(page);
1760 	bh = head;
1761 
1762 	/*
1763 	 * Get all the dirty buffers mapped to disk addresses and
1764 	 * handle any aliases from the underlying blockdev's mapping.
1765 	 */
1766 	do {
1767 		if (block > last_block) {
1768 			/*
1769 			 * mapped buffers outside i_size will occur, because
1770 			 * this page can be outside i_size when there is a
1771 			 * truncate in progress.
1772 			 */
1773 			/*
1774 			 * The buffer was zeroed by block_write_full_page()
1775 			 */
1776 			clear_buffer_dirty(bh);
1777 			set_buffer_uptodate(bh);
1778 		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1779 			WARN_ON(bh->b_size != blocksize);
1780 			err = get_block(inode, block, bh, 1);
1781 			if (err)
1782 				goto recover;
1783 			if (buffer_new(bh)) {
1784 				/* blockdev mappings never come here */
1785 				clear_buffer_new(bh);
1786 				unmap_underlying_metadata(bh->b_bdev,
1787 							bh->b_blocknr);
1788 			}
1789 		}
1790 		bh = bh->b_this_page;
1791 		block++;
1792 	} while (bh != head);
1793 
1794 	do {
1795 		if (!buffer_mapped(bh))
1796 			continue;
1797 		/*
1798 		 * If it's a fully non-blocking write attempt and we cannot
1799 		 * lock the buffer then redirty the page.  Note that this can
1800 		 * potentially cause a busy-wait loop from pdflush and kswapd
1801 		 * activity, but those code paths have their own higher-level
1802 		 * throttling.
1803 		 */
1804 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1805 			lock_buffer(bh);
1806 		} else if (test_set_buffer_locked(bh)) {
1807 			redirty_page_for_writepage(wbc, page);
1808 			continue;
1809 		}
1810 		if (test_clear_buffer_dirty(bh)) {
1811 			mark_buffer_async_write(bh);
1812 		} else {
1813 			unlock_buffer(bh);
1814 		}
1815 	} while ((bh = bh->b_this_page) != head);
1816 
1817 	/*
1818 	 * The page and its buffers are protected by PageWriteback(), so we can
1819 	 * drop the bh refcounts early.
1820 	 */
1821 	BUG_ON(PageWriteback(page));
1822 	set_page_writeback(page);
1823 
1824 	do {
1825 		struct buffer_head *next = bh->b_this_page;
1826 		if (buffer_async_write(bh)) {
1827 			submit_bh(WRITE, bh);
1828 			nr_underway++;
1829 		}
1830 		bh = next;
1831 	} while (bh != head);
1832 	unlock_page(page);
1833 
1834 	err = 0;
1835 done:
1836 	if (nr_underway == 0) {
1837 		/*
1838 		 * The page was marked dirty, but the buffers were
1839 		 * clean.  Someone wrote them back by hand with
1840 		 * ll_rw_block/submit_bh.  A rare case.
1841 		 */
1842 		int uptodate = 1;
1843 		do {
1844 			if (!buffer_uptodate(bh)) {
1845 				uptodate = 0;
1846 				break;
1847 			}
1848 			bh = bh->b_this_page;
1849 		} while (bh != head);
1850 		if (uptodate)
1851 			SetPageUptodate(page);
1852 		end_page_writeback(page);
1853 		/*
1854 		 * The page and buffer_heads can be released at any time from
1855 		 * here on.
1856 		 */
1857 		wbc->pages_skipped++;	/* We didn't write this page */
1858 	}
1859 	return err;
1860 
1861 recover:
1862 	/*
1863 	 * ENOSPC, or some other error.  We may already have added some
1864 	 * blocks to the file, so we need to write these out to avoid
1865 	 * exposing stale data.
1866 	 * The page is currently locked and not marked for writeback
1867 	 */
1868 	bh = head;
1869 	/* Recovery: lock and submit the mapped buffers */
1870 	do {
1871 		if (buffer_mapped(bh) && buffer_dirty(bh)) {
1872 			lock_buffer(bh);
1873 			mark_buffer_async_write(bh);
1874 		} else {
1875 			/*
1876 			 * The buffer may have been set dirty during
1877 			 * attachment to a dirty page.
1878 			 */
1879 			clear_buffer_dirty(bh);
1880 		}
1881 	} while ((bh = bh->b_this_page) != head);
1882 	SetPageError(page);
1883 	BUG_ON(PageWriteback(page));
1884 	set_page_writeback(page);
1885 	unlock_page(page);
1886 	do {
1887 		struct buffer_head *next = bh->b_this_page;
1888 		if (buffer_async_write(bh)) {
1889 			clear_buffer_dirty(bh);
1890 			submit_bh(WRITE, bh);
1891 			nr_underway++;
1892 		}
1893 		bh = next;
1894 	} while (bh != head);
1895 	goto done;
1896 }
1897 
1898 static int __block_prepare_write(struct inode *inode, struct page *page,
1899 		unsigned from, unsigned to, get_block_t *get_block)
1900 {
1901 	unsigned block_start, block_end;
1902 	sector_t block;
1903 	int err = 0;
1904 	unsigned blocksize, bbits;
1905 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1906 
1907 	BUG_ON(!PageLocked(page));
1908 	BUG_ON(from > PAGE_CACHE_SIZE);
1909 	BUG_ON(to > PAGE_CACHE_SIZE);
1910 	BUG_ON(from > to);
1911 
1912 	blocksize = 1 << inode->i_blkbits;
1913 	if (!page_has_buffers(page))
1914 		create_empty_buffers(page, blocksize, 0);
1915 	head = page_buffers(page);
1916 
1917 	bbits = inode->i_blkbits;
1918 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1919 
1920 	for(bh = head, block_start = 0; bh != head || !block_start;
1921 	    block++, block_start=block_end, bh = bh->b_this_page) {
1922 		block_end = block_start + blocksize;
1923 		if (block_end <= from || block_start >= to) {
1924 			if (PageUptodate(page)) {
1925 				if (!buffer_uptodate(bh))
1926 					set_buffer_uptodate(bh);
1927 			}
1928 			continue;
1929 		}
1930 		if (buffer_new(bh))
1931 			clear_buffer_new(bh);
1932 		if (!buffer_mapped(bh)) {
1933 			WARN_ON(bh->b_size != blocksize);
1934 			err = get_block(inode, block, bh, 1);
1935 			if (err)
1936 				break;
1937 			if (buffer_new(bh)) {
1938 				unmap_underlying_metadata(bh->b_bdev,
1939 							bh->b_blocknr);
1940 				if (PageUptodate(page)) {
1941 					set_buffer_uptodate(bh);
1942 					continue;
1943 				}
1944 				if (block_end > to || block_start < from) {
1945 					void *kaddr;
1946 
1947 					kaddr = kmap_atomic(page, KM_USER0);
1948 					if (block_end > to)
1949 						memset(kaddr+to, 0,
1950 							block_end-to);
1951 					if (block_start < from)
1952 						memset(kaddr+block_start,
1953 							0, from-block_start);
1954 					flush_dcache_page(page);
1955 					kunmap_atomic(kaddr, KM_USER0);
1956 				}
1957 				continue;
1958 			}
1959 		}
1960 		if (PageUptodate(page)) {
1961 			if (!buffer_uptodate(bh))
1962 				set_buffer_uptodate(bh);
1963 			continue;
1964 		}
1965 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1966 		     (block_start < from || block_end > to)) {
1967 			ll_rw_block(READ, 1, &bh);
1968 			*wait_bh++=bh;
1969 		}
1970 	}
1971 	/*
1972 	 * If we issued read requests - let them complete.
1973 	 */
1974 	while(wait_bh > wait) {
1975 		wait_on_buffer(*--wait_bh);
1976 		if (!buffer_uptodate(*wait_bh))
1977 			err = -EIO;
1978 	}
1979 	if (!err) {
1980 		bh = head;
1981 		do {
1982 			if (buffer_new(bh))
1983 				clear_buffer_new(bh);
1984 		} while ((bh = bh->b_this_page) != head);
1985 		return 0;
1986 	}
1987 	/* Error case: */
1988 	/*
1989 	 * Zero out any newly allocated blocks to avoid exposing stale
1990 	 * data.  If BH_New is set, we know that the block was newly
1991 	 * allocated in the above loop.
1992 	 */
1993 	bh = head;
1994 	block_start = 0;
1995 	do {
1996 		block_end = block_start+blocksize;
1997 		if (block_end <= from)
1998 			goto next_bh;
1999 		if (block_start >= to)
2000 			break;
2001 		if (buffer_new(bh)) {
2002 			void *kaddr;
2003 
2004 			clear_buffer_new(bh);
2005 			kaddr = kmap_atomic(page, KM_USER0);
2006 			memset(kaddr+block_start, 0, bh->b_size);
2007 			kunmap_atomic(kaddr, KM_USER0);
2008 			set_buffer_uptodate(bh);
2009 			mark_buffer_dirty(bh);
2010 		}
2011 next_bh:
2012 		block_start = block_end;
2013 		bh = bh->b_this_page;
2014 	} while (bh != head);
2015 	return err;
2016 }
2017 
2018 static int __block_commit_write(struct inode *inode, struct page *page,
2019 		unsigned from, unsigned to)
2020 {
2021 	unsigned block_start, block_end;
2022 	int partial = 0;
2023 	unsigned blocksize;
2024 	struct buffer_head *bh, *head;
2025 
2026 	blocksize = 1 << inode->i_blkbits;
2027 
2028 	for(bh = head = page_buffers(page), block_start = 0;
2029 	    bh != head || !block_start;
2030 	    block_start=block_end, bh = bh->b_this_page) {
2031 		block_end = block_start + blocksize;
2032 		if (block_end <= from || block_start >= to) {
2033 			if (!buffer_uptodate(bh))
2034 				partial = 1;
2035 		} else {
2036 			set_buffer_uptodate(bh);
2037 			mark_buffer_dirty(bh);
2038 		}
2039 	}
2040 
2041 	/*
2042 	 * If this is a partial write which happened to make all buffers
2043 	 * uptodate then we can optimize away a bogus readpage() for
2044 	 * the next read(). Here we 'discover' whether the page went
2045 	 * uptodate as a result of this (potentially partial) write.
2046 	 */
2047 	if (!partial)
2048 		SetPageUptodate(page);
2049 	return 0;
2050 }
2051 
2052 /*
2053  * Generic "read page" function for block devices that have the normal
2054  * get_block functionality. This covers most block device based filesystems.
2055  * Reads the page asynchronously --- the unlock_buffer() and
2056  * set/clear_buffer_uptodate() functions propagate buffer state into the
2057  * page struct once IO has completed.
2058  */
2059 int block_read_full_page(struct page *page, get_block_t *get_block)
2060 {
2061 	struct inode *inode = page->mapping->host;
2062 	sector_t iblock, lblock;
2063 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2064 	unsigned int blocksize;
2065 	int nr, i;
2066 	int fully_mapped = 1;
2067 
2068 	BUG_ON(!PageLocked(page));
2069 	blocksize = 1 << inode->i_blkbits;
2070 	if (!page_has_buffers(page))
2071 		create_empty_buffers(page, blocksize, 0);
2072 	head = page_buffers(page);
2073 
2074 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2075 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2076 	bh = head;
2077 	nr = 0;
2078 	i = 0;
2079 
2080 	do {
2081 		if (buffer_uptodate(bh))
2082 			continue;
2083 
2084 		if (!buffer_mapped(bh)) {
2085 			int err = 0;
2086 
2087 			fully_mapped = 0;
2088 			if (iblock < lblock) {
2089 				WARN_ON(bh->b_size != blocksize);
2090 				err = get_block(inode, iblock, bh, 0);
2091 				if (err)
2092 					SetPageError(page);
2093 			}
2094 			if (!buffer_mapped(bh)) {
2095 				void *kaddr = kmap_atomic(page, KM_USER0);
2096 				memset(kaddr + i * blocksize, 0, blocksize);
2097 				flush_dcache_page(page);
2098 				kunmap_atomic(kaddr, KM_USER0);
2099 				if (!err)
2100 					set_buffer_uptodate(bh);
2101 				continue;
2102 			}
2103 			/*
2104 			 * get_block() might have updated the buffer
2105 			 * synchronously
2106 			 */
2107 			if (buffer_uptodate(bh))
2108 				continue;
2109 		}
2110 		arr[nr++] = bh;
2111 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2112 
2113 	if (fully_mapped)
2114 		SetPageMappedToDisk(page);
2115 
2116 	if (!nr) {
2117 		/*
2118 		 * All buffers are uptodate - we can set the page uptodate
2119 		 * as well. But not if get_block() returned an error.
2120 		 */
2121 		if (!PageError(page))
2122 			SetPageUptodate(page);
2123 		unlock_page(page);
2124 		return 0;
2125 	}
2126 
2127 	/* Stage two: lock the buffers */
2128 	for (i = 0; i < nr; i++) {
2129 		bh = arr[i];
2130 		lock_buffer(bh);
2131 		mark_buffer_async_read(bh);
2132 	}
2133 
2134 	/*
2135 	 * Stage 3: start the IO.  Check for uptodateness
2136 	 * inside the buffer lock in case another process reading
2137 	 * the underlying blockdev brought it uptodate (the sct fix).
2138 	 */
2139 	for (i = 0; i < nr; i++) {
2140 		bh = arr[i];
2141 		if (buffer_uptodate(bh))
2142 			end_buffer_async_read(bh, 1);
2143 		else
2144 			submit_bh(READ, bh);
2145 	}
2146 	return 0;
2147 }
2148 
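/*
 * A usage sketch, not taken from any real filesystem: the example_* names
 * and the flat one-extent-per-file layout below are assumptions.  It shows
 * the typical shape of a get_block_t and how ->readpage is usually just a
 * thin wrapper around block_read_full_page().
 */
#if 0
#define EXAMPLE_NR_BLOCKS(inode)	1024	/* hypothetical fixed extent */
#define EXAMPLE_START_BLOCK(inode)	\
		((sector_t)(inode)->i_ino * EXAMPLE_NR_BLOCKS(inode))

static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	if (iblock >= EXAMPLE_NR_BLOCKS(inode))
		return create ? -ENOSPC : 0;	/* beyond the extent: a hole */
	map_bh(bh_result, inode->i_sb,
			EXAMPLE_START_BLOCK(inode) + iblock);
	return 0;
}

static int example_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, example_get_block);
}
#endif
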
2149 /* utility function for filesystems that need to do work on expanding
2150  * truncates.  Uses prepare/commit_write to allow the filesystem to
2151  * deal with the hole.
2152  */
2153 static int __generic_cont_expand(struct inode *inode, loff_t size,
2154 				 pgoff_t index, unsigned int offset)
2155 {
2156 	struct address_space *mapping = inode->i_mapping;
2157 	struct page *page;
2158 	unsigned long limit;
2159 	int err;
2160 
2161 	err = -EFBIG;
2162 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2163 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2164 		send_sig(SIGXFSZ, current, 0);
2165 		goto out;
2166 	}
2167 	if (size > inode->i_sb->s_maxbytes)
2168 		goto out;
2169 
2170 	err = -ENOMEM;
2171 	page = grab_cache_page(mapping, index);
2172 	if (!page)
2173 		goto out;
2174 	err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2175 	if (err) {
2176 		/*
2177 		 * ->prepare_write() may have instantiated a few blocks
2178 		 * outside i_size.  Trim these off again.
2179 		 */
2180 		unlock_page(page);
2181 		page_cache_release(page);
2182 		vmtruncate(inode, inode->i_size);
2183 		goto out;
2184 	}
2185 
2186 	err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2187 
2188 	unlock_page(page);
2189 	page_cache_release(page);
2190 	if (err > 0)
2191 		err = 0;
2192 out:
2193 	return err;
2194 }
2195 
2196 int generic_cont_expand(struct inode *inode, loff_t size)
2197 {
2198 	pgoff_t index;
2199 	unsigned int offset;
2200 
2201 	offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2202 
2203 	/* ugh.  in prepare/commit_write, if from==to==start of block, we
2204 	 * skip the prepare.  make sure we never send an offset for the start
2205 	 * of a block
2206 	 */
2207 	if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2208 		/* caller must handle this extra byte. */
2209 		offset++;
2210 	}
2211 	index = size >> PAGE_CACHE_SHIFT;
2212 
2213 	return __generic_cont_expand(inode, size, index, offset);
2214 }
2215 
2216 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2217 {
2218 	loff_t pos = size - 1;
2219 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2220 	unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2221 
2222 	/* prepare/commit_write can handle even if from==to==start of block. */
2223 	return __generic_cont_expand(inode, size, index, offset);
2224 }
2225 
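/*
 * A sketch of the usual caller: a filesystem's ->setattr extending the file
 * before letting the generic code update the inode.  example_setattr is a
 * hypothetical name; error handling is minimal.
 */
#if 0
static int example_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err = 0;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size > i_size_read(inode))
		err = generic_cont_expand_simple(inode, attr->ia_size);
	if (!err)
		err = inode_setattr(inode, attr);
	return err;
}
#endif
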
2226 /*
2227  * For moronic filesystems that do not allow holes in files.
2228  * We may have to extend the file.
2229  */
2230 
2231 int cont_prepare_write(struct page *page, unsigned offset,
2232 		unsigned to, get_block_t *get_block, loff_t *bytes)
2233 {
2234 	struct address_space *mapping = page->mapping;
2235 	struct inode *inode = mapping->host;
2236 	struct page *new_page;
2237 	pgoff_t pgpos;
2238 	long status;
2239 	unsigned zerofrom;
2240 	unsigned blocksize = 1 << inode->i_blkbits;
2241 	void *kaddr;
2242 
2243 	while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2244 		status = -ENOMEM;
2245 		new_page = grab_cache_page(mapping, pgpos);
2246 		if (!new_page)
2247 			goto out;
2248 		/* we might sleep */
2249 		if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2250 			unlock_page(new_page);
2251 			page_cache_release(new_page);
2252 			continue;
2253 		}
2254 		zerofrom = *bytes & ~PAGE_CACHE_MASK;
2255 		if (zerofrom & (blocksize-1)) {
2256 			*bytes |= (blocksize-1);
2257 			(*bytes)++;
2258 		}
2259 		status = __block_prepare_write(inode, new_page, zerofrom,
2260 						PAGE_CACHE_SIZE, get_block);
2261 		if (status)
2262 			goto out_unmap;
2263 		kaddr = kmap_atomic(new_page, KM_USER0);
2264 		memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2265 		flush_dcache_page(new_page);
2266 		kunmap_atomic(kaddr, KM_USER0);
2267 		generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2268 		unlock_page(new_page);
2269 		page_cache_release(new_page);
2270 	}
2271 
2272 	if (page->index < pgpos) {
2273 		/* completely inside the area */
2274 		zerofrom = offset;
2275 	} else {
2276 		/* page covers the boundary, find the boundary offset */
2277 		zerofrom = *bytes & ~PAGE_CACHE_MASK;
2278 
2279 		/* if we will expand the thing last block will be filled */
2280 		if (to > zerofrom && (zerofrom & (blocksize-1))) {
2281 			*bytes |= (blocksize-1);
2282 			(*bytes)++;
2283 		}
2284 
2285 		/* starting below the boundary? Nothing to zero out */
2286 		if (offset <= zerofrom)
2287 			zerofrom = offset;
2288 	}
2289 	status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2290 	if (status)
2291 		goto out1;
2292 	if (zerofrom < offset) {
2293 		kaddr = kmap_atomic(page, KM_USER0);
2294 		memset(kaddr+zerofrom, 0, offset-zerofrom);
2295 		flush_dcache_page(page);
2296 		kunmap_atomic(kaddr, KM_USER0);
2297 		__block_commit_write(inode, page, zerofrom, offset);
2298 	}
2299 	return 0;
2300 out1:
2301 	ClearPageUptodate(page);
2302 	return status;
2303 
2304 out_unmap:
2305 	ClearPageUptodate(new_page);
2306 	unlock_page(new_page);
2307 	page_cache_release(new_page);
2308 out:
2309 	return status;
2310 }
2311 
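/*
 * A sketch of how a hole-less filesystem might wire this up, assuming a
 * hypothetical example_inode_info that records how many bytes are already
 * allocated (the real FAT code keeps this in its mmu_private field).
 * example_get_block is the hypothetical mapper sketched earlier.
 */
#if 0
struct example_inode_info {
	loff_t		file_bytes;	/* bytes allocated so far */
	struct inode	vfs_inode;
};

static inline struct example_inode_info *EXAMPLE_I(struct inode *inode)
{
	return container_of(inode, struct example_inode_info, vfs_inode);
}

static int example_prepare_write_nohole(struct file *file, struct page *page,
					unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;

	return cont_prepare_write(page, from, to, example_get_block,
					&EXAMPLE_I(inode)->file_bytes);
}
#endif
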
2312 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2313 			get_block_t *get_block)
2314 {
2315 	struct inode *inode = page->mapping->host;
2316 	int err = __block_prepare_write(inode, page, from, to, get_block);
2317 	if (err)
2318 		ClearPageUptodate(page);
2319 	return err;
2320 }
2321 
2322 int block_commit_write(struct page *page, unsigned from, unsigned to)
2323 {
2324 	struct inode *inode = page->mapping->host;
2325 	__block_commit_write(inode,page,from,to);
2326 	return 0;
2327 }
2328 
2329 int generic_commit_write(struct file *file, struct page *page,
2330 		unsigned from, unsigned to)
2331 {
2332 	struct inode *inode = page->mapping->host;
2333 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2334 	__block_commit_write(inode,page,from,to);
2335 	/*
2336 	 * No need to use i_size_read() here, the i_size
2337 	 * cannot change under us because we hold i_mutex.
2338 	 */
2339 	if (pos > inode->i_size) {
2340 		i_size_write(inode, pos);
2341 		mark_inode_dirty(inode);
2342 	}
2343 	return 0;
2344 }
2345 
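/*
 * A sketch of the classic buffer-based address_space_operations wiring
 * (ext2-style), using the hypothetical example_get_block/example_readpage
 * from earlier sketches.  commit_write and sync_page can usually be the
 * generic helpers directly.
 */
#if 0
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, example_get_block, wbc);
}

static int example_prepare_write(struct file *file, struct page *page,
				 unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, example_get_block);
}

static const struct address_space_operations example_aops = {
	.readpage	= example_readpage,
	.writepage	= example_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= example_prepare_write,
	.commit_write	= generic_commit_write,
};
#endif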
2346 
2347 /*
2348  * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2349  * immediately, while under the page lock.  So it needs a special end_io
2350  * handler which does not touch the bh after unlocking it.
2351  *
2352  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2353  * a race there is benign: unlock_buffer() only uses the bh's address for
2354  * hashing after unlocking the buffer, so it doesn't actually touch the bh
2355  * itself.
2356  */
2357 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2358 {
2359 	if (uptodate) {
2360 		set_buffer_uptodate(bh);
2361 	} else {
2362 		/* This happens, due to failed READA attempts. */
2363 		clear_buffer_uptodate(bh);
2364 	}
2365 	unlock_buffer(bh);
2366 }
2367 
2368 /*
2369  * On entry, the page is fully not uptodate.
2370  * On exit the page is fully uptodate in the areas outside (from,to)
2371  */
2372 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2373 			get_block_t *get_block)
2374 {
2375 	struct inode *inode = page->mapping->host;
2376 	const unsigned blkbits = inode->i_blkbits;
2377 	const unsigned blocksize = 1 << blkbits;
2378 	struct buffer_head map_bh;
2379 	struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2380 	unsigned block_in_page;
2381 	unsigned block_start;
2382 	sector_t block_in_file;
2383 	char *kaddr;
2384 	int nr_reads = 0;
2385 	int i;
2386 	int ret = 0;
2387 	int is_mapped_to_disk = 1;
2388 	int dirtied_it = 0;
2389 
2390 	if (PageMappedToDisk(page))
2391 		return 0;
2392 
2393 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2394 	map_bh.b_page = page;
2395 
2396 	/*
2397 	 * We loop across all blocks in the page, whether or not they are
2398 	 * part of the affected region.  This is so we can discover if the
2399 	 * page is fully mapped-to-disk.
2400 	 */
2401 	for (block_start = 0, block_in_page = 0;
2402 		  block_start < PAGE_CACHE_SIZE;
2403 		  block_in_page++, block_start += blocksize) {
2404 		unsigned block_end = block_start + blocksize;
2405 		int create;
2406 
2407 		map_bh.b_state = 0;
2408 		create = 1;
2409 		if (block_start >= to)
2410 			create = 0;
2411 		map_bh.b_size = blocksize;
2412 		ret = get_block(inode, block_in_file + block_in_page,
2413 					&map_bh, create);
2414 		if (ret)
2415 			goto failed;
2416 		if (!buffer_mapped(&map_bh))
2417 			is_mapped_to_disk = 0;
2418 		if (buffer_new(&map_bh))
2419 			unmap_underlying_metadata(map_bh.b_bdev,
2420 							map_bh.b_blocknr);
2421 		if (PageUptodate(page))
2422 			continue;
2423 		if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2424 			kaddr = kmap_atomic(page, KM_USER0);
2425 			if (block_start < from) {
2426 				memset(kaddr+block_start, 0, from-block_start);
2427 				dirtied_it = 1;
2428 			}
2429 			if (block_end > to) {
2430 				memset(kaddr + to, 0, block_end - to);
2431 				dirtied_it = 1;
2432 			}
2433 			flush_dcache_page(page);
2434 			kunmap_atomic(kaddr, KM_USER0);
2435 			continue;
2436 		}
2437 		if (buffer_uptodate(&map_bh))
2438 			continue;	/* reiserfs does this */
2439 		if (block_start < from || block_end > to) {
2440 			struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2441 
2442 			if (!bh) {
2443 				ret = -ENOMEM;
2444 				goto failed;
2445 			}
2446 			bh->b_state = map_bh.b_state;
2447 			atomic_set(&bh->b_count, 0);
2448 			bh->b_this_page = NULL;
2449 			bh->b_page = page;
2450 			bh->b_blocknr = map_bh.b_blocknr;
2451 			bh->b_size = blocksize;
2452 			bh->b_data = (char *)(long)block_start;
2453 			bh->b_bdev = map_bh.b_bdev;
2454 			bh->b_private = NULL;
2455 			read_bh[nr_reads++] = bh;
2456 		}
2457 	}
2458 
2459 	if (nr_reads) {
2460 		struct buffer_head *bh;
2461 
2462 		/*
2463 		 * The page is locked, so these buffers are protected from
2464 		 * any VM or truncate activity.  Hence we don't need to care
2465 		 * for the buffer_head refcounts.
2466 		 */
2467 		for (i = 0; i < nr_reads; i++) {
2468 			bh = read_bh[i];
2469 			lock_buffer(bh);
2470 			bh->b_end_io = end_buffer_read_nobh;
2471 			submit_bh(READ, bh);
2472 		}
2473 		for (i = 0; i < nr_reads; i++) {
2474 			bh = read_bh[i];
2475 			wait_on_buffer(bh);
2476 			if (!buffer_uptodate(bh))
2477 				ret = -EIO;
2478 			free_buffer_head(bh);
2479 			read_bh[i] = NULL;
2480 		}
2481 		if (ret)
2482 			goto failed;
2483 	}
2484 
2485 	if (is_mapped_to_disk)
2486 		SetPageMappedToDisk(page);
2487 	SetPageUptodate(page);
2488 
2489 	/*
2490 	 * Setting the page dirty here isn't necessary for the prepare_write
2491 	 * function - commit_write will do that.  But if/when this function is
2492 	 * used within the pagefault handler to ensure that all mmapped pages
2493 	 * have backing space in the filesystem, we will need to dirty the page
2494 	 * if its contents were altered.
2495 	 */
2496 	if (dirtied_it)
2497 		set_page_dirty(page);
2498 
2499 	return 0;
2500 
2501 failed:
2502 	for (i = 0; i < nr_reads; i++) {
2503 		if (read_bh[i])
2504 			free_buffer_head(read_bh[i]);
2505 	}
2506 
2507 	/*
2508 	 * Error recovery is pretty slack.  Clear the page and mark it dirty
2509 	 * so we'll later zero out any blocks which _were_ allocated.
2510 	 */
2511 	kaddr = kmap_atomic(page, KM_USER0);
2512 	memset(kaddr, 0, PAGE_CACHE_SIZE);
2513 	kunmap_atomic(kaddr, KM_USER0);
2514 	SetPageUptodate(page);
2515 	set_page_dirty(page);
2516 	return ret;
2517 }
2518 EXPORT_SYMBOL(nobh_prepare_write);
2519 
2520 int nobh_commit_write(struct file *file, struct page *page,
2521 		unsigned from, unsigned to)
2522 {
2523 	struct inode *inode = page->mapping->host;
2524 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2525 
2526 	set_page_dirty(page);
2527 	if (pos > inode->i_size) {
2528 		i_size_write(inode, pos);
2529 		mark_inode_dirty(inode);
2530 	}
2531 	return 0;
2532 }
2533 EXPORT_SYMBOL(nobh_commit_write);
2534 
2535 /*
2536  * nobh_writepage() - based on block_write_full_page() except
2537  * that it tries to operate without attaching bufferheads to
2538  * the page.
2539  */
2540 int nobh_writepage(struct page *page, get_block_t *get_block,
2541 			struct writeback_control *wbc)
2542 {
2543 	struct inode * const inode = page->mapping->host;
2544 	loff_t i_size = i_size_read(inode);
2545 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2546 	unsigned offset;
2547 	void *kaddr;
2548 	int ret;
2549 
2550 	/* Is the page fully inside i_size? */
2551 	if (page->index < end_index)
2552 		goto out;
2553 
2554 	/* Is the page fully outside i_size? (truncate in progress) */
2555 	offset = i_size & (PAGE_CACHE_SIZE-1);
2556 	if (page->index >= end_index+1 || !offset) {
2557 		/*
2558 		 * The page may have dirty, unmapped buffers.  For example,
2559 		 * they may have been added in ext3_writepage().  Make them
2560 		 * freeable here, so the page does not leak.
2561 		 */
2562 #if 0
2563 		/* Not really sure about this  - do we need this ? */
2564 		if (page->mapping->a_ops->invalidatepage)
2565 			page->mapping->a_ops->invalidatepage(page, offset);
2566 #endif
2567 		unlock_page(page);
2568 		return 0; /* don't care */
2569 	}
2570 
2571 	/*
2572 	 * The page straddles i_size.  It must be zeroed out on each and every
2573 	 * writepage invocation because it may be mmapped.  "A file is mapped
2574 	 * in multiples of the page size.  For a file that is not a multiple of
2575 	 * the  page size, the remaining memory is zeroed when mapped, and
2576 	 * writes to that region are not written out to the file."
2577 	 */
2578 	kaddr = kmap_atomic(page, KM_USER0);
2579 	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2580 	flush_dcache_page(page);
2581 	kunmap_atomic(kaddr, KM_USER0);
2582 out:
2583 	ret = mpage_writepage(page, get_block, wbc);
2584 	if (ret == -EAGAIN)
2585 		ret = __block_write_full_page(inode, page, get_block, wbc);
2586 	return ret;
2587 }
2588 EXPORT_SYMBOL(nobh_writepage);
2589 
2590 /*
2591  * This function assumes that ->prepare_write() uses nobh_prepare_write().
2592  */
2593 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2594 {
2595 	struct inode *inode = mapping->host;
2596 	unsigned blocksize = 1 << inode->i_blkbits;
2597 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2598 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2599 	unsigned to;
2600 	struct page *page;
2601 	const struct address_space_operations *a_ops = mapping->a_ops;
2602 	char *kaddr;
2603 	int ret = 0;
2604 
2605 	if ((offset & (blocksize - 1)) == 0)
2606 		goto out;
2607 
2608 	ret = -ENOMEM;
2609 	page = grab_cache_page(mapping, index);
2610 	if (!page)
2611 		goto out;
2612 
2613 	to = (offset + blocksize) & ~(blocksize - 1);
2614 	ret = a_ops->prepare_write(NULL, page, offset, to);
2615 	if (ret == 0) {
2616 		kaddr = kmap_atomic(page, KM_USER0);
2617 		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2618 		flush_dcache_page(page);
2619 		kunmap_atomic(kaddr, KM_USER0);
2620 		set_page_dirty(page);
2621 	}
2622 	unlock_page(page);
2623 	page_cache_release(page);
2624 out:
2625 	return ret;
2626 }
2627 EXPORT_SYMBOL(nobh_truncate_page);
2628 
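/*
 * A sketch of the nobh flavour of the same wiring (what a filesystem's
 * "nobh" mount option would select), again with the hypothetical
 * example_get_block/example_readpage.  The truncate path would call
 * nobh_truncate_page() instead of block_truncate_page().
 */
#if 0
static int example_nobh_writepage(struct page *page,
				  struct writeback_control *wbc)
{
	return nobh_writepage(page, example_get_block, wbc);
}

static int example_nobh_prepare_write(struct file *file, struct page *page,
				      unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, example_get_block);
}

static const struct address_space_operations example_nobh_aops = {
	.readpage	= example_readpage,
	.writepage	= example_nobh_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= example_nobh_prepare_write,
	.commit_write	= nobh_commit_write,
};
#endif
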
2629 int block_truncate_page(struct address_space *mapping,
2630 			loff_t from, get_block_t *get_block)
2631 {
2632 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2633 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2634 	unsigned blocksize;
2635 	sector_t iblock;
2636 	unsigned length, pos;
2637 	struct inode *inode = mapping->host;
2638 	struct page *page;
2639 	struct buffer_head *bh;
2640 	void *kaddr;
2641 	int err;
2642 
2643 	blocksize = 1 << inode->i_blkbits;
2644 	length = offset & (blocksize - 1);
2645 
2646 	/* Block boundary? Nothing to do */
2647 	if (!length)
2648 		return 0;
2649 
2650 	length = blocksize - length;
2651 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2652 
2653 	page = grab_cache_page(mapping, index);
2654 	err = -ENOMEM;
2655 	if (!page)
2656 		goto out;
2657 
2658 	if (!page_has_buffers(page))
2659 		create_empty_buffers(page, blocksize, 0);
2660 
2661 	/* Find the buffer that contains "offset" */
2662 	bh = page_buffers(page);
2663 	pos = blocksize;
2664 	while (offset >= pos) {
2665 		bh = bh->b_this_page;
2666 		iblock++;
2667 		pos += blocksize;
2668 	}
2669 
2670 	err = 0;
2671 	if (!buffer_mapped(bh)) {
2672 		WARN_ON(bh->b_size != blocksize);
2673 		err = get_block(inode, iblock, bh, 0);
2674 		if (err)
2675 			goto unlock;
2676 		/* unmapped? It's a hole - nothing to do */
2677 		if (!buffer_mapped(bh))
2678 			goto unlock;
2679 	}
2680 
2681 	/* Ok, it's mapped. Make sure it's up-to-date */
2682 	if (PageUptodate(page))
2683 		set_buffer_uptodate(bh);
2684 
2685 	if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2686 		err = -EIO;
2687 		ll_rw_block(READ, 1, &bh);
2688 		wait_on_buffer(bh);
2689 		/* Uhhuh. Read error. Complain and punt. */
2690 		if (!buffer_uptodate(bh))
2691 			goto unlock;
2692 	}
2693 
2694 	kaddr = kmap_atomic(page, KM_USER0);
2695 	memset(kaddr + offset, 0, length);
2696 	flush_dcache_page(page);
2697 	kunmap_atomic(kaddr, KM_USER0);
2698 
2699 	mark_buffer_dirty(bh);
2700 	err = 0;
2701 
2702 unlock:
2703 	unlock_page(page);
2704 	page_cache_release(page);
2705 out:
2706 	return err;
2707 }
2708 
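/*
 * A sketch of the usual caller: a filesystem's truncate path zeroing the
 * tail of the new last block before freeing whatever lies beyond i_size.
 * example_truncate and example_get_block are hypothetical.
 */
#if 0
static void example_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size,
				example_get_block);
	/* ...then release the on-disk blocks beyond i_size (fs-specific) */
}
#endif
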
2709 /*
2710  * The generic ->writepage function for buffer-backed address_spaces
2711  */
2712 int block_write_full_page(struct page *page, get_block_t *get_block,
2713 			struct writeback_control *wbc)
2714 {
2715 	struct inode * const inode = page->mapping->host;
2716 	loff_t i_size = i_size_read(inode);
2717 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2718 	unsigned offset;
2719 	void *kaddr;
2720 
2721 	/* Is the page fully inside i_size? */
2722 	if (page->index < end_index)
2723 		return __block_write_full_page(inode, page, get_block, wbc);
2724 
2725 	/* Is the page fully outside i_size? (truncate in progress) */
2726 	offset = i_size & (PAGE_CACHE_SIZE-1);
2727 	if (page->index >= end_index+1 || !offset) {
2728 		/*
2729 		 * The page may have dirty, unmapped buffers.  For example,
2730 		 * they may have been added in ext3_writepage().  Make them
2731 		 * freeable here, so the page does not leak.
2732 		 */
2733 		do_invalidatepage(page, 0);
2734 		unlock_page(page);
2735 		return 0; /* don't care */
2736 	}
2737 
2738 	/*
2739 	 * The page straddles i_size.  It must be zeroed out on each and every
2740 	 * writepage invocation because it may be mmapped.  "A file is mapped
2741 	 * in multiples of the page size.  For a file that is not a multiple of
2742 	 * the  page size, the remaining memory is zeroed when mapped, and
2743 	 * writes to that region are not written out to the file."
2744 	 */
2745 	kaddr = kmap_atomic(page, KM_USER0);
2746 	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2747 	flush_dcache_page(page);
2748 	kunmap_atomic(kaddr, KM_USER0);
2749 	return __block_write_full_page(inode, page, get_block, wbc);
2750 }
2751 
2752 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2753 			    get_block_t *get_block)
2754 {
2755 	struct buffer_head tmp;
2756 	struct inode *inode = mapping->host;
2757 	tmp.b_state = 0;
2758 	tmp.b_blocknr = 0;
2759 	tmp.b_size = 1 << inode->i_blkbits;
2760 	get_block(inode, block, &tmp, 0);
2761 	return tmp.b_blocknr;
2762 }
2763 
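/*
 * A sketch of the usual caller: ->bmap is normally just a wrapper passing
 * the filesystem's own get_block (example_get_block is hypothetical).
 */
#if 0
static sector_t example_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, example_get_block);
}
#endif
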
2764 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2765 {
2766 	struct buffer_head *bh = bio->bi_private;
2767 
2768 	if (bio->bi_size)
2769 		return 1;
2770 
2771 	if (err == -EOPNOTSUPP) {
2772 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2773 		set_bit(BH_Eopnotsupp, &bh->b_state);
2774 	}
2775 
2776 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2777 	bio_put(bio);
2778 	return 0;
2779 }
2780 
2781 int submit_bh(int rw, struct buffer_head * bh)
2782 {
2783 	struct bio *bio;
2784 	int ret = 0;
2785 
2786 	BUG_ON(!buffer_locked(bh));
2787 	BUG_ON(!buffer_mapped(bh));
2788 	BUG_ON(!bh->b_end_io);
2789 
2790 	if (buffer_ordered(bh) && (rw == WRITE))
2791 		rw = WRITE_BARRIER;
2792 
2793 	/*
2794 	 * Only clear out a write error when rewriting, should this
2795 	 * include WRITE_SYNC as well?
2796 	 */
2797 	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2798 		clear_buffer_write_io_error(bh);
2799 
2800 	/*
2801 	 * from here on down, it's all bio -- do the initial mapping,
2802 	 * submit_bio -> generic_make_request may further map this bio around
2803 	 */
2804 	bio = bio_alloc(GFP_NOIO, 1);
2805 
2806 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2807 	bio->bi_bdev = bh->b_bdev;
2808 	bio->bi_io_vec[0].bv_page = bh->b_page;
2809 	bio->bi_io_vec[0].bv_len = bh->b_size;
2810 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2811 
2812 	bio->bi_vcnt = 1;
2813 	bio->bi_idx = 0;
2814 	bio->bi_size = bh->b_size;
2815 
2816 	bio->bi_end_io = end_bio_bh_io_sync;
2817 	bio->bi_private = bh;
2818 
2819 	bio_get(bio);
2820 	submit_bio(rw, bio);
2821 
2822 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
2823 		ret = -EOPNOTSUPP;
2824 
2825 	bio_put(bio);
2826 	return ret;
2827 }
2828 
2829 /**
2830  * ll_rw_block: low-level access to block devices (DEPRECATED)
2831  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2832  * @nr: number of &struct buffer_heads in the array
2833  * @bhs: array of pointers to &struct buffer_head
2834  *
2835  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2836  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2837  * %SWRITE is like %WRITE, except that we make sure the *current* data in the
2838  * buffers is sent to disk. The fourth %READA option is described in the
2839  * documentation for generic_make_request(), which ll_rw_block() calls.
2840  *
2841  * This function drops any buffer that it cannot get a lock on (with the
2842  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2843  * clean when doing a write request, and any buffer that appears to be
2844  * up-to-date when doing a read request.  Further it marks as clean buffers that
2845  * are processed for writing (the buffer cache won't assume that they are
2846  * actually clean until the buffer gets unlocked).
2847  *
2848  * ll_rw_block sets b_end_io to a simple completion handler that marks
2849  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2850  * any waiters.
2851  *
2852  * All of the buffers must be for the same device, and must also be a
2853  * multiple of the current approved size for the device.
2854  */
2855 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2856 {
2857 	int i;
2858 
2859 	for (i = 0; i < nr; i++) {
2860 		struct buffer_head *bh = bhs[i];
2861 
2862 		if (rw == SWRITE)
2863 			lock_buffer(bh);
2864 		else if (test_set_buffer_locked(bh))
2865 			continue;
2866 
2867 		if (rw == WRITE || rw == SWRITE) {
2868 			if (test_clear_buffer_dirty(bh)) {
2869 				bh->b_end_io = end_buffer_write_sync;
2870 				get_bh(bh);
2871 				submit_bh(WRITE, bh);
2872 				continue;
2873 			}
2874 		} else {
2875 			if (!buffer_uptodate(bh)) {
2876 				bh->b_end_io = end_buffer_read_sync;
2877 				get_bh(bh);
2878 				submit_bh(rw, bh);
2879 				continue;
2880 			}
2881 		}
2882 		unlock_buffer(bh);
2883 	}
2884 }
2885 
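/*
 * A sketch of typical (if deprecated) usage: start reads on a batch of
 * metadata buffers, then wait for each and check the result.  The helper
 * name and the batch size are ours.
 */
#if 0
static int example_read_blocks(struct super_block *sb, sector_t start, int nr)
{
	struct buffer_head *bhs[16];
	int i, err = 0;

	if (nr > 16)
		nr = 16;
	for (i = 0; i < nr; i++) {
		bhs[i] = sb_getblk(sb, start + i);
		if (!bhs[i]) {		/* highly unlikely */
			nr = i;
			break;
		}
	}

	ll_rw_block(READ, nr, bhs);	/* submits only the not-uptodate ones */

	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
		brelse(bhs[i]);
	}
	return err;
}
#endif
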
2886 /*
2887  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2888  * and then start new I/O and then wait upon it.  The caller must have a ref on
2889  * the buffer_head.
2890  */
2891 int sync_dirty_buffer(struct buffer_head *bh)
2892 {
2893 	int ret = 0;
2894 
2895 	WARN_ON(atomic_read(&bh->b_count) < 1);
2896 	lock_buffer(bh);
2897 	if (test_clear_buffer_dirty(bh)) {
2898 		get_bh(bh);
2899 		bh->b_end_io = end_buffer_write_sync;
2900 		ret = submit_bh(WRITE, bh);
2901 		wait_on_buffer(bh);
2902 		if (buffer_eopnotsupp(bh)) {
2903 			clear_buffer_eopnotsupp(bh);
2904 			ret = -EOPNOTSUPP;
2905 		}
2906 		if (!ret && !buffer_uptodate(bh))
2907 			ret = -EIO;
2908 	} else {
2909 		unlock_buffer(bh);
2910 	}
2911 	return ret;
2912 }
2913 
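/*
 * A sketch of typical usage: modify a metadata block in the cache, then
 * write it synchronously.  example_update_block is a hypothetical helper;
 * len is assumed to be no larger than the block size.
 */
#if 0
static int example_update_block(struct super_block *sb, sector_t block,
				const void *data, size_t len)
{
	struct buffer_head *bh;
	int err;

	bh = sb_bread(sb, block);	/* read (or find) the block */
	if (!bh)
		return -EIO;
	memcpy(bh->b_data, data, len);	/* modify the cached copy */
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);	/* start the write and wait for it */
	brelse(bh);
	return err;
}
#endif
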
2914 /*
2915  * try_to_free_buffers() checks if all the buffers on this particular page
2916  * are unused, and releases them if so.
2917  *
2918  * Exclusion against try_to_free_buffers may be obtained by either
2919  * locking the page or by holding its mapping's private_lock.
2920  *
2921  * If the page is dirty but all the buffers are clean then we need to
2922  * be sure to mark the page clean as well.  This is because the page
2923  * may be against a block device, and a later reattachment of buffers
2924  * to a dirty page will set *all* buffers dirty, which would corrupt
2925  * filesystem data on the same device.
2926  *
2927  * The same applies to regular filesystem pages: if all the buffers are
2928  * clean then we set the page clean and proceed.  To do that, we require
2929  * total exclusion from __set_page_dirty_buffers().  That is obtained with
2930  * private_lock.
2931  *
2932  * try_to_free_buffers() is non-blocking.
2933  */
2934 static inline int buffer_busy(struct buffer_head *bh)
2935 {
2936 	return atomic_read(&bh->b_count) |
2937 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2938 }
2939 
2940 static int
2941 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2942 {
2943 	struct buffer_head *head = page_buffers(page);
2944 	struct buffer_head *bh;
2945 
2946 	bh = head;
2947 	do {
2948 		if (buffer_write_io_error(bh) && page->mapping)
2949 			set_bit(AS_EIO, &page->mapping->flags);
2950 		if (buffer_busy(bh))
2951 			goto failed;
2952 		bh = bh->b_this_page;
2953 	} while (bh != head);
2954 
2955 	do {
2956 		struct buffer_head *next = bh->b_this_page;
2957 
2958 		if (!list_empty(&bh->b_assoc_buffers))
2959 			__remove_assoc_queue(bh);
2960 		bh = next;
2961 	} while (bh != head);
2962 	*buffers_to_free = head;
2963 	__clear_page_buffers(page);
2964 	return 1;
2965 failed:
2966 	return 0;
2967 }
2968 
2969 int try_to_free_buffers(struct page *page)
2970 {
2971 	struct address_space * const mapping = page->mapping;
2972 	struct buffer_head *buffers_to_free = NULL;
2973 	int ret = 0;
2974 
2975 	BUG_ON(!PageLocked(page));
2976 	if (PageWriteback(page))
2977 		return 0;
2978 
2979 	if (mapping == NULL) {		/* can this still happen? */
2980 		ret = drop_buffers(page, &buffers_to_free);
2981 		goto out;
2982 	}
2983 
2984 	spin_lock(&mapping->private_lock);
2985 	ret = drop_buffers(page, &buffers_to_free);
2986 	if (ret) {
2987 		/*
2988 		 * If the filesystem writes its buffers by hand (eg ext3)
2989 		 * then we can have clean buffers against a dirty page.  We
2990 		 * clean the page here; otherwise later reattachment of buffers
2991 		 * could encounter a non-uptodate page, which is unresolvable.
2992 		 * This only applies in the rare case where try_to_free_buffers
2993 		 * succeeds but the page is not freed.
2994 		 */
2995 		clear_page_dirty(page);
2996 	}
2997 	spin_unlock(&mapping->private_lock);
2998 out:
2999 	if (buffers_to_free) {
3000 		struct buffer_head *bh = buffers_to_free;
3001 
3002 		do {
3003 			struct buffer_head *next = bh->b_this_page;
3004 			free_buffer_head(bh);
3005 			bh = next;
3006 		} while (bh != buffers_to_free);
3007 	}
3008 	return ret;
3009 }
3010 EXPORT_SYMBOL(try_to_free_buffers);
3011 
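/*
 * A sketch of the usual caller: a filesystem's ->releasepage.  For a
 * filesystem with no private pinning of buffers this can simply defer to
 * try_to_free_buffers() (example_releasepage is a hypothetical name).
 */
#if 0
static int example_releasepage(struct page *page, gfp_t gfp_mask)
{
	return try_to_free_buffers(page);
}
#endif
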
3012 void block_sync_page(struct page *page)
3013 {
3014 	struct address_space *mapping;
3015 
3016 	smp_mb();
3017 	mapping = page_mapping(page);
3018 	if (mapping)
3019 		blk_run_backing_dev(mapping->backing_dev_info, page);
3020 }
3021 
3022 /*
3023  * There are no bdflush tunables left.  But distributions are
3024  * still running obsolete flush daemons, so we terminate them here.
3025  *
3026  * Use of bdflush() is deprecated and will be removed in a future kernel.
3027  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3028  */
3029 asmlinkage long sys_bdflush(int func, long data)
3030 {
3031 	static int msg_count;
3032 
3033 	if (!capable(CAP_SYS_ADMIN))
3034 		return -EPERM;
3035 
3036 	if (msg_count < 5) {
3037 		msg_count++;
3038 		printk(KERN_INFO
3039 			"warning: process `%s' used the obsolete bdflush"
3040 			" system call\n", current->comm);
3041 		printk(KERN_INFO "Fix your initscripts?\n");
3042 	}
3043 
3044 	if (func == 1)
3045 		do_exit(0);
3046 	return 0;
3047 }
3048 
3049 /*
3050  * Buffer-head allocation
3051  */
3052 static kmem_cache_t *bh_cachep;
3053 
3054 /*
3055  * Once the number of bh's in the machine exceeds this level, we start
3056  * stripping them in writeback.
3057  */
3058 static int max_buffer_heads;
3059 
3060 int buffer_heads_over_limit;
3061 
3062 struct bh_accounting {
3063 	int nr;			/* Number of live bh's */
3064 	int ratelimit;		/* Limit cacheline bouncing */
3065 };
3066 
3067 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3068 
3069 static void recalc_bh_state(void)
3070 {
3071 	int i;
3072 	int tot = 0;
3073 
3074 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3075 		return;
3076 	__get_cpu_var(bh_accounting).ratelimit = 0;
3077 	for_each_online_cpu(i)
3078 		tot += per_cpu(bh_accounting, i).nr;
3079 	buffer_heads_over_limit = (tot > max_buffer_heads);
3080 }
3081 
3082 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3083 {
3084 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3085 	if (ret) {
3086 		get_cpu_var(bh_accounting).nr++;
3087 		recalc_bh_state();
3088 		put_cpu_var(bh_accounting);
3089 	}
3090 	return ret;
3091 }
3092 EXPORT_SYMBOL(alloc_buffer_head);
3093 
3094 void free_buffer_head(struct buffer_head *bh)
3095 {
3096 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
3097 	kmem_cache_free(bh_cachep, bh);
3098 	get_cpu_var(bh_accounting).nr--;
3099 	recalc_bh_state();
3100 	put_cpu_var(bh_accounting);
3101 }
3102 EXPORT_SYMBOL(free_buffer_head);
3103 
3104 static void
3105 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
3106 {
3107 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
3108 			    SLAB_CTOR_CONSTRUCTOR) {
3109 		struct buffer_head * bh = (struct buffer_head *)data;
3110 
3111 		memset(bh, 0, sizeof(*bh));
3112 		INIT_LIST_HEAD(&bh->b_assoc_buffers);
3113 	}
3114 }
3115 
3116 #ifdef CONFIG_HOTPLUG_CPU
3117 static void buffer_exit_cpu(int cpu)
3118 {
3119 	int i;
3120 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3121 
3122 	for (i = 0; i < BH_LRU_SIZE; i++) {
3123 		brelse(b->bhs[i]);
3124 		b->bhs[i] = NULL;
3125 	}
3126 	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3127 	per_cpu(bh_accounting, cpu).nr = 0;
3128 	put_cpu_var(bh_accounting);
3129 }
3130 
3131 static int buffer_cpu_notify(struct notifier_block *self,
3132 			      unsigned long action, void *hcpu)
3133 {
3134 	if (action == CPU_DEAD)
3135 		buffer_exit_cpu((unsigned long)hcpu);
3136 	return NOTIFY_OK;
3137 }
3138 #endif /* CONFIG_HOTPLUG_CPU */
3139 
3140 void __init buffer_init(void)
3141 {
3142 	int nrpages;
3143 
3144 	bh_cachep = kmem_cache_create("buffer_head",
3145 					sizeof(struct buffer_head), 0,
3146 					(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3147 					SLAB_MEM_SPREAD),
3148 					init_buffer_head,
3149 					NULL);
3150 
3151 	/*
3152 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3153 	 */
3154 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3155 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3156 	hotcpu_notifier(buffer_cpu_notify, 0);
3157 }
3158 
3159 EXPORT_SYMBOL(__bforget);
3160 EXPORT_SYMBOL(__brelse);
3161 EXPORT_SYMBOL(__wait_on_buffer);
3162 EXPORT_SYMBOL(block_commit_write);
3163 EXPORT_SYMBOL(block_prepare_write);
3164 EXPORT_SYMBOL(block_read_full_page);
3165 EXPORT_SYMBOL(block_sync_page);
3166 EXPORT_SYMBOL(block_truncate_page);
3167 EXPORT_SYMBOL(block_write_full_page);
3168 EXPORT_SYMBOL(cont_prepare_write);
3169 EXPORT_SYMBOL(end_buffer_read_sync);
3170 EXPORT_SYMBOL(end_buffer_write_sync);
3171 EXPORT_SYMBOL(file_fsync);
3172 EXPORT_SYMBOL(fsync_bdev);
3173 EXPORT_SYMBOL(generic_block_bmap);
3174 EXPORT_SYMBOL(generic_commit_write);
3175 EXPORT_SYMBOL(generic_cont_expand);
3176 EXPORT_SYMBOL(generic_cont_expand_simple);
3177 EXPORT_SYMBOL(init_buffer);
3178 EXPORT_SYMBOL(invalidate_bdev);
3179 EXPORT_SYMBOL(ll_rw_block);
3180 EXPORT_SYMBOL(mark_buffer_dirty);
3181 EXPORT_SYMBOL(submit_bh);
3182 EXPORT_SYMBOL(sync_dirty_buffer);
3183 EXPORT_SYMBOL(unlock_buffer);
3184