xref: /linux/fs/buffer.c (revision 776cfebb430c7b22c208b1b17add97f354d97cab)
1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6 
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20 
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/syscalls.h>
24 #include <linux/fs.h>
25 #include <linux/mm.h>
26 #include <linux/percpu.h>
27 #include <linux/slab.h>
28 #include <linux/smp_lock.h>
29 #include <linux/blkdev.h>
30 #include <linux/file.h>
31 #include <linux/quotaops.h>
32 #include <linux/highmem.h>
33 #include <linux/module.h>
34 #include <linux/writeback.h>
35 #include <linux/hash.h>
36 #include <linux/suspend.h>
37 #include <linux/buffer_head.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 
44 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
45 static void invalidate_bh_lrus(void);
46 
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48 
49 inline void
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 {
52 	bh->b_end_io = handler;
53 	bh->b_private = private;
54 }
55 
56 static int sync_buffer(void *word)
57 {
58 	struct block_device *bd;
59 	struct buffer_head *bh
60 		= container_of(word, struct buffer_head, b_state);
61 
62 	smp_mb();
63 	bd = bh->b_bdev;
64 	if (bd)
65 		blk_run_address_space(bd->bd_inode->i_mapping);
66 	io_schedule();
67 	return 0;
68 }
69 
70 void fastcall __lock_buffer(struct buffer_head *bh)
71 {
72 	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 							TASK_UNINTERRUPTIBLE);
74 }
75 EXPORT_SYMBOL(__lock_buffer);
76 
77 void fastcall unlock_buffer(struct buffer_head *bh)
78 {
79 	clear_buffer_locked(bh);
80 	smp_mb__after_clear_bit();
81 	wake_up_bit(&bh->b_state, BH_Lock);
82 }
83 
84 /*
85  * Block until a buffer comes unlocked.  This doesn't stop it
86  * from becoming locked again - you have to lock it yourself
87  * if you want to preserve its state.
88  */
89 void __wait_on_buffer(struct buffer_head * bh)
90 {
91 	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
92 }
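
/*
 * Illustrative sketch (example_check_after_wait() is hypothetical, not
 * part of this file): since __wait_on_buffer() provides no exclusion,
 * a caller that needs a stable view takes the lock itself and only
 * then inspects the buffer state.
 */
static int example_check_after_wait(struct buffer_head *bh)
{
	int uptodate;

	wait_on_buffer(bh);	/* the buffer may be locked again at once */
	lock_buffer(bh);	/* now the state cannot change under us */
	uptodate = buffer_uptodate(bh);
	unlock_buffer(bh);
	return uptodate;
}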
93 
94 static void
95 __clear_page_buffers(struct page *page)
96 {
97 	ClearPagePrivate(page);
98 	page->private = 0;
99 	page_cache_release(page);
100 }
101 
102 static void buffer_io_error(struct buffer_head *bh)
103 {
104 	char b[BDEVNAME_SIZE];
105 
106 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
107 			bdevname(bh->b_bdev, b),
108 			(unsigned long long)bh->b_blocknr);
109 }
110 
111 /*
112  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
113  * unlock the buffer. This is what ll_rw_block uses too.
114  */
115 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
116 {
117 	if (uptodate) {
118 		set_buffer_uptodate(bh);
119 	} else {
120 		/* This happens due to failed READA attempts. */
121 		clear_buffer_uptodate(bh);
122 	}
123 	unlock_buffer(bh);
124 	put_bh(bh);
125 }
126 
127 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
128 {
129 	char b[BDEVNAME_SIZE];
130 
131 	if (uptodate) {
132 		set_buffer_uptodate(bh);
133 	} else {
134 		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
135 			buffer_io_error(bh);
136 			printk(KERN_WARNING "lost page write due to "
137 					"I/O error on %s\n",
138 				       bdevname(bh->b_bdev, b));
139 		}
140 		set_buffer_write_io_error(bh);
141 		clear_buffer_uptodate(bh);
142 	}
143 	unlock_buffer(bh);
144 	put_bh(bh);
145 }
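
/*
 * Sketch of a synchronous read built on the read handler above
 * (illustrative only; __bread_slow() later in this file follows the
 * same pattern).
 */
static int example_sync_read(struct buffer_head *bh)
{
	lock_buffer(bh);
	get_bh(bh);			/* reference dropped by the handler */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);		/* end_buffer_read_sync unlocks bh */
	return buffer_uptodate(bh) ? 0 : -EIO;
}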
146 
147 /*
148  * Write out and wait upon all the dirty data associated with a block
149  * device via its mapping.  Does not take the superblock lock.
150  */
151 int sync_blockdev(struct block_device *bdev)
152 {
153 	int ret = 0;
154 
155 	if (bdev) {
156 		int err;
157 
158 		ret = filemap_fdatawrite(bdev->bd_inode->i_mapping);
159 		err = filemap_fdatawait(bdev->bd_inode->i_mapping);
160 		if (!ret)
161 			ret = err;
162 	}
163 	return ret;
164 }
165 EXPORT_SYMBOL(sync_blockdev);
166 
167 /*
168  * Write out and wait upon all dirty data associated with this
169  * superblock.  Filesystem data as well as the underlying block
170  * device.  Takes the superblock lock.
171  */
172 int fsync_super(struct super_block *sb)
173 {
174 	sync_inodes_sb(sb, 0);
175 	DQUOT_SYNC(sb);
176 	lock_super(sb);
177 	if (sb->s_dirt && sb->s_op->write_super)
178 		sb->s_op->write_super(sb);
179 	unlock_super(sb);
180 	if (sb->s_op->sync_fs)
181 		sb->s_op->sync_fs(sb, 1);
182 	sync_blockdev(sb->s_bdev);
183 	sync_inodes_sb(sb, 1);
184 
185 	return sync_blockdev(sb->s_bdev);
186 }
187 
188 /*
189  * Write out and wait upon all dirty data associated with this
190  * device.   Filesystem data as well as the underlying block
191  * device.  Takes the superblock lock.
192  */
193 int fsync_bdev(struct block_device *bdev)
194 {
195 	struct super_block *sb = get_super(bdev);
196 	if (sb) {
197 		int res = fsync_super(sb);
198 		drop_super(sb);
199 		return res;
200 	}
201 	return sync_blockdev(bdev);
202 }
203 
204 /**
205  * freeze_bdev  --  lock a filesystem and force it into a consistent state
206  * @bdev:	blockdevice to lock
207  *
208  * This takes the block device bd_mount_sem to make sure no new mounts
209  * happen on bdev until thaw_bdev() is called.
210  * If a superblock is found on this device, we take the s_umount semaphore
211  * on it to make sure nobody unmounts until the snapshot creation is done.
212  */
213 struct super_block *freeze_bdev(struct block_device *bdev)
214 {
215 	struct super_block *sb;
216 
217 	down(&bdev->bd_mount_sem);
218 	sb = get_super(bdev);
219 	if (sb && !(sb->s_flags & MS_RDONLY)) {
220 		sb->s_frozen = SB_FREEZE_WRITE;
221 		smp_wmb();
222 
223 		sync_inodes_sb(sb, 0);
224 		DQUOT_SYNC(sb);
225 
226 		lock_super(sb);
227 		if (sb->s_dirt && sb->s_op->write_super)
228 			sb->s_op->write_super(sb);
229 		unlock_super(sb);
230 
231 		if (sb->s_op->sync_fs)
232 			sb->s_op->sync_fs(sb, 1);
233 
234 		sync_blockdev(sb->s_bdev);
235 		sync_inodes_sb(sb, 1);
236 
237 		sb->s_frozen = SB_FREEZE_TRANS;
238 		smp_wmb();
239 
240 		sync_blockdev(sb->s_bdev);
241 
242 		if (sb->s_op->write_super_lockfs)
243 			sb->s_op->write_super_lockfs(sb);
244 	}
245 
246 	sync_blockdev(bdev);
247 	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
248 }
249 EXPORT_SYMBOL(freeze_bdev);
250 
251 /**
252  * thaw_bdev  -- unlock filesystem
253  * @bdev:	blockdevice to unlock
254  * @sb:		associated superblock
255  *
256  * Unlocks the filesystem and marks it writeable again after freeze_bdev().
257  */
258 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
259 {
260 	if (sb) {
261 		BUG_ON(sb->s_bdev != bdev);
262 
263 		if (sb->s_op->unlockfs)
264 			sb->s_op->unlockfs(sb);
265 		sb->s_frozen = SB_UNFROZEN;
266 		smp_wmb();
267 		wake_up(&sb->s_wait_unfrozen);
268 		drop_super(sb);
269 	}
270 
271 	up(&bdev->bd_mount_sem);
272 }
273 EXPORT_SYMBOL(thaw_bdev);
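
/*
 * Usage sketch (example_snapshot() is hypothetical): a snapshot driver
 * brackets its work with freeze_bdev()/thaw_bdev() so that the
 * filesystem is quiescent and consistent while the copy is taken.
 */
static void example_snapshot(struct block_device *bdev)
{
	struct super_block *sb = freeze_bdev(bdev);

	/* ... the device is frozen: take the snapshot here ... */

	thaw_bdev(bdev, sb);	/* sb may be NULL if no superblock was found */
}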
274 
275 /*
276  * sync everything.  Start out by waking pdflush, because that writes back
277  * all queues in parallel.
278  */
279 static void do_sync(unsigned long wait)
280 {
281 	wakeup_bdflush(0);
282 	sync_inodes(0);		/* All mappings, inodes and their blockdevs */
283 	DQUOT_SYNC(NULL);
284 	sync_supers();		/* Write the superblocks */
285 	sync_filesystems(0);	/* Start syncing the filesystems */
286 	sync_filesystems(wait);	/* Waitingly sync the filesystems */
287 	sync_inodes(wait);	/* Mappings, inodes and blockdevs, again. */
288 	if (!wait)
289 		printk("Emergency Sync complete\n");
290 	if (unlikely(laptop_mode))
291 		laptop_sync_completion();
292 }
293 
294 asmlinkage long sys_sync(void)
295 {
296 	do_sync(1);
297 	return 0;
298 }
299 
300 void emergency_sync(void)
301 {
302 	pdflush_operation(do_sync, 0);
303 }
304 
305 /*
306  * Generic function to fsync a file.
307  *
308  * filp may be NULL if called via the msync of a vma.
309  */
310 
311 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
312 {
313 	struct inode * inode = dentry->d_inode;
314 	struct super_block * sb;
315 	int ret, err;
316 
317 	/* sync the inode to buffers */
318 	ret = write_inode_now(inode, 0);
319 
320 	/* sync the superblock to buffers */
321 	sb = inode->i_sb;
322 	lock_super(sb);
323 	if (sb->s_op->write_super)
324 		sb->s_op->write_super(sb);
325 	unlock_super(sb);
326 
327 	/* .. finally sync the buffers to disk */
328 	err = sync_blockdev(sb->s_bdev);
329 	if (!ret)
330 		ret = err;
331 	return ret;
332 }
333 
334 asmlinkage long sys_fsync(unsigned int fd)
335 {
336 	struct file * file;
337 	struct address_space *mapping;
338 	int ret, err;
339 
340 	ret = -EBADF;
341 	file = fget(fd);
342 	if (!file)
343 		goto out;
344 
345 	mapping = file->f_mapping;
346 
347 	ret = -EINVAL;
348 	if (!file->f_op || !file->f_op->fsync) {
349 		/* Why?  We can still call filemap_fdatawrite */
350 		goto out_putf;
351 	}
352 
353 	current->flags |= PF_SYNCWRITE;
354 	ret = filemap_fdatawrite(mapping);
355 
356 	/*
357 	 * We need to protect against concurrent writers,
358 	 * which could cause livelocks in fsync_buffers_list
359 	 */
360 	down(&mapping->host->i_sem);
361 	err = file->f_op->fsync(file, file->f_dentry, 0);
362 	if (!ret)
363 		ret = err;
364 	up(&mapping->host->i_sem);
365 	err = filemap_fdatawait(mapping);
366 	if (!ret)
367 		ret = err;
368 	current->flags &= ~PF_SYNCWRITE;
369 
370 out_putf:
371 	fput(file);
372 out:
373 	return ret;
374 }
375 
376 asmlinkage long sys_fdatasync(unsigned int fd)
377 {
378 	struct file * file;
379 	struct address_space *mapping;
380 	int ret, err;
381 
382 	ret = -EBADF;
383 	file = fget(fd);
384 	if (!file)
385 		goto out;
386 
387 	ret = -EINVAL;
388 	if (!file->f_op || !file->f_op->fsync)
389 		goto out_putf;
390 
391 	mapping = file->f_mapping;
392 
393 	current->flags |= PF_SYNCWRITE;
394 	ret = filemap_fdatawrite(mapping);
395 	down(&mapping->host->i_sem);
396 	err = file->f_op->fsync(file, file->f_dentry, 1);
397 	if (!ret)
398 		ret = err;
399 	up(&mapping->host->i_sem);
400 	err = filemap_fdatawait(mapping);
401 	if (!ret)
402 		ret = err;
403 	current->flags &= ~PF_SYNCWRITE;
404 
405 out_putf:
406 	fput(file);
407 out:
408 	return ret;
409 }
410 
411 /*
412  * Various filesystems appear to want __find_get_block to be non-blocking.
413  * But it's the page lock which protects the buffers.  To get around this,
414  * we get exclusion from try_to_free_buffers with the blockdev mapping's
415  * private_lock.
416  *
417  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
418  * may be quite high.  This code could TryLock the page, and if that
419  * succeeds, there is no need to take private_lock. (But if
420  * private_lock is contended then so is mapping->tree_lock).
421  */
422 static struct buffer_head *
423 __find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
424 {
425 	struct inode *bd_inode = bdev->bd_inode;
426 	struct address_space *bd_mapping = bd_inode->i_mapping;
427 	struct buffer_head *ret = NULL;
428 	pgoff_t index;
429 	struct buffer_head *bh;
430 	struct buffer_head *head;
431 	struct page *page;
432 	int all_mapped = 1;
433 
434 	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
435 	page = find_get_page(bd_mapping, index);
436 	if (!page)
437 		goto out;
438 
439 	spin_lock(&bd_mapping->private_lock);
440 	if (!page_has_buffers(page))
441 		goto out_unlock;
442 	head = page_buffers(page);
443 	bh = head;
444 	do {
445 		if (bh->b_blocknr == block) {
446 			ret = bh;
447 			get_bh(bh);
448 			goto out_unlock;
449 		}
450 		if (!buffer_mapped(bh))
451 			all_mapped = 0;
452 		bh = bh->b_this_page;
453 	} while (bh != head);
454 
455 	/* We might be here because some of the buffers on this page are
456 	 * not mapped.  This is due to various races between
457 	 * file I/O on the block device and getblk.  It gets dealt with
458 	 * elsewhere; don't buffer_error if we had some unmapped buffers.
459 	 */
460 	if (all_mapped) {
461 		printk("__find_get_block_slow() failed. "
462 			"block=%llu, b_blocknr=%llu\n",
463 			(unsigned long long)block, (unsigned long long)bh->b_blocknr);
464 		printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
465 		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
466 	}
467 out_unlock:
468 	spin_unlock(&bd_mapping->private_lock);
469 	page_cache_release(page);
470 out:
471 	return ret;
472 }
473 
474 /* If invalidate_buffers() trashes dirty buffers, it means some kind
475    of fs corruption is going on. Trashing dirty data always implies losing
476    information that was supposed to be just stored on the physical layer
477    by the user.
478 
479    Thus invalidate_buffers in general usage is not allowed to trash
480    dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
481    be preserved.  These buffers are simply skipped.
482 
483    We also skip buffers which are still in use.  For example this can
484    happen if a userspace program is reading the block device.
485 
486    NOTE: if the user removes a removable-media disk while there is still
487    dirty data not synced to disk (due to a bug in the device driver
488    or to an error by the user), then by not destroying the dirty buffers
489    we could corrupt the next media inserted as well, so a parameter is
490    needed to handle this case as safely as possible (trying
491    not to corrupt the newly inserted disk with data belonging to
492    the old, now corrupted, disk). Also, for the ramdisk the natural way
493    to release the ramdisk memory is to destroy its dirty buffers.
494 
495    These are two special cases. Normal usage requires the device driver
496    to issue a sync on the device (without waiting for I/O completion) and
497    then an invalidate_buffers call that doesn't trash dirty buffers.
498 
499    For handling cache coherency with the blkdev pagecache, the 'update' case
500    has been introduced. It is needed to re-read any pinned buffer from
501    disk. NOTE: re-reading from disk is destructive, so we can do it only
502    when we assume nobody is changing the buffercache under our I/O and when
503    we think the disk contains more recent information than the buffercache.
504    The update == 1 pass marks the buffers we need to update; the update == 2
505    pass does the actual I/O. */
506 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
507 {
508 	invalidate_bh_lrus();
509 	/*
510 	 * FIXME: what about destroy_dirty_buffers?
511 	 * We really want to use invalidate_inode_pages2() for
512 	 * that, but not until that's cleaned up.
513 	 */
514 	invalidate_inode_pages(bdev->bd_inode->i_mapping);
515 }
516 
517 /*
518  * Kick pdflush then try to free up some ZONE_NORMAL memory.
519  */
520 static void free_more_memory(void)
521 {
522 	struct zone **zones;
523 	pg_data_t *pgdat;
524 
525 	wakeup_bdflush(1024);
526 	yield();
527 
528 	for_each_pgdat(pgdat) {
529 		zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
530 		if (*zones)
531 			try_to_free_pages(zones, GFP_NOFS, 0);
532 	}
533 }
534 
535 /*
536  * I/O completion handler for block_read_full_page() - pages
537  * which come unlocked at the end of I/O.
538  */
539 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
540 {
541 	static DEFINE_SPINLOCK(page_uptodate_lock);
542 	unsigned long flags;
543 	struct buffer_head *tmp;
544 	struct page *page;
545 	int page_uptodate = 1;
546 
547 	BUG_ON(!buffer_async_read(bh));
548 
549 	page = bh->b_page;
550 	if (uptodate) {
551 		set_buffer_uptodate(bh);
552 	} else {
553 		clear_buffer_uptodate(bh);
554 		if (printk_ratelimit())
555 			buffer_io_error(bh);
556 		SetPageError(page);
557 	}
558 
559 	/*
560 	 * Be _very_ careful from here on. Bad things can happen if
561 	 * two buffer heads end IO at almost the same time and both
562 	 * decide that the page is now completely done.
563 	 */
564 	spin_lock_irqsave(&page_uptodate_lock, flags);
565 	clear_buffer_async_read(bh);
566 	unlock_buffer(bh);
567 	tmp = bh;
568 	do {
569 		if (!buffer_uptodate(tmp))
570 			page_uptodate = 0;
571 		if (buffer_async_read(tmp)) {
572 			BUG_ON(!buffer_locked(tmp));
573 			goto still_busy;
574 		}
575 		tmp = tmp->b_this_page;
576 	} while (tmp != bh);
577 	spin_unlock_irqrestore(&page_uptodate_lock, flags);
578 
579 	/*
580 	 * If none of the buffers had errors and they are all
581 	 * uptodate then we can set the page uptodate.
582 	 */
583 	if (page_uptodate && !PageError(page))
584 		SetPageUptodate(page);
585 	unlock_page(page);
586 	return;
587 
588 still_busy:
589 	spin_unlock_irqrestore(&page_uptodate_lock, flags);
590 	return;
591 }
592 
593 /*
594  * Completion handler for block_write_full_page() - pages which are unlocked
595  * during I/O, and which have PageWriteback cleared upon I/O completion.
596  */
597 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
598 {
599 	char b[BDEVNAME_SIZE];
600 	static DEFINE_SPINLOCK(page_uptodate_lock);
601 	unsigned long flags;
602 	struct buffer_head *tmp;
603 	struct page *page;
604 
605 	BUG_ON(!buffer_async_write(bh));
606 
607 	page = bh->b_page;
608 	if (uptodate) {
609 		set_buffer_uptodate(bh);
610 	} else {
611 		if (printk_ratelimit()) {
612 			buffer_io_error(bh);
613 			printk(KERN_WARNING "lost page write due to "
614 					"I/O error on %s\n",
615 			       bdevname(bh->b_bdev, b));
616 		}
617 		set_bit(AS_EIO, &page->mapping->flags);
618 		clear_buffer_uptodate(bh);
619 		SetPageError(page);
620 	}
621 
622 	spin_lock_irqsave(&page_uptodate_lock, flags);
623 	clear_buffer_async_write(bh);
624 	unlock_buffer(bh);
625 	tmp = bh->b_this_page;
626 	while (tmp != bh) {
627 		if (buffer_async_write(tmp)) {
628 			BUG_ON(!buffer_locked(tmp));
629 			goto still_busy;
630 		}
631 		tmp = tmp->b_this_page;
632 	}
633 	spin_unlock_irqrestore(&page_uptodate_lock, flags);
634 	end_page_writeback(page);
635 	return;
636 
637 still_busy:
638 	spin_unlock_irqrestore(&page_uptodate_lock, flags);
639 	return;
640 }
641 
642 /*
643  * If a page's buffers are under async read-in (end_buffer_async_read
644  * completion) then there is a possibility that another thread of
645  * control could lock one of the buffers after it has completed
646  * but while some of the other buffers have not completed.  This
647  * locked buffer would confuse end_buffer_async_read() into not unlocking
648  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
649  * that this buffer is not under async I/O.
650  *
651  * The page comes unlocked when it has no locked buffer_async buffers
652  * left.
653  *
654  * PageLocked prevents anyone starting new async I/O reads any of
655  * the buffers.
656  *
657  * PageWriteback is used to prevent simultaneous writeout of the same
658  * page.
659  *
660  * PageLocked prevents anyone from starting writeback of a page which is
661  * under read I/O (PageWriteback is only ever set against a locked page).
662  */
663 static void mark_buffer_async_read(struct buffer_head *bh)
664 {
665 	bh->b_end_io = end_buffer_async_read;
666 	set_buffer_async_read(bh);
667 }
668 
669 void mark_buffer_async_write(struct buffer_head *bh)
670 {
671 	bh->b_end_io = end_buffer_async_write;
672 	set_buffer_async_write(bh);
673 }
674 EXPORT_SYMBOL(mark_buffer_async_write);
675 
676 
677 /*
678  * fs/buffer.c contains helper functions for buffer-backed address space's
679  * fsync functions.  A common requirement for buffer-based filesystems is
680  * that certain data from the backing blockdev needs to be written out for
681  * a successful fsync().  For example, ext2 indirect blocks need to be
682  * written back and waited upon before fsync() returns.
683  *
684  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
685  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
686  * management of a list of dependent buffers at ->i_mapping->private_list.
687  *
688  * Locking is a little subtle: try_to_free_buffers() will remove buffers
689  * from their controlling inode's queue when they are being freed.  But
690  * try_to_free_buffers() will be operating against the *blockdev* mapping
691  * at the time, not against the S_ISREG file which depends on those buffers.
692  * So the locking for private_list is via the private_lock in the address_space
693  * which backs the buffers.  Which is different from the address_space
694  * against which the buffers are listed.  So for a particular address_space,
695  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
696  * mapping->private_list will always be protected by the backing blockdev's
697  * ->private_lock.
698  *
699  * Which introduces a requirement: all buffers on an address_space's
700  * ->private_list must be from the same address_space: the blockdev's.
701  *
702  * address_spaces which do not place buffers at ->private_list via these
703  * utility functions are free to use private_lock and private_list for
704  * whatever they want.  The only requirement is that list_empty(private_list)
705  * be true at clear_inode() time.
706  *
707  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
708  * filesystems should do that.  invalidate_inode_buffers() should just go
709  * BUG_ON(!list_empty).
710  *
711  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
712  * take an address_space, not an inode.  And it should be called
713  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
714  * queued up.
715  *
716  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
717  * list if it is already on a list.  Because if the buffer is on a list,
718  * it *must* already be on the right one.  If not, the filesystem is being
719  * silly.  This will save a ton of locking.  But first we have to ensure
720  * that buffers are taken *off* the old inode's list when they are freed
721  * (presumably in truncate).  That requires careful auditing of all
722  * filesystems (do it inside bforget()).  It could also be done by bringing
723  * b_inode back.
724  */
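
/*
 * Sketch of the calling pattern described above (example_fsync_pattern()
 * is hypothetical): the filesystem files a dependent blockdev buffer
 * against the data file's inode, and its fsync later writes out and
 * waits upon the whole private_list in one call.
 */
static int example_fsync_pattern(struct inode *inode, struct buffer_head *bh)
{
	/* queue the metadata buffer on inode->i_mapping->private_list */
	mark_buffer_dirty_inode(bh, inode);

	/* at fsync time: start I/O against the list, then wait on it */
	return sync_mapping_buffers(inode->i_mapping);
}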
725 
726 /*
727  * The buffer's backing address_space's private_lock must be held
728  */
729 static inline void __remove_assoc_queue(struct buffer_head *bh)
730 {
731 	list_del_init(&bh->b_assoc_buffers);
732 }
733 
734 int inode_has_buffers(struct inode *inode)
735 {
736 	return !list_empty(&inode->i_data.private_list);
737 }
738 
739 /*
740  * osync is designed to support O_SYNC io.  It waits synchronously for
741  * all already-submitted IO to complete, but does not queue any new
742  * writes to the disk.
743  *
744  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
745  * you dirty the buffers, and then use osync_inode_buffers to wait for
746  * completion.  Any other dirty buffers which are not yet queued for
747  * write will not be flushed to disk by the osync.
748  */
749 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
750 {
751 	struct buffer_head *bh;
752 	struct list_head *p;
753 	int err = 0;
754 
755 	spin_lock(lock);
756 repeat:
757 	list_for_each_prev(p, list) {
758 		bh = BH_ENTRY(p);
759 		if (buffer_locked(bh)) {
760 			get_bh(bh);
761 			spin_unlock(lock);
762 			wait_on_buffer(bh);
763 			if (!buffer_uptodate(bh))
764 				err = -EIO;
765 			brelse(bh);
766 			spin_lock(lock);
767 			goto repeat;
768 		}
769 	}
770 	spin_unlock(lock);
771 	return err;
772 }
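
/*
 * Minimal sketch of the O_SYNC pattern described above (illustrative
 * only): queue the write as the buffer is dirtied, then merely wait
 * for completion instead of submitting new writes at sync time.
 */
static int example_osync_write(struct buffer_head *bh)
{
	mark_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);	/* queue the write immediately */
	wait_on_buffer(bh);		/* the "osync" step: wait only */
	return buffer_uptodate(bh) ? 0 : -EIO;
}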
773 
774 /**
775  * sync_mapping_buffers - write out and wait upon a mapping's "associated"
776  *                        buffers
777  * @mapping: the mapping which wants those buffers written
778  *
779  * Starts I/O against the buffers at mapping->private_list, and waits upon
780  * that I/O.
781  *
782  * Basically, this is a convenience function for fsync().
783  * @mapping is a file or directory which needs those buffers to be written for
784  * a successful fsync().
785  */
786 int sync_mapping_buffers(struct address_space *mapping)
787 {
788 	struct address_space *buffer_mapping = mapping->assoc_mapping;
789 
790 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
791 		return 0;
792 
793 	return fsync_buffers_list(&buffer_mapping->private_lock,
794 					&mapping->private_list);
795 }
796 EXPORT_SYMBOL(sync_mapping_buffers);
797 
798 /*
799  * Called when we've recently written block `bblock', and it is known that
800  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
801  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
802  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
803  */
804 void write_boundary_block(struct block_device *bdev,
805 			sector_t bblock, unsigned blocksize)
806 {
807 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
808 	if (bh) {
809 		if (buffer_dirty(bh))
810 			ll_rw_block(WRITE, 1, &bh);
811 		put_bh(bh);
812 	}
813 }
814 
815 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
816 {
817 	struct address_space *mapping = inode->i_mapping;
818 	struct address_space *buffer_mapping = bh->b_page->mapping;
819 
820 	mark_buffer_dirty(bh);
821 	if (!mapping->assoc_mapping) {
822 		mapping->assoc_mapping = buffer_mapping;
823 	} else {
824 		if (mapping->assoc_mapping != buffer_mapping)
825 			BUG();
826 	}
827 	if (list_empty(&bh->b_assoc_buffers)) {
828 		spin_lock(&buffer_mapping->private_lock);
829 		list_move_tail(&bh->b_assoc_buffers,
830 				&mapping->private_list);
831 		spin_unlock(&buffer_mapping->private_lock);
832 	}
833 }
834 EXPORT_SYMBOL(mark_buffer_dirty_inode);
835 
836 /*
837  * Add a page to the dirty page list.
838  *
839  * It is a sad fact of life that this function is called from several places
840  * deeply under spinlocking.  It may not sleep.
841  *
842  * If the page has buffers, the uptodate buffers are set dirty, to preserve
843  * dirty-state coherency between the page and the buffers.  If the page does
844  * not have buffers then when they are later attached they will all be set
845  * dirty.
846  *
847  * The buffers are dirtied before the page is dirtied.  There's a small race
848  * window in which a writepage caller may see the page cleanness but not the
849  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
850  * before the buffers, a concurrent writepage caller could clear the page dirty
851  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
852  * page on the dirty page list.
853  *
854  * We use private_lock to lock against try_to_free_buffers while using the
855  * page's buffer list.  Also use this to protect against clean buffers being
856  * added to the page after it was set dirty.
857  *
858  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
859  * address_space though.
860  */
861 int __set_page_dirty_buffers(struct page *page)
862 {
863 	struct address_space * const mapping = page->mapping;
864 
865 	spin_lock(&mapping->private_lock);
866 	if (page_has_buffers(page)) {
867 		struct buffer_head *head = page_buffers(page);
868 		struct buffer_head *bh = head;
869 
870 		do {
871 			set_buffer_dirty(bh);
872 			bh = bh->b_this_page;
873 		} while (bh != head);
874 	}
875 	spin_unlock(&mapping->private_lock);
876 
877 	if (!TestSetPageDirty(page)) {
878 		write_lock_irq(&mapping->tree_lock);
879 		if (page->mapping) {	/* Race with truncate? */
880 			if (mapping_cap_account_dirty(mapping))
881 				inc_page_state(nr_dirty);
882 			radix_tree_tag_set(&mapping->page_tree,
883 						page_index(page),
884 						PAGECACHE_TAG_DIRTY);
885 		}
886 		write_unlock_irq(&mapping->tree_lock);
887 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
888 	}
889 
890 	return 0;
891 }
892 EXPORT_SYMBOL(__set_page_dirty_buffers);
893 
894 /*
895  * Write out and wait upon a list of buffers.
896  *
897  * We have conflicting pressures: we want to make sure that all
898  * initially dirty buffers get waited on, but that any subsequently
899  * dirtied buffers don't.  After all, we don't want fsync to last
900  * forever if somebody is actively writing to the file.
901  *
902  * Do this in two main stages: first we copy dirty buffers to a
903  * temporary inode list, queueing the writes as we go.  Then we clean
904  * up, waiting for those writes to complete.
905  *
906  * During this second stage, any subsequent updates to the file may end
907  * up refiling the buffer on the original inode's dirty list again, so
908  * there is a chance we will end up with a buffer queued for write but
909  * not yet completed on that list.  So, as a final cleanup we go through
910  * the osync code to catch these locked, dirty buffers without requeuing
911  * any newly dirty buffers for write.
912  */
913 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
914 {
915 	struct buffer_head *bh;
916 	struct list_head tmp;
917 	int err = 0, err2;
918 
919 	INIT_LIST_HEAD(&tmp);
920 
921 	spin_lock(lock);
922 	while (!list_empty(list)) {
923 		bh = BH_ENTRY(list->next);
924 		list_del_init(&bh->b_assoc_buffers);
925 		if (buffer_dirty(bh) || buffer_locked(bh)) {
926 			list_add(&bh->b_assoc_buffers, &tmp);
927 			if (buffer_dirty(bh)) {
928 				get_bh(bh);
929 				spin_unlock(lock);
930 				/*
931 				 * Ensure any pending I/O completes so that
932 				 * ll_rw_block() actually writes the current
933 				 * contents - it is a noop if I/O is still in
934 				 * flight on potentially older contents.
935 				 */
936 				wait_on_buffer(bh);
937 				ll_rw_block(WRITE, 1, &bh);
938 				brelse(bh);
939 				spin_lock(lock);
940 			}
941 		}
942 	}
943 
944 	while (!list_empty(&tmp)) {
945 		bh = BH_ENTRY(tmp.prev);
946 		__remove_assoc_queue(bh);
947 		get_bh(bh);
948 		spin_unlock(lock);
949 		wait_on_buffer(bh);
950 		if (!buffer_uptodate(bh))
951 			err = -EIO;
952 		brelse(bh);
953 		spin_lock(lock);
954 	}
955 
956 	spin_unlock(lock);
957 	err2 = osync_buffers_list(lock, list);
958 	if (err)
959 		return err;
960 	else
961 		return err2;
962 }
963 
964 /*
965  * Invalidate any and all dirty buffers on a given inode.  We are
966  * probably unmounting the fs, but that doesn't mean we have already
967  * done a sync().  Just drop the buffers from the inode list.
968  *
969  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
970  * assumes that all the buffers are against the blockdev.  Not true
971  * for reiserfs.
972  */
973 void invalidate_inode_buffers(struct inode *inode)
974 {
975 	if (inode_has_buffers(inode)) {
976 		struct address_space *mapping = &inode->i_data;
977 		struct list_head *list = &mapping->private_list;
978 		struct address_space *buffer_mapping = mapping->assoc_mapping;
979 
980 		spin_lock(&buffer_mapping->private_lock);
981 		while (!list_empty(list))
982 			__remove_assoc_queue(BH_ENTRY(list->next));
983 		spin_unlock(&buffer_mapping->private_lock);
984 	}
985 }
986 
987 /*
988  * Remove any clean buffers from the inode's buffer list.  This is called
989  * when we're trying to free the inode itself.  Those buffers can pin it.
990  *
991  * Returns true if all buffers were removed.
992  */
993 int remove_inode_buffers(struct inode *inode)
994 {
995 	int ret = 1;
996 
997 	if (inode_has_buffers(inode)) {
998 		struct address_space *mapping = &inode->i_data;
999 		struct list_head *list = &mapping->private_list;
1000 		struct address_space *buffer_mapping = mapping->assoc_mapping;
1001 
1002 		spin_lock(&buffer_mapping->private_lock);
1003 		while (!list_empty(list)) {
1004 			struct buffer_head *bh = BH_ENTRY(list->next);
1005 			if (buffer_dirty(bh)) {
1006 				ret = 0;
1007 				break;
1008 			}
1009 			__remove_assoc_queue(bh);
1010 		}
1011 		spin_unlock(&buffer_mapping->private_lock);
1012 	}
1013 	return ret;
1014 }
1015 
1016 /*
1017  * Create the appropriate buffers when given a page for data area and
1018  * the size of each buffer.. Use the bh->b_this_page linked list to
1019  * follow the buffers created.  Return NULL if unable to create more
1020  * buffers.
1021  *
1022  * The retry flag is used to differentiate async IO (paging, swapping)
1023  * which may not fail from ordinary buffer allocations.
1024  */
1025 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
1026 		int retry)
1027 {
1028 	struct buffer_head *bh, *head;
1029 	long offset;
1030 
1031 try_again:
1032 	head = NULL;
1033 	offset = PAGE_SIZE;
1034 	while ((offset -= size) >= 0) {
1035 		bh = alloc_buffer_head(GFP_NOFS);
1036 		if (!bh)
1037 			goto no_grow;
1038 
1039 		bh->b_bdev = NULL;
1040 		bh->b_this_page = head;
1041 		bh->b_blocknr = -1;
1042 		head = bh;
1043 
1044 		bh->b_state = 0;
1045 		atomic_set(&bh->b_count, 0);
1046 		bh->b_size = size;
1047 
1048 		/* Link the buffer to its page */
1049 		set_bh_page(bh, page, offset);
1050 
1051 		bh->b_end_io = NULL;
1052 	}
1053 	return head;
1054 /*
1055  * In case anything failed, we just free everything we got.
1056  */
1057 no_grow:
1058 	if (head) {
1059 		do {
1060 			bh = head;
1061 			head = head->b_this_page;
1062 			free_buffer_head(bh);
1063 		} while (head);
1064 	}
1065 
1066 	/*
1067 	 * Return failure for non-async IO requests.  Async IO requests
1068 	 * are not allowed to fail, so we have to wait until buffer heads
1069 	 * become available.  But we don't want tasks sleeping with
1070 	 * partially complete buffers, so all were released above.
1071 	 */
1072 	if (!retry)
1073 		return NULL;
1074 
1075 	/* We're _really_ low on memory. Now we just
1076 	 * wait for old buffer heads to become free due to
1077 	 * finishing IO.  Since this is an async request and
1078 	 * the reserve list is empty, we're sure there are
1079 	 * async buffer heads in use.
1080 	 */
1081 	free_more_memory();
1082 	goto try_again;
1083 }
1084 EXPORT_SYMBOL_GPL(alloc_page_buffers);
1085 
1086 static inline void
1087 link_dev_buffers(struct page *page, struct buffer_head *head)
1088 {
1089 	struct buffer_head *bh, *tail;
1090 
1091 	bh = head;
1092 	do {
1093 		tail = bh;
1094 		bh = bh->b_this_page;
1095 	} while (bh);
1096 	tail->b_this_page = head;
1097 	attach_page_buffers(page, head);
1098 }
1099 
1100 /*
1101  * Initialise the state of a blockdev page's buffers.
1102  */
1103 static void
1104 init_page_buffers(struct page *page, struct block_device *bdev,
1105 			sector_t block, int size)
1106 {
1107 	struct buffer_head *head = page_buffers(page);
1108 	struct buffer_head *bh = head;
1109 	int uptodate = PageUptodate(page);
1110 
1111 	do {
1112 		if (!buffer_mapped(bh)) {
1113 			init_buffer(bh, NULL, NULL);
1114 			bh->b_bdev = bdev;
1115 			bh->b_blocknr = block;
1116 			if (uptodate)
1117 				set_buffer_uptodate(bh);
1118 			set_buffer_mapped(bh);
1119 		}
1120 		block++;
1121 		bh = bh->b_this_page;
1122 	} while (bh != head);
1123 }
1124 
1125 /*
1126  * Create the page-cache page that contains the requested block.
1127  *
1128  * This is used purely for blockdev mappings.
1129  */
1130 static struct page *
1131 grow_dev_page(struct block_device *bdev, sector_t block,
1132 		pgoff_t index, int size)
1133 {
1134 	struct inode *inode = bdev->bd_inode;
1135 	struct page *page;
1136 	struct buffer_head *bh;
1137 
1138 	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1139 	if (!page)
1140 		return NULL;
1141 
1142 	if (!PageLocked(page))
1143 		BUG();
1144 
1145 	if (page_has_buffers(page)) {
1146 		bh = page_buffers(page);
1147 		if (bh->b_size == size) {
1148 			init_page_buffers(page, bdev, block, size);
1149 			return page;
1150 		}
1151 		if (!try_to_free_buffers(page))
1152 			goto failed;
1153 	}
1154 
1155 	/*
1156 	 * Allocate some buffers for this page
1157 	 */
1158 	bh = alloc_page_buffers(page, size, 0);
1159 	if (!bh)
1160 		goto failed;
1161 
1162 	/*
1163 	 * Link the page to the buffers and initialise them.  Take the
1164 	 * lock to be atomic wrt __find_get_block(), which does not
1165 	 * run under the page lock.
1166 	 */
1167 	spin_lock(&inode->i_mapping->private_lock);
1168 	link_dev_buffers(page, bh);
1169 	init_page_buffers(page, bdev, block, size);
1170 	spin_unlock(&inode->i_mapping->private_lock);
1171 	return page;
1172 
1173 failed:
1174 	BUG();
1175 	unlock_page(page);
1176 	page_cache_release(page);
1177 	return NULL;
1178 }
1179 
1180 /*
1181  * Create buffers for the specified block device block's page.  If
1182  * that page was dirty, the buffers are set dirty also.
1183  *
1184  * Except that's a bug.  Attaching dirty buffers to a dirty
1185  * blockdev's page can result in filesystem corruption, because
1186  * some of those buffers may be aliases of filesystem data.
1187  * grow_dev_page() will go BUG() if this happens.
1188  */
1189 static inline int
1190 grow_buffers(struct block_device *bdev, sector_t block, int size)
1191 {
1192 	struct page *page;
1193 	pgoff_t index;
1194 	int sizebits;
1195 
1196 	sizebits = -1;
1197 	do {
1198 		sizebits++;
1199 	} while ((size << sizebits) < PAGE_SIZE);
1200 
1201 	index = block >> sizebits;
1202 	block = index << sizebits;
1203 
1204 	/* Create a page with the proper size buffers.. */
1205 	page = grow_dev_page(bdev, block, index, size);
1206 	if (!page)
1207 		return 0;
1208 	unlock_page(page);
1209 	page_cache_release(page);
1210 	return 1;
1211 }
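
/*
 * Worked example of the arithmetic above: with 4k pages and 1k blocks,
 * sizebits == 2, so block 103 lives on page index 103 >> 2 == 25 and
 * is rounded down to block 100, the first block of that page.
 */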
1212 
1213 struct buffer_head *
1214 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1215 {
1216 	/* Size must be a multiple of the hard sector size */
1217 	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1218 			(size < 512 || size > PAGE_SIZE))) {
1219 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1220 					size);
1221 		printk(KERN_ERR "hardsect size: %d\n",
1222 					bdev_hardsect_size(bdev));
1223 
1224 		dump_stack();
1225 		return NULL;
1226 	}
1227 
1228 	for (;;) {
1229 		struct buffer_head * bh;
1230 
1231 		bh = __find_get_block(bdev, block, size);
1232 		if (bh)
1233 			return bh;
1234 
1235 		if (!grow_buffers(bdev, block, size))
1236 			free_more_memory();
1237 	}
1238 }
1239 
1240 /*
1241  * The relationship between dirty buffers and dirty pages:
1242  *
1243  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1244  * the page is tagged dirty in its radix tree.
1245  *
1246  * At all times, the dirtiness of the buffers represents the dirtiness of
1247  * subsections of the page.  If the page has buffers, the page dirty bit is
1248  * merely a hint about the true dirty state.
1249  *
1250  * When a page is set dirty in its entirety, all its buffers are marked dirty
1251  * (if the page has buffers).
1252  *
1253  * When a buffer is marked dirty, its page is dirtied, but the page's other
1254  * buffers are not.
1255  *
1256  * Also.  When blockdev buffers are explicitly read with bread(), they
1257  * individually become uptodate.  But their backing page remains not
1258  * uptodate - even if all of its buffers are uptodate.  A subsequent
1259  * block_read_full_page() against that page will discover all the uptodate
1260  * buffers, will set the page uptodate and will perform no I/O.
1261  */
1262 
1263 /**
1264  * mark_buffer_dirty - mark a buffer_head as needing writeout
1265  * @bh: the buffer_head to mark dirty
1266  *
1267  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1268  * backing page dirty, then tag the page as dirty in its address_space's radix
1269  * tree and then attach the address_space's inode to its superblock's dirty
1270  * inode list.
1271  *
1272  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1273  * mapping->tree_lock and the global inode_lock.
1274  */
1275 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1276 {
1277 	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1278 		__set_page_dirty_nobuffers(bh->b_page);
1279 }
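
/*
 * Typical calling pattern (illustrative): modify the buffer's contents,
 * then mark it dirty so that writeback will eventually write it out.
 */
static void example_modify_block(struct buffer_head *bh)
{
	memset(bh->b_data, 0, bh->b_size);	/* change the data... */
	mark_buffer_dirty(bh);			/* ...then record the fact */
}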
1280 
1281 /*
1282  * Decrement a buffer_head's reference count.  If all buffers against a page
1283  * have zero reference count, are clean and unlocked, and if the page is clean
1284  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1285  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1286  * a page but it ends up not being freed, and buffers may later be reattached).
1287  */
1288 void __brelse(struct buffer_head * buf)
1289 {
1290 	if (atomic_read(&buf->b_count)) {
1291 		put_bh(buf);
1292 		return;
1293 	}
1294 	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1295 	WARN_ON(1);
1296 }
1297 
1298 /*
1299  * bforget() is like brelse(), except it discards any
1300  * potentially dirty data.
1301  */
1302 void __bforget(struct buffer_head *bh)
1303 {
1304 	clear_buffer_dirty(bh);
1305 	if (!list_empty(&bh->b_assoc_buffers)) {
1306 		struct address_space *buffer_mapping = bh->b_page->mapping;
1307 
1308 		spin_lock(&buffer_mapping->private_lock);
1309 		list_del_init(&bh->b_assoc_buffers);
1310 		spin_unlock(&buffer_mapping->private_lock);
1311 	}
1312 	__brelse(bh);
1313 }
1314 
1315 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1316 {
1317 	lock_buffer(bh);
1318 	if (buffer_uptodate(bh)) {
1319 		unlock_buffer(bh);
1320 		return bh;
1321 	} else {
1322 		get_bh(bh);
1323 		bh->b_end_io = end_buffer_read_sync;
1324 		submit_bh(READ, bh);
1325 		wait_on_buffer(bh);
1326 		if (buffer_uptodate(bh))
1327 			return bh;
1328 	}
1329 	brelse(bh);
1330 	return NULL;
1331 }
1332 
1333 /*
1334  * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
1335  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1336  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1337  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1338  * CPU's LRUs at the same time.
1339  *
1340  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1341  * sb_find_get_block().
1342  *
1343  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1344  * a local interrupt disable for that.
1345  */
1346 
1347 #define BH_LRU_SIZE	8
1348 
1349 struct bh_lru {
1350 	struct buffer_head *bhs[BH_LRU_SIZE];
1351 };
1352 
1353 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1354 
1355 #ifdef CONFIG_SMP
1356 #define bh_lru_lock()	local_irq_disable()
1357 #define bh_lru_unlock()	local_irq_enable()
1358 #else
1359 #define bh_lru_lock()	preempt_disable()
1360 #define bh_lru_unlock()	preempt_enable()
1361 #endif
1362 
1363 static inline void check_irqs_on(void)
1364 {
1365 #ifdef irqs_disabled
1366 	BUG_ON(irqs_disabled());
1367 #endif
1368 }
1369 
1370 /*
1371  * The LRU management algorithm is dopey-but-simple.  Sorry.
1372  */
1373 static void bh_lru_install(struct buffer_head *bh)
1374 {
1375 	struct buffer_head *evictee = NULL;
1376 	struct bh_lru *lru;
1377 
1378 	check_irqs_on();
1379 	bh_lru_lock();
1380 	lru = &__get_cpu_var(bh_lrus);
1381 	if (lru->bhs[0] != bh) {
1382 		struct buffer_head *bhs[BH_LRU_SIZE];
1383 		int in;
1384 		int out = 0;
1385 
1386 		get_bh(bh);
1387 		bhs[out++] = bh;
1388 		for (in = 0; in < BH_LRU_SIZE; in++) {
1389 			struct buffer_head *bh2 = lru->bhs[in];
1390 
1391 			if (bh2 == bh) {
1392 				__brelse(bh2);
1393 			} else {
1394 				if (out >= BH_LRU_SIZE) {
1395 					BUG_ON(evictee != NULL);
1396 					evictee = bh2;
1397 				} else {
1398 					bhs[out++] = bh2;
1399 				}
1400 			}
1401 		}
1402 		while (out < BH_LRU_SIZE)
1403 			bhs[out++] = NULL;
1404 		memcpy(lru->bhs, bhs, sizeof(bhs));
1405 	}
1406 	bh_lru_unlock();
1407 
1408 	if (evictee)
1409 		__brelse(evictee);
1410 }
1411 
1412 /*
1413  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1414  */
1415 static inline struct buffer_head *
1416 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1417 {
1418 	struct buffer_head *ret = NULL;
1419 	struct bh_lru *lru;
1420 	int i;
1421 
1422 	check_irqs_on();
1423 	bh_lru_lock();
1424 	lru = &__get_cpu_var(bh_lrus);
1425 	for (i = 0; i < BH_LRU_SIZE; i++) {
1426 		struct buffer_head *bh = lru->bhs[i];
1427 
1428 		if (bh && bh->b_bdev == bdev &&
1429 				bh->b_blocknr == block && bh->b_size == size) {
1430 			if (i) {
1431 				while (i) {
1432 					lru->bhs[i] = lru->bhs[i - 1];
1433 					i--;
1434 				}
1435 				lru->bhs[0] = bh;
1436 			}
1437 			get_bh(bh);
1438 			ret = bh;
1439 			break;
1440 		}
1441 	}
1442 	bh_lru_unlock();
1443 	return ret;
1444 }
1445 
1446 /*
1447  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1448  * it in the LRU and mark it as accessed.  If it is not present then return
1449  * NULL
1450  */
1451 struct buffer_head *
1452 __find_get_block(struct block_device *bdev, sector_t block, int size)
1453 {
1454 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1455 
1456 	if (bh == NULL) {
1457 		bh = __find_get_block_slow(bdev, block, size);
1458 		if (bh)
1459 			bh_lru_install(bh);
1460 	}
1461 	if (bh)
1462 		touch_buffer(bh);
1463 	return bh;
1464 }
1465 EXPORT_SYMBOL(__find_get_block);
1466 
1467 /*
1468  * __getblk will locate (and, if necessary, create) the buffer_head
1469  * which corresponds to the passed block_device, block and size. The
1470  * returned buffer has its reference count incremented.
1471  *
1472  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1473  * illegal block number, __getblk() will happily return a buffer_head
1474  * which represents the non-existent block.  Very weird.
1475  *
1476  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1477  * attempt is failing.  FIXME, perhaps?
1478  */
1479 struct buffer_head *
1480 __getblk(struct block_device *bdev, sector_t block, int size)
1481 {
1482 	struct buffer_head *bh = __find_get_block(bdev, block, size);
1483 
1484 	might_sleep();
1485 	if (bh == NULL)
1486 		bh = __getblk_slow(bdev, block, size);
1487 	return bh;
1488 }
1489 EXPORT_SYMBOL(__getblk);
1490 
1491 /*
1492  * Do async read-ahead on a buffer..
1493  */
1494 void __breadahead(struct block_device *bdev, sector_t block, int size)
1495 {
1496 	struct buffer_head *bh = __getblk(bdev, block, size);
1497 	ll_rw_block(READA, 1, &bh);
1498 	brelse(bh);
1499 }
1500 EXPORT_SYMBOL(__breadahead);
1501 
1502 /**
1503  *  __bread() - reads a specified block and returns the bh
1504  *  @bdev: the block_device to read from
1505  *  @block: number of block
1506  *  @size: size (in bytes) to read
1507  *
1508  *  Reads a specified block, and returns buffer head that contains it.
1509  *  It returns NULL if the block was unreadable.
1510  */
1511 struct buffer_head *
1512 __bread(struct block_device *bdev, sector_t block, int size)
1513 {
1514 	struct buffer_head *bh = __getblk(bdev, block, size);
1515 
1516 	if (!buffer_uptodate(bh))
1517 		bh = __bread_slow(bh);
1518 	return bh;
1519 }
1520 EXPORT_SYMBOL(__bread);
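
/*
 * Usage sketch (example_read_first_block() is hypothetical): read block
 * zero of a device and drop the reference.  Filesystems normally use
 * the sb_bread() wrapper from buffer_head.h instead.
 */
static void example_read_first_block(struct block_device *bdev, int blocksize)
{
	struct buffer_head *bh = __bread(bdev, 0, blocksize);

	if (bh) {
		/* bh->b_data now holds blocksize bytes of on-disk data */
		brelse(bh);
	}
}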
1521 
1522 /*
1523  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1524  * This doesn't race because it runs in each cpu either in irq
1525  * or with preempt disabled.
1526  */
1527 static void invalidate_bh_lru(void *arg)
1528 {
1529 	struct bh_lru *b = &get_cpu_var(bh_lrus);
1530 	int i;
1531 
1532 	for (i = 0; i < BH_LRU_SIZE; i++) {
1533 		brelse(b->bhs[i]);
1534 		b->bhs[i] = NULL;
1535 	}
1536 	put_cpu_var(bh_lrus);
1537 }
1538 
1539 static void invalidate_bh_lrus(void)
1540 {
1541 	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1542 }
1543 
1544 void set_bh_page(struct buffer_head *bh,
1545 		struct page *page, unsigned long offset)
1546 {
1547 	bh->b_page = page;
1548 	if (offset >= PAGE_SIZE)
1549 		BUG();
1550 	if (PageHighMem(page))
1551 		/*
1552 		 * This catches illegal uses and preserves the offset:
1553 		 */
1554 		bh->b_data = (char *)(0 + offset);
1555 	else
1556 		bh->b_data = page_address(page) + offset;
1557 }
1558 EXPORT_SYMBOL(set_bh_page);
1559 
1560 /*
1561  * Called when truncating a buffer on a page completely.
1562  */
1563 static inline void discard_buffer(struct buffer_head * bh)
1564 {
1565 	lock_buffer(bh);
1566 	clear_buffer_dirty(bh);
1567 	bh->b_bdev = NULL;
1568 	clear_buffer_mapped(bh);
1569 	clear_buffer_req(bh);
1570 	clear_buffer_new(bh);
1571 	clear_buffer_delay(bh);
1572 	unlock_buffer(bh);
1573 }
1574 
1575 /**
1576  * try_to_release_page() - release old fs-specific metadata on a page
1577  *
1578  * @page: the page which the kernel is trying to free
1579  * @gfp_mask: memory allocation flags (and I/O mode)
1580  *
1581  * The address_space is to try to release any data against the page
1582  * (presumably at page->private).  If the release was successful, return `1'.
1583  * Otherwise return zero.
1584  *
1585  * The @gfp_mask argument specifies whether I/O may be performed to release
1586  * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1587  *
1588  * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1589  */
1590 int try_to_release_page(struct page *page, int gfp_mask)
1591 {
1592 	struct address_space * const mapping = page->mapping;
1593 
1594 	BUG_ON(!PageLocked(page));
1595 	if (PageWriteback(page))
1596 		return 0;
1597 
1598 	if (mapping && mapping->a_ops->releasepage)
1599 		return mapping->a_ops->releasepage(page, gfp_mask);
1600 	return try_to_free_buffers(page);
1601 }
1602 EXPORT_SYMBOL(try_to_release_page);
1603 
1604 /**
1605  * block_invalidatepage - invalidate part or all of a buffer-backed page
1606  *
1607  * @page: the page which is affected
1608  * @offset: the index of the truncation point
1609  *
1610  * block_invalidatepage() is called when all or part of the page has become
1611  * invalidated by a truncate operation.
1612  *
1613  * block_invalidatepage() does not have to release all buffers, but it must
1614  * ensure that no dirty buffer is left outside @offset and that no I/O
1615  * is underway against any of the blocks which are outside the truncation
1616  * point.  Because the caller is about to free (and possibly reuse) those
1617  * blocks on-disk.
1618  */
1619 int block_invalidatepage(struct page *page, unsigned long offset)
1620 {
1621 	struct buffer_head *head, *bh, *next;
1622 	unsigned int curr_off = 0;
1623 	int ret = 1;
1624 
1625 	BUG_ON(!PageLocked(page));
1626 	if (!page_has_buffers(page))
1627 		goto out;
1628 
1629 	head = page_buffers(page);
1630 	bh = head;
1631 	do {
1632 		unsigned int next_off = curr_off + bh->b_size;
1633 		next = bh->b_this_page;
1634 
1635 		/*
1636 		 * is this block fully invalidated?
1637 		 */
1638 		if (offset <= curr_off)
1639 			discard_buffer(bh);
1640 		curr_off = next_off;
1641 		bh = next;
1642 	} while (bh != head);
1643 
1644 	/*
1645 	 * We release buffers only if the entire page is being invalidated.
1646 	 * The get_block cached value has been unconditionally invalidated,
1647 	 * so real IO is not possible anymore.
1648 	 */
1649 	if (offset == 0)
1650 		ret = try_to_release_page(page, 0);
1651 out:
1652 	return ret;
1653 }
1654 EXPORT_SYMBOL(block_invalidatepage);
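
/*
 * Wiring sketch (illustrative, incomplete): a buffer-backed filesystem
 * typically points its address_space_operations at this helper; a real
 * aops would of course fill in readpage, writepage and friends too.
 */
static struct address_space_operations example_aops = {
	.invalidatepage	= block_invalidatepage,
};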
1655 
1656 /*
1657  * We attach and possibly dirty the buffers atomically wrt
1658  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1659  * is already excluded via the page lock.
1660  */
1661 void create_empty_buffers(struct page *page,
1662 			unsigned long blocksize, unsigned long b_state)
1663 {
1664 	struct buffer_head *bh, *head, *tail;
1665 
1666 	head = alloc_page_buffers(page, blocksize, 1);
1667 	bh = head;
1668 	do {
1669 		bh->b_state |= b_state;
1670 		tail = bh;
1671 		bh = bh->b_this_page;
1672 	} while (bh);
1673 	tail->b_this_page = head;
1674 
1675 	spin_lock(&page->mapping->private_lock);
1676 	if (PageUptodate(page) || PageDirty(page)) {
1677 		bh = head;
1678 		do {
1679 			if (PageDirty(page))
1680 				set_buffer_dirty(bh);
1681 			if (PageUptodate(page))
1682 				set_buffer_uptodate(bh);
1683 			bh = bh->b_this_page;
1684 		} while (bh != head);
1685 	}
1686 	attach_page_buffers(page, head);
1687 	spin_unlock(&page->mapping->private_lock);
1688 }
1689 EXPORT_SYMBOL(create_empty_buffers);
1690 
1691 /*
1692  * We are taking a block for data and we don't want any output from any
1693  * buffer-cache aliases from the moment this function returns
1694  * until the moment when something explicitly marks the buffer
1695  * dirty (hopefully that will not happen until we free that block ;-)
1696  * We don't even need to mark it not-uptodate - nobody can expect
1697  * anything from a newly allocated buffer anyway. We used to use
1698  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1699  * don't want to mark the alias unmapped, for example - it would confuse
1700  * anyone who might pick it with bread() afterwards...
1701  *
1702  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1703  * be writeout I/O going on against recently-freed buffers.  We don't
1704  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1705  * only if we really need to.  That happens here.
1706  */
1707 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1708 {
1709 	struct buffer_head *old_bh;
1710 
1711 	might_sleep();
1712 
1713 	old_bh = __find_get_block_slow(bdev, block, 0);
1714 	if (old_bh) {
1715 		clear_buffer_dirty(old_bh);
1716 		wait_on_buffer(old_bh);
1717 		clear_buffer_req(old_bh);
1718 		__brelse(old_bh);
1719 	}
1720 }
1721 EXPORT_SYMBOL(unmap_underlying_metadata);
1722 
1723 /*
1724  * NOTE! All mapped/uptodate combinations are valid:
1725  *
1726  *	Mapped	Uptodate	Meaning
1727  *
1728  *	No	No		"unknown" - must do get_block()
1729  *	No	Yes		"hole" - zero-filled
1730  *	Yes	No		"allocated" - allocated on disk, not read in
1731  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1732  *
1733  * "Dirty" is valid only with the last case (mapped+uptodate).
1734  */
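
/*
 * Illustrative reading of the table above (example_needs_io() is
 * hypothetical): only a mapped-but-not-uptodate buffer needs a read;
 * an unmapped, not-uptodate one needs get_block() first, and a hole
 * or valid buffer needs no I/O at all.
 */
static int example_needs_io(struct buffer_head *bh)
{
	if (buffer_uptodate(bh))
		return 0;		/* hole or valid: no read needed */
	return buffer_mapped(bh);	/* allocated: must read from disk */
}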
1735 
1736 /*
1737  * While block_write_full_page is writing back the dirty buffers under
1738  * the page lock, whoever dirtied the buffers may decide to clean them
1739  * again at any time.  We handle that by only looking at the buffer
1740  * state inside lock_buffer().
1741  *
1742  * If block_write_full_page() is called for regular writeback
1743  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1744  * locked buffer.   This can only happen if someone has written the buffer
1745  * directly, with submit_bh().  At the address_space level PageWriteback
1746  * prevents this contention from occurring.
1747  */
1748 static int __block_write_full_page(struct inode *inode, struct page *page,
1749 			get_block_t *get_block, struct writeback_control *wbc)
1750 {
1751 	int err;
1752 	sector_t block;
1753 	sector_t last_block;
1754 	struct buffer_head *bh, *head;
1755 	int nr_underway = 0;
1756 
1757 	BUG_ON(!PageLocked(page));
1758 
1759 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1760 
1761 	if (!page_has_buffers(page)) {
1762 		create_empty_buffers(page, 1 << inode->i_blkbits,
1763 					(1 << BH_Dirty)|(1 << BH_Uptodate));
1764 	}
1765 
1766 	/*
1767 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1768 	 * here, and the (potentially unmapped) buffers may become dirty at
1769 	 * any time.  If a buffer becomes dirty here after we've inspected it
1770 	 * then we just miss that fact, and the page stays dirty.
1771 	 *
1772 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1773 	 * handle that here by just cleaning them.
1774 	 */
1775 
1776 	block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1777 	head = page_buffers(page);
1778 	bh = head;
1779 
1780 	/*
1781 	 * Get all the dirty buffers mapped to disk addresses and
1782 	 * handle any aliases from the underlying blockdev's mapping.
1783 	 */
1784 	do {
1785 		if (block > last_block) {
1786 			/*
1787 			 * mapped buffers outside i_size will occur, because
1788 			 * this page can be outside i_size when there is a
1789 			 * truncate in progress.
1790 			 */
1791 			/*
1792 			 * The buffer was zeroed by block_write_full_page()
1793 			 */
1794 			clear_buffer_dirty(bh);
1795 			set_buffer_uptodate(bh);
1796 		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1797 			err = get_block(inode, block, bh, 1);
1798 			if (err)
1799 				goto recover;
1800 			if (buffer_new(bh)) {
1801 				/* blockdev mappings never come here */
1802 				clear_buffer_new(bh);
1803 				unmap_underlying_metadata(bh->b_bdev,
1804 							bh->b_blocknr);
1805 			}
1806 		}
1807 		bh = bh->b_this_page;
1808 		block++;
1809 	} while (bh != head);
1810 
1811 	do {
1812 		get_bh(bh);
1813 		if (!buffer_mapped(bh))
1814 			continue;
1815 		/*
1816 		 * If it's a fully non-blocking write attempt and we cannot
1817 		 * lock the buffer then redirty the page.  Note that this can
1818 		 * potentially cause a busy-wait loop from pdflush and kswapd
1819 		 * activity, but those code paths have their own higher-level
1820 		 * throttling.
1821 		 */
1822 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1823 			lock_buffer(bh);
1824 		} else if (test_set_buffer_locked(bh)) {
1825 			redirty_page_for_writepage(wbc, page);
1826 			continue;
1827 		}
1828 		if (test_clear_buffer_dirty(bh)) {
1829 			mark_buffer_async_write(bh);
1830 		} else {
1831 			unlock_buffer(bh);
1832 		}
1833 	} while ((bh = bh->b_this_page) != head);
1834 
1835 	/*
1836 	 * The page and its buffers are protected by PageWriteback(), so we can
1837 	 * drop the bh refcounts early.
1838 	 */
1839 	BUG_ON(PageWriteback(page));
1840 	set_page_writeback(page);
1841 	unlock_page(page);
1842 
1843 	do {
1844 		struct buffer_head *next = bh->b_this_page;
1845 		if (buffer_async_write(bh)) {
1846 			submit_bh(WRITE, bh);
1847 			nr_underway++;
1848 		}
1849 		put_bh(bh);
1850 		bh = next;
1851 	} while (bh != head);
1852 
1853 	err = 0;
1854 done:
1855 	if (nr_underway == 0) {
1856 		/*
1857 		 * The page was marked dirty, but the buffers were
1858 		 * clean.  Someone wrote them back by hand with
1859 		 * ll_rw_block/submit_bh.  A rare case.
1860 		 */
1861 		int uptodate = 1;
1862 		do {
1863 			if (!buffer_uptodate(bh)) {
1864 				uptodate = 0;
1865 				break;
1866 			}
1867 			bh = bh->b_this_page;
1868 		} while (bh != head);
1869 		if (uptodate)
1870 			SetPageUptodate(page);
1871 		end_page_writeback(page);
1872 		/*
1873 		 * The page and buffer_heads can be released at any time from
1874 		 * here on.
1875 		 */
1876 		wbc->pages_skipped++;	/* We didn't write this page */
1877 	}
1878 	return err;
1879 
1880 recover:
1881 	/*
1882 	 * ENOSPC, or some other error.  We may already have added some
1883 	 * blocks to the file, so we need to write these out to avoid
1884 	 * exposing stale data.
1885 	 * The page is currently locked and not marked for writeback
1886 	 */
1887 	bh = head;
1888 	/* Recovery: lock and submit the mapped buffers */
1889 	do {
1890 		get_bh(bh);
1891 		if (buffer_mapped(bh) && buffer_dirty(bh)) {
1892 			lock_buffer(bh);
1893 			mark_buffer_async_write(bh);
1894 		} else {
1895 			/*
1896 			 * The buffer may have been set dirty during
1897 			 * attachment to a dirty page.
1898 			 */
1899 			clear_buffer_dirty(bh);
1900 		}
1901 	} while ((bh = bh->b_this_page) != head);
1902 	SetPageError(page);
1903 	BUG_ON(PageWriteback(page));
1904 	set_page_writeback(page);
1905 	unlock_page(page);
1906 	do {
1907 		struct buffer_head *next = bh->b_this_page;
1908 		if (buffer_async_write(bh)) {
1909 			clear_buffer_dirty(bh);
1910 			submit_bh(WRITE, bh);
1911 			nr_underway++;
1912 		}
1913 		put_bh(bh);
1914 		bh = next;
1915 	} while (bh != head);
1916 	goto done;
1917 }
1918 
1919 static int __block_prepare_write(struct inode *inode, struct page *page,
1920 		unsigned from, unsigned to, get_block_t *get_block)
1921 {
1922 	unsigned block_start, block_end;
1923 	sector_t block;
1924 	int err = 0;
1925 	unsigned blocksize, bbits;
1926 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1927 
1928 	BUG_ON(!PageLocked(page));
1929 	BUG_ON(from > PAGE_CACHE_SIZE);
1930 	BUG_ON(to > PAGE_CACHE_SIZE);
1931 	BUG_ON(from > to);
1932 
1933 	blocksize = 1 << inode->i_blkbits;
1934 	if (!page_has_buffers(page))
1935 		create_empty_buffers(page, blocksize, 0);
1936 	head = page_buffers(page);
1937 
1938 	bbits = inode->i_blkbits;
1939 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1940 
1941 	for(bh = head, block_start = 0; bh != head || !block_start;
1942 	    block++, block_start=block_end, bh = bh->b_this_page) {
1943 		block_end = block_start + blocksize;
1944 		if (block_end <= from || block_start >= to) {
1945 			if (PageUptodate(page)) {
1946 				if (!buffer_uptodate(bh))
1947 					set_buffer_uptodate(bh);
1948 			}
1949 			continue;
1950 		}
1951 		if (buffer_new(bh))
1952 			clear_buffer_new(bh);
1953 		if (!buffer_mapped(bh)) {
1954 			err = get_block(inode, block, bh, 1);
1955 			if (err)
1956 				goto out;
1957 			if (buffer_new(bh)) {
1958 				clear_buffer_new(bh);
1959 				unmap_underlying_metadata(bh->b_bdev,
1960 							bh->b_blocknr);
1961 				if (PageUptodate(page)) {
1962 					set_buffer_uptodate(bh);
1963 					continue;
1964 				}
1965 				if (block_end > to || block_start < from) {
1966 					void *kaddr;
1967 
1968 					kaddr = kmap_atomic(page, KM_USER0);
1969 					if (block_end > to)
1970 						memset(kaddr+to, 0,
1971 							block_end-to);
1972 					if (block_start < from)
1973 						memset(kaddr+block_start,
1974 							0, from-block_start);
1975 					flush_dcache_page(page);
1976 					kunmap_atomic(kaddr, KM_USER0);
1977 				}
1978 				continue;
1979 			}
1980 		}
1981 		if (PageUptodate(page)) {
1982 			if (!buffer_uptodate(bh))
1983 				set_buffer_uptodate(bh);
1984 			continue;
1985 		}
1986 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1987 		     (block_start < from || block_end > to)) {
1988 			ll_rw_block(READ, 1, &bh);
1989 			*wait_bh++=bh;
1990 		}
1991 	}
1992 	/*
1993 	 * If we issued read requests - let them complete.
1994 	 */
1995 	while(wait_bh > wait) {
1996 		wait_on_buffer(*--wait_bh);
1997 		if (!buffer_uptodate(*wait_bh))
1998 			return -EIO;
1999 	}
2000 	return 0;
2001 out:
2002 	/*
2003 	 * Zero out any newly allocated blocks to avoid exposing stale
2004 	 * data.  If BH_New is set, we know that the block was newly
2005 	 * allocated in the above loop.
2006 	 */
2007 	bh = head;
2008 	block_start = 0;
2009 	do {
2010 		block_end = block_start+blocksize;
2011 		if (block_end <= from)
2012 			goto next_bh;
2013 		if (block_start >= to)
2014 			break;
2015 		if (buffer_new(bh)) {
2016 			void *kaddr;
2017 
2018 			clear_buffer_new(bh);
2019 			kaddr = kmap_atomic(page, KM_USER0);
2020 			memset(kaddr+block_start, 0, bh->b_size);
2021 			kunmap_atomic(kaddr, KM_USER0);
2022 			set_buffer_uptodate(bh);
2023 			mark_buffer_dirty(bh);
2024 		}
2025 next_bh:
2026 		block_start = block_end;
2027 		bh = bh->b_this_page;
2028 	} while (bh != head);
2029 	return err;
2030 }
2031 
2032 static int __block_commit_write(struct inode *inode, struct page *page,
2033 		unsigned from, unsigned to)
2034 {
2035 	unsigned block_start, block_end;
2036 	int partial = 0;
2037 	unsigned blocksize;
2038 	struct buffer_head *bh, *head;
2039 
2040 	blocksize = 1 << inode->i_blkbits;
2041 
2042 	for(bh = head = page_buffers(page), block_start = 0;
2043 	    bh != head || !block_start;
2044 	    block_start=block_end, bh = bh->b_this_page) {
2045 		block_end = block_start + blocksize;
2046 		if (block_end <= from || block_start >= to) {
2047 			if (!buffer_uptodate(bh))
2048 				partial = 1;
2049 		} else {
2050 			set_buffer_uptodate(bh);
2051 			mark_buffer_dirty(bh);
2052 		}
2053 	}
2054 
2055 	/*
2056 	 * If this is a partial write which happened to make all buffers
2057 	 * uptodate then we can optimize away a bogus readpage() for
2058 	 * the next read(). Here we 'discover' whether the page went
2059 	 * uptodate as a result of this (potentially partial) write.
2060 	 */
2061 	if (!partial)
2062 		SetPageUptodate(page);
2063 	return 0;
2064 }
2065 
2066 /*
2067  * Generic "read page" function for block devices that have the normal
2068  * get_block functionality. This is most of the block device filesystems.
2069  * Reads the page asynchronously --- the unlock_buffer() and
2070  * set/clear_buffer_uptodate() functions propagate buffer state into the
2071  * page struct once IO has completed.
2072  */
2073 int block_read_full_page(struct page *page, get_block_t *get_block)
2074 {
2075 	struct inode *inode = page->mapping->host;
2076 	sector_t iblock, lblock;
2077 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2078 	unsigned int blocksize;
2079 	int nr, i;
2080 	int fully_mapped = 1;
2081 
2082 	BUG_ON(!PageLocked(page));
2083 	blocksize = 1 << inode->i_blkbits;
2084 	if (!page_has_buffers(page))
2085 		create_empty_buffers(page, blocksize, 0);
2086 	head = page_buffers(page);
2087 
2088 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2089 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2090 	bh = head;
2091 	nr = 0;
2092 	i = 0;
2093 
2094 	do {
2095 		if (buffer_uptodate(bh))
2096 			continue;
2097 
2098 		if (!buffer_mapped(bh)) {
2099 			fully_mapped = 0;
2100 			if (iblock < lblock) {
2101 				if (get_block(inode, iblock, bh, 0))
2102 					SetPageError(page);
2103 			}
2104 			if (!buffer_mapped(bh)) {
2105 				void *kaddr = kmap_atomic(page, KM_USER0);
2106 				memset(kaddr + i * blocksize, 0, blocksize);
2107 				flush_dcache_page(page);
2108 				kunmap_atomic(kaddr, KM_USER0);
2109 				set_buffer_uptodate(bh);
2110 				continue;
2111 			}
2112 			/*
2113 			 * get_block() might have updated the buffer
2114 			 * synchronously
2115 			 */
2116 			if (buffer_uptodate(bh))
2117 				continue;
2118 		}
2119 		arr[nr++] = bh;
2120 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2121 
2122 	if (fully_mapped)
2123 		SetPageMappedToDisk(page);
2124 
2125 	if (!nr) {
2126 		/*
2127 		 * All buffers are uptodate - we can set the page uptodate
2128 		 * as well. But not if get_block() returned an error.
2129 		 */
2130 		if (!PageError(page))
2131 			SetPageUptodate(page);
2132 		unlock_page(page);
2133 		return 0;
2134 	}
2135 
2136 	/* Stage two: lock the buffers */
2137 	for (i = 0; i < nr; i++) {
2138 		bh = arr[i];
2139 		lock_buffer(bh);
2140 		mark_buffer_async_read(bh);
2141 	}
2142 
2143 	/*
2144 	 * Stage 3: start the IO.  Check for uptodateness
2145 	 * inside the buffer lock in case another process reading
2146 	 * the underlying blockdev brought it uptodate (the sct fix).
2147 	 */
2148 	for (i = 0; i < nr; i++) {
2149 		bh = arr[i];
2150 		if (buffer_uptodate(bh))
2151 			end_buffer_async_read(bh, 1);
2152 		else
2153 			submit_bh(READ, bh);
2154 	}
2155 	return 0;
2156 }
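
/*
 * A minimal sketch, not from this file: wiring block_read_full_page()
 * into ->readpage.  "myfs_get_block" is a hypothetical get_block_t
 * implementation.
 */
#if 0
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}
#endif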
2157 
2158 /* utility function for filesystems that need to do work on expanding
2159  * truncates.  Uses prepare/commit_write to allow the filesystem to
2160  * deal with the hole.
2161  */
2162 int generic_cont_expand(struct inode *inode, loff_t size)
2163 {
2164 	struct address_space *mapping = inode->i_mapping;
2165 	struct page *page;
2166 	unsigned long index, offset, limit;
2167 	int err;
2168 
2169 	err = -EFBIG;
2170 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2171 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2172 		send_sig(SIGXFSZ, current, 0);
2173 		goto out;
2174 	}
2175 	if (size > inode->i_sb->s_maxbytes)
2176 		goto out;
2177 
2178 	offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
2179 
2180 	/* ugh.  in prepare/commit_write, if from==to==start of block, we
2181 	 * skip the prepare.  make sure we never send an offset for the start
2182 	 * of a block.
2183 	 */
2184 	if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2185 		offset++;
2186 	}
2187 	index = size >> PAGE_CACHE_SHIFT;
2188 	err = -ENOMEM;
2189 	page = grab_cache_page(mapping, index);
2190 	if (!page)
2191 		goto out;
2192 	err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2193 	if (!err) {
2194 		err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2195 	}
2196 	unlock_page(page);
2197 	page_cache_release(page);
2198 	if (err > 0)
2199 		err = 0;
2200 out:
2201 	return err;
2202 }
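
/*
 * A minimal sketch, not from this file: a hypothetical ->setattr method
 * using generic_cont_expand() to extend a file on a filesystem that
 * cannot represent holes.
 */
#if 0
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size > i_size_read(inode)) {
		error = generic_cont_expand(inode, attr->ia_size);
		if (error)
			return error;
	}
	return inode_setattr(inode, attr);
}
#endif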
2203 
2204 /*
2205  * For moronic filesystems that do not allow holes in files.
2206  * We may have to extend the file.
2207  */
2208 
2209 int cont_prepare_write(struct page *page, unsigned offset,
2210 		unsigned to, get_block_t *get_block, loff_t *bytes)
2211 {
2212 	struct address_space *mapping = page->mapping;
2213 	struct inode *inode = mapping->host;
2214 	struct page *new_page;
2215 	pgoff_t pgpos;
2216 	long status;
2217 	unsigned zerofrom;
2218 	unsigned blocksize = 1 << inode->i_blkbits;
2219 	void *kaddr;
2220 
2221 	while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2222 		status = -ENOMEM;
2223 		new_page = grab_cache_page(mapping, pgpos);
2224 		if (!new_page)
2225 			goto out;
2226 		/* we might sleep */
2227 		if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2228 			unlock_page(new_page);
2229 			page_cache_release(new_page);
2230 			continue;
2231 		}
2232 		zerofrom = *bytes & ~PAGE_CACHE_MASK;
2233 		if (zerofrom & (blocksize-1)) {
2234 			*bytes |= (blocksize-1);
2235 			(*bytes)++;
2236 		}
2237 		status = __block_prepare_write(inode, new_page, zerofrom,
2238 						PAGE_CACHE_SIZE, get_block);
2239 		if (status)
2240 			goto out_unmap;
2241 		kaddr = kmap_atomic(new_page, KM_USER0);
2242 		memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2243 		flush_dcache_page(new_page);
2244 		kunmap_atomic(kaddr, KM_USER0);
2245 		generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2246 		unlock_page(new_page);
2247 		page_cache_release(new_page);
2248 	}
2249 
2250 	if (page->index < pgpos) {
2251 		/* completely inside the area */
2252 		zerofrom = offset;
2253 	} else {
2254 		/* page covers the boundary, find the boundary offset */
2255 		zerofrom = *bytes & ~PAGE_CACHE_MASK;
2256 
2257 		/* if we expand the file, the last block will be filled */
2258 		if (to > zerofrom && (zerofrom & (blocksize-1))) {
2259 			*bytes |= (blocksize-1);
2260 			(*bytes)++;
2261 		}
2262 
2263 		/* starting below the boundary? Nothing to zero out */
2264 		if (offset <= zerofrom)
2265 			zerofrom = offset;
2266 	}
2267 	status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2268 	if (status)
2269 		goto out1;
2270 	if (zerofrom < offset) {
2271 		kaddr = kmap_atomic(page, KM_USER0);
2272 		memset(kaddr+zerofrom, 0, offset-zerofrom);
2273 		flush_dcache_page(page);
2274 		kunmap_atomic(kaddr, KM_USER0);
2275 		__block_commit_write(inode, page, zerofrom, offset);
2276 	}
2277 	return 0;
2278 out1:
2279 	ClearPageUptodate(page);
2280 	return status;
2281 
2282 out_unmap:
2283 	ClearPageUptodate(new_page);
2284 	unlock_page(new_page);
2285 	page_cache_release(new_page);
2286 out:
2287 	return status;
2288 }
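
/*
 * A minimal sketch, not from this file: a ->prepare_write wrapper
 * around cont_prepare_write().  MYFS_I() and its mmu_private field
 * (the byte count instantiated so far) are hypothetical.
 */
#if 0
static int myfs_nohole_prepare_write(struct file *file, struct page *page,
			unsigned from, unsigned to)
{
	return cont_prepare_write(page, from, to, myfs_get_block,
				  &MYFS_I(page->mapping->host)->mmu_private);
}
#endif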
2289 
2290 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2291 			get_block_t *get_block)
2292 {
2293 	struct inode *inode = page->mapping->host;
2294 	int err = __block_prepare_write(inode, page, from, to, get_block);
2295 	if (err)
2296 		ClearPageUptodate(page);
2297 	return err;
2298 }
2299 
2300 int block_commit_write(struct page *page, unsigned from, unsigned to)
2301 {
2302 	struct inode *inode = page->mapping->host;
2303 	__block_commit_write(inode,page,from,to);
2304 	return 0;
2305 }
2306 
2307 int generic_commit_write(struct file *file, struct page *page,
2308 		unsigned from, unsigned to)
2309 {
2310 	struct inode *inode = page->mapping->host;
2311 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2312 	__block_commit_write(inode,page,from,to);
2313 	/*
2314 	 * No need to use i_size_read() here, the i_size
2315 	 * cannot change under us because we hold i_sem.
2316 	 */
2317 	if (pos > inode->i_size) {
2318 		i_size_write(inode, pos);
2319 		mark_inode_dirty(inode);
2320 	}
2321 	return 0;
2322 }
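
/*
 * A minimal sketch, not from this file: the usual way a filesystem
 * wires the helpers above into its address_space_operations.  All
 * myfs_* names are hypothetical; myfs_readpage, myfs_writepage and
 * myfs_bmap are sketched near the helpers they wrap.
 */
#if 0
static int myfs_prepare_write(struct file *file, struct page *page,
			unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, myfs_get_block);
}

static struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= myfs_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= myfs_bmap,
};
#endif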
2323 
2324 
2325 /*
2326  * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2327  * immediately, while under the page lock.  So it needs a special end_io
2328  * handler which does not touch the bh after unlocking it.
2329  *
2330  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2331  * a race there is benign: unlock_buffer() only uses the bh's address for
2332  * hashing after unlocking the buffer, so it doesn't actually touch the bh
2333  * itself.
2334  */
2335 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2336 {
2337 	if (uptodate) {
2338 		set_buffer_uptodate(bh);
2339 	} else {
2340 		/* This happens, due to failed READA attempts. */
2341 		clear_buffer_uptodate(bh);
2342 	}
2343 	unlock_buffer(bh);
2344 }
2345 
2346 /*
2347  * On entry, no part of the page is uptodate.
2348  * On exit the page is fully uptodate in the areas outside (from,to)
2349  */
2350 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2351 			get_block_t *get_block)
2352 {
2353 	struct inode *inode = page->mapping->host;
2354 	const unsigned blkbits = inode->i_blkbits;
2355 	const unsigned blocksize = 1 << blkbits;
2356 	struct buffer_head map_bh;
2357 	struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2358 	unsigned block_in_page;
2359 	unsigned block_start;
2360 	sector_t block_in_file;
2361 	char *kaddr;
2362 	int nr_reads = 0;
2363 	int i;
2364 	int ret = 0;
2365 	int is_mapped_to_disk = 1;
2366 	int dirtied_it = 0;
2367 
2368 	if (PageMappedToDisk(page))
2369 		return 0;
2370 
2371 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2372 	map_bh.b_page = page;
2373 
2374 	/*
2375 	 * We loop across all blocks in the page, whether or not they are
2376 	 * part of the affected region.  This is so we can discover if the
2377 	 * page is fully mapped-to-disk.
2378 	 */
2379 	for (block_start = 0, block_in_page = 0;
2380 		  block_start < PAGE_CACHE_SIZE;
2381 		  block_in_page++, block_start += blocksize) {
2382 		unsigned block_end = block_start + blocksize;
2383 		int create;
2384 
2385 		map_bh.b_state = 0;
2386 		create = 1;
2387 		if (block_start >= to)
2388 			create = 0;
2389 		ret = get_block(inode, block_in_file + block_in_page,
2390 					&map_bh, create);
2391 		if (ret)
2392 			goto failed;
2393 		if (!buffer_mapped(&map_bh))
2394 			is_mapped_to_disk = 0;
2395 		if (buffer_new(&map_bh))
2396 			unmap_underlying_metadata(map_bh.b_bdev,
2397 							map_bh.b_blocknr);
2398 		if (PageUptodate(page))
2399 			continue;
2400 		if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2401 			kaddr = kmap_atomic(page, KM_USER0);
2402 			if (block_start < from) {
2403 				memset(kaddr+block_start, 0, from-block_start);
2404 				dirtied_it = 1;
2405 			}
2406 			if (block_end > to) {
2407 				memset(kaddr + to, 0, block_end - to);
2408 				dirtied_it = 1;
2409 			}
2410 			flush_dcache_page(page);
2411 			kunmap_atomic(kaddr, KM_USER0);
2412 			continue;
2413 		}
2414 		if (buffer_uptodate(&map_bh))
2415 			continue;	/* reiserfs does this */
2416 		if (block_start < from || block_end > to) {
2417 			struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2418 
2419 			if (!bh) {
2420 				ret = -ENOMEM;
2421 				goto failed;
2422 			}
2423 			bh->b_state = map_bh.b_state;
2424 			atomic_set(&bh->b_count, 0);
2425 			bh->b_this_page = NULL;
2426 			bh->b_page = page;
2427 			bh->b_blocknr = map_bh.b_blocknr;
2428 			bh->b_size = blocksize;
2429 			bh->b_data = (char *)(long)block_start;
2430 			bh->b_bdev = map_bh.b_bdev;
2431 			bh->b_private = NULL;
2432 			read_bh[nr_reads++] = bh;
2433 		}
2434 	}
2435 
2436 	if (nr_reads) {
2437 		struct buffer_head *bh;
2438 
2439 		/*
2440 		 * The page is locked, so these buffers are protected from
2441 		 * any VM or truncate activity.  Hence we need not worry
2442 		 * about the buffer_head refcounts.
2443 		 */
2444 		for (i = 0; i < nr_reads; i++) {
2445 			bh = read_bh[i];
2446 			lock_buffer(bh);
2447 			bh->b_end_io = end_buffer_read_nobh;
2448 			submit_bh(READ, bh);
2449 		}
2450 		for (i = 0; i < nr_reads; i++) {
2451 			bh = read_bh[i];
2452 			wait_on_buffer(bh);
2453 			if (!buffer_uptodate(bh))
2454 				ret = -EIO;
2455 			free_buffer_head(bh);
2456 			read_bh[i] = NULL;
2457 		}
2458 		if (ret)
2459 			goto failed;
2460 	}
2461 
2462 	if (is_mapped_to_disk)
2463 		SetPageMappedToDisk(page);
2464 	SetPageUptodate(page);
2465 
2466 	/*
2467 	 * Setting the page dirty here isn't necessary for the prepare_write
2468 	 * function - commit_write will do that.  But if/when this function is
2469 	 * used within the pagefault handler to ensure that all mmapped pages
2470 	 * have backing space in the filesystem, we will need to dirty the page
2471 	 * if its contents were altered.
2472 	 */
2473 	if (dirtied_it)
2474 		set_page_dirty(page);
2475 
2476 	return 0;
2477 
2478 failed:
2479 	for (i = 0; i < nr_reads; i++) {
2480 		if (read_bh[i])
2481 			free_buffer_head(read_bh[i]);
2482 	}
2483 
2484 	/*
2485 	 * Error recovery is pretty slack.  Clear the page and mark it dirty
2486 	 * so we'll later zero out any blocks which _were_ allocated.
2487 	 */
2488 	kaddr = kmap_atomic(page, KM_USER0);
2489 	memset(kaddr, 0, PAGE_CACHE_SIZE);
2490 	kunmap_atomic(kaddr, KM_USER0);
2491 	SetPageUptodate(page);
2492 	set_page_dirty(page);
2493 	return ret;
2494 }
2495 EXPORT_SYMBOL(nobh_prepare_write);
2496 
2497 int nobh_commit_write(struct file *file, struct page *page,
2498 		unsigned from, unsigned to)
2499 {
2500 	struct inode *inode = page->mapping->host;
2501 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2502 
2503 	set_page_dirty(page);
2504 	if (pos > inode->i_size) {
2505 		i_size_write(inode, pos);
2506 		mark_inode_dirty(inode);
2507 	}
2508 	return 0;
2509 }
2510 EXPORT_SYMBOL(nobh_commit_write);
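
/*
 * A minimal sketch, not from this file: the nobh variants slot into
 * address_space_operations the same way.  nobh_commit_write() can be
 * used directly as ->commit_write, while nobh_prepare_write() needs a
 * small wrapper to supply the hypothetical myfs_get_block.
 */
#if 0
static int myfs_nobh_prepare_write(struct file *file, struct page *page,
			unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, myfs_get_block);
}
#endif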
2511 
2512 /*
2513  * nobh_writepage() - based on block_write_full_page() except
2514  * that it tries to operate without attaching bufferheads to
2515  * the page.
2516  */
2517 int nobh_writepage(struct page *page, get_block_t *get_block,
2518 			struct writeback_control *wbc)
2519 {
2520 	struct inode * const inode = page->mapping->host;
2521 	loff_t i_size = i_size_read(inode);
2522 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2523 	unsigned offset;
2524 	void *kaddr;
2525 	int ret;
2526 
2527 	/* Is the page fully inside i_size? */
2528 	if (page->index < end_index)
2529 		goto out;
2530 
2531 	/* Is the page fully outside i_size? (truncate in progress) */
2532 	offset = i_size & (PAGE_CACHE_SIZE-1);
2533 	if (page->index >= end_index+1 || !offset) {
2534 		/*
2535 		 * The page may have dirty, unmapped buffers.  For example,
2536 		 * they may have been added in ext3_writepage().  Make them
2537 		 * freeable here, so the page does not leak.
2538 		 */
2539 #if 0
2540 		/* Not really sure about this - do we need this? */
2541 		if (page->mapping->a_ops->invalidatepage)
2542 			page->mapping->a_ops->invalidatepage(page, offset);
2543 #endif
2544 		unlock_page(page);
2545 		return 0; /* don't care */
2546 	}
2547 
2548 	/*
2549 	 * The page straddles i_size.  It must be zeroed out on each and every
2550 	 * writepage invocation because it may be mmapped.  "A file is mapped
2551 	 * in multiples of the page size.  For a file that is not a multiple of
2552 	 * the  page size, the remaining memory is zeroed when mapped, and
2553 	 * writes to that region are not written out to the file."
2554 	 */
2555 	kaddr = kmap_atomic(page, KM_USER0);
2556 	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2557 	flush_dcache_page(page);
2558 	kunmap_atomic(kaddr, KM_USER0);
2559 out:
2560 	ret = mpage_writepage(page, get_block, wbc);
2561 	if (ret == -EAGAIN)
2562 		ret = __block_write_full_page(inode, page, get_block, wbc);
2563 	return ret;
2564 }
2565 EXPORT_SYMBOL(nobh_writepage);
2566 
2567 /*
2568  * This function assumes that ->prepare_write() uses nobh_prepare_write().
2569  */
2570 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2571 {
2572 	struct inode *inode = mapping->host;
2573 	unsigned blocksize = 1 << inode->i_blkbits;
2574 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2575 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2576 	unsigned to;
2577 	struct page *page;
2578 	struct address_space_operations *a_ops = mapping->a_ops;
2579 	char *kaddr;
2580 	int ret = 0;
2581 
2582 	if ((offset & (blocksize - 1)) == 0)
2583 		goto out;
2584 
2585 	ret = -ENOMEM;
2586 	page = grab_cache_page(mapping, index);
2587 	if (!page)
2588 		goto out;
2589 
2590 	to = (offset + blocksize) & ~(blocksize - 1);
2591 	ret = a_ops->prepare_write(NULL, page, offset, to);
2592 	if (ret == 0) {
2593 		kaddr = kmap_atomic(page, KM_USER0);
2594 		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2595 		flush_dcache_page(page);
2596 		kunmap_atomic(kaddr, KM_USER0);
2597 		set_page_dirty(page);
2598 	}
2599 	unlock_page(page);
2600 	page_cache_release(page);
2601 out:
2602 	return ret;
2603 }
2604 EXPORT_SYMBOL(nobh_truncate_page);
2605 
2606 int block_truncate_page(struct address_space *mapping,
2607 			loff_t from, get_block_t *get_block)
2608 {
2609 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2610 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2611 	unsigned blocksize;
2612 	pgoff_t iblock;
2613 	unsigned length, pos;
2614 	struct inode *inode = mapping->host;
2615 	struct page *page;
2616 	struct buffer_head *bh;
2617 	void *kaddr;
2618 	int err;
2619 
2620 	blocksize = 1 << inode->i_blkbits;
2621 	length = offset & (blocksize - 1);
2622 
2623 	/* Block boundary? Nothing to do */
2624 	if (!length)
2625 		return 0;
2626 
2627 	length = blocksize - length;
2628 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2629 
2630 	page = grab_cache_page(mapping, index);
2631 	err = -ENOMEM;
2632 	if (!page)
2633 		goto out;
2634 
2635 	if (!page_has_buffers(page))
2636 		create_empty_buffers(page, blocksize, 0);
2637 
2638 	/* Find the buffer that contains "offset" */
2639 	bh = page_buffers(page);
2640 	pos = blocksize;
2641 	while (offset >= pos) {
2642 		bh = bh->b_this_page;
2643 		iblock++;
2644 		pos += blocksize;
2645 	}
2646 
2647 	err = 0;
2648 	if (!buffer_mapped(bh)) {
2649 		err = get_block(inode, iblock, bh, 0);
2650 		if (err)
2651 			goto unlock;
2652 		/* unmapped? It's a hole - nothing to do */
2653 		if (!buffer_mapped(bh))
2654 			goto unlock;
2655 	}
2656 
2657 	/* Ok, it's mapped. Make sure it's up-to-date */
2658 	if (PageUptodate(page))
2659 		set_buffer_uptodate(bh);
2660 
2661 	if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2662 		err = -EIO;
2663 		ll_rw_block(READ, 1, &bh);
2664 		wait_on_buffer(bh);
2665 		/* Uhhuh. Read error. Complain and punt. */
2666 		if (!buffer_uptodate(bh))
2667 			goto unlock;
2668 	}
2669 
2670 	kaddr = kmap_atomic(page, KM_USER0);
2671 	memset(kaddr + offset, 0, length);
2672 	flush_dcache_page(page);
2673 	kunmap_atomic(kaddr, KM_USER0);
2674 
2675 	mark_buffer_dirty(bh);
2676 	err = 0;
2677 
2678 unlock:
2679 	unlock_page(page);
2680 	page_cache_release(page);
2681 out:
2682 	return err;
2683 }
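
/*
 * A minimal sketch, not from this file: zeroing the partial last block
 * from a hypothetical ->truncate method, before the filesystem frees
 * the truncated blocks.
 */
#if 0
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	/* filesystem-specific freeing of the truncated blocks would follow */
}
#endif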
2684 
2685 /*
2686  * The generic ->writepage function for buffer-backed address_spaces
2687  */
2688 int block_write_full_page(struct page *page, get_block_t *get_block,
2689 			struct writeback_control *wbc)
2690 {
2691 	struct inode * const inode = page->mapping->host;
2692 	loff_t i_size = i_size_read(inode);
2693 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2694 	unsigned offset;
2695 	void *kaddr;
2696 
2697 	/* Is the page fully inside i_size? */
2698 	if (page->index < end_index)
2699 		return __block_write_full_page(inode, page, get_block, wbc);
2700 
2701 	/* Is the page fully outside i_size? (truncate in progress) */
2702 	offset = i_size & (PAGE_CACHE_SIZE-1);
2703 	if (page->index >= end_index+1 || !offset) {
2704 		/*
2705 		 * The page may have dirty, unmapped buffers.  For example,
2706 		 * they may have been added in ext3_writepage().  Make them
2707 		 * freeable here, so the page does not leak.
2708 		 */
2709 		block_invalidatepage(page, 0);
2710 		unlock_page(page);
2711 		return 0; /* don't care */
2712 	}
2713 
2714 	/*
2715 	 * The page straddles i_size.  It must be zeroed out on each and every
2716  * writepage invocation because it may be mmapped.  "A file is mapped
2717 	 * in multiples of the page size.  For a file that is not a multiple of
2718 	 * the  page size, the remaining memory is zeroed when mapped, and
2719 	 * writes to that region are not written out to the file."
2720 	 */
2721 	kaddr = kmap_atomic(page, KM_USER0);
2722 	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2723 	flush_dcache_page(page);
2724 	kunmap_atomic(kaddr, KM_USER0);
2725 	return __block_write_full_page(inode, page, get_block, wbc);
2726 }
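
/*
 * A minimal sketch, not from this file: wiring block_write_full_page()
 * into ->writepage, again assuming a hypothetical myfs_get_block.
 */
#if 0
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}
#endif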
2727 
2728 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2729 			    get_block_t *get_block)
2730 {
2731 	struct buffer_head tmp;
2732 	struct inode *inode = mapping->host;
2733 	tmp.b_state = 0;
2734 	tmp.b_blocknr = 0;
2735 	get_block(inode, block, &tmp, 0);
2736 	return tmp.b_blocknr;
2737 }
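
/*
 * A minimal sketch, not from this file: the matching ->bmap method,
 * built on generic_block_bmap() with the hypothetical myfs_get_block.
 */
#if 0
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif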
2738 
2739 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2740 {
2741 	struct buffer_head *bh = bio->bi_private;
2742 
2743 	if (bio->bi_size)
2744 		return 1;
2745 
2746 	if (err == -EOPNOTSUPP) {
2747 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2748 		set_bit(BH_Eopnotsupp, &bh->b_state);
2749 	}
2750 
2751 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2752 	bio_put(bio);
2753 	return 0;
2754 }
2755 
2756 int submit_bh(int rw, struct buffer_head * bh)
2757 {
2758 	struct bio *bio;
2759 	int ret = 0;
2760 
2761 	BUG_ON(!buffer_locked(bh));
2762 	BUG_ON(!buffer_mapped(bh));
2763 	BUG_ON(!bh->b_end_io);
2764 
2765 	if (buffer_ordered(bh) && (rw == WRITE))
2766 		rw = WRITE_BARRIER;
2767 
2768 	/*
2769 	 * Only clear out a write error when rewriting.  Should this
2770 	 * include WRITE_SYNC as well?
2771 	 */
2772 	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2773 		clear_buffer_write_io_error(bh);
2774 
2775 	/*
2776 	 * from here on down, it's all bio -- do the initial mapping,
2777 	 * submit_bio -> generic_make_request may further map this bio around
2778 	 */
2779 	bio = bio_alloc(GFP_NOIO, 1);
2780 
2781 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2782 	bio->bi_bdev = bh->b_bdev;
2783 	bio->bi_io_vec[0].bv_page = bh->b_page;
2784 	bio->bi_io_vec[0].bv_len = bh->b_size;
2785 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2786 
2787 	bio->bi_vcnt = 1;
2788 	bio->bi_idx = 0;
2789 	bio->bi_size = bh->b_size;
2790 
2791 	bio->bi_end_io = end_bio_bh_io_sync;
2792 	bio->bi_private = bh;
2793 
2794 	bio_get(bio);
2795 	submit_bio(rw, bio);
2796 
2797 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
2798 		ret = -EOPNOTSUPP;
2799 
2800 	bio_put(bio);
2801 	return ret;
2802 }
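
/*
 * A minimal sketch, not from this file: reading one buffer
 * synchronously via submit_bh().  The get_bh() reference is dropped by
 * end_buffer_read_sync() on completion.
 */
#if 0
static int myfs_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif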
2803 
2804 /**
2805  * ll_rw_block: low-level access to block devices (DEPRECATED)
2806  * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2807  * @nr: number of &struct buffer_heads in the array
2808  * @bhs: array of pointers to &struct buffer_head
2809  *
2810  * ll_rw_block() takes an array of pointers to &struct buffer_heads,
2811  * and requests an I/O operation on them, either a %READ or a %WRITE.
2812  * The third %READA option is described in the documentation for
2813  * generic_make_request() which ll_rw_block() calls.
2814  *
2815  * This function drops any buffer that it cannot get a lock on (with the
2816  * BH_Lock state bit), any buffer that appears to be clean when doing a
2817  * write request, and any buffer that appears to be up-to-date when doing
2818  * a read request.  Further it marks as clean buffers that are processed for
2819  * writing (the buffer cache won't assume that they are actually clean until
2820  * the buffer gets unlocked).
2821  *
2822  * ll_rw_block sets b_end_io to a simple completion handler that marks
2823  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2824  * any waiters.
2825  *
2826  * All of the buffers must be for the same device, and their size must
2827  * be a multiple of the device's current approved block size.
2828  */
2829 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2830 {
2831 	int i;
2832 
2833 	for (i = 0; i < nr; i++) {
2834 		struct buffer_head *bh = bhs[i];
2835 
2836 		if (test_set_buffer_locked(bh))
2837 			continue;
2838 
2839 		get_bh(bh);
2840 		if (rw == WRITE) {
2841 			if (test_clear_buffer_dirty(bh)) {
2842 				bh->b_end_io = end_buffer_write_sync;
2843 				submit_bh(WRITE, bh);
2844 				continue;
2845 			}
2846 		} else {
2847 			if (!buffer_uptodate(bh)) {
2848 				bh->b_end_io = end_buffer_read_sync;
2849 				submit_bh(rw, bh);
2850 				continue;
2851 			}
2852 		}
2853 		unlock_buffer(bh);
2854 		put_bh(bh);
2855 	}
2856 }
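
/*
 * A minimal sketch, not from this file: the common read-then-wait
 * pattern built on ll_rw_block().  If the buffer was already locked,
 * ll_rw_block() skips it and wait_on_buffer() waits for the in-flight
 * I/O instead.
 */
#if 0
static int myfs_read_bh(struct buffer_head *bh)
{
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif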
2857 
2858 /*
2859  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2860  * and then start new I/O and then wait upon it.  The caller must have a ref on
2861  * the buffer_head.
2862  */
2863 int sync_dirty_buffer(struct buffer_head *bh)
2864 {
2865 	int ret = 0;
2866 
2867 	WARN_ON(atomic_read(&bh->b_count) < 1);
2868 	lock_buffer(bh);
2869 	if (test_clear_buffer_dirty(bh)) {
2870 		get_bh(bh);
2871 		bh->b_end_io = end_buffer_write_sync;
2872 		ret = submit_bh(WRITE, bh);
2873 		wait_on_buffer(bh);
2874 		if (buffer_eopnotsupp(bh)) {
2875 			clear_buffer_eopnotsupp(bh);
2876 			ret = -EOPNOTSUPP;
2877 		}
2878 		if (!ret && !buffer_uptodate(bh))
2879 			ret = -EIO;
2880 	} else {
2881 		unlock_buffer(bh);
2882 	}
2883 	return ret;
2884 }
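
/*
 * A minimal sketch, not from this file: a data-integrity write of a
 * single buffer (a superblock, say).  The caller is assumed to hold a
 * reference on bh, as required above.
 */
#if 0
static int myfs_sync_super(struct buffer_head *bh)
{
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);
}
#endif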
2885 
2886 /*
2887  * try_to_free_buffers() checks if all the buffers on this particular page
2888  * are unused, and releases them if so.
2889  *
2890  * Exclusion against try_to_free_buffers may be obtained by either
2891  * locking the page or by holding its mapping's private_lock.
2892  *
2893  * If the page is dirty but all the buffers are clean then we need to
2894  * be sure to mark the page clean as well.  This is because the page
2895  * may be against a block device, and a later reattachment of buffers
2896  * to a dirty page will set *all* buffers dirty.  Which would corrupt
2897  * filesystem data on the same device.
2898  *
2899  * The same applies to regular filesystem pages: if all the buffers are
2900  * clean then we set the page clean and proceed.  To do that, we require
2901  * total exclusion from __set_page_dirty_buffers().  That is obtained with
2902  * private_lock.
2903  *
2904  * try_to_free_buffers() is non-blocking.
2905  */
2906 static inline int buffer_busy(struct buffer_head *bh)
2907 {
2908 	return atomic_read(&bh->b_count) |
2909 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2910 }
2911 
2912 static int
2913 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2914 {
2915 	struct buffer_head *head = page_buffers(page);
2916 	struct buffer_head *bh;
2917 
2918 	bh = head;
2919 	do {
2920 		if (buffer_write_io_error(bh) && page->mapping)
2921 			set_bit(AS_EIO, &page->mapping->flags);
2922 		if (buffer_busy(bh))
2923 			goto failed;
2924 		bh = bh->b_this_page;
2925 	} while (bh != head);
2926 
2927 	do {
2928 		struct buffer_head *next = bh->b_this_page;
2929 
2930 		if (!list_empty(&bh->b_assoc_buffers))
2931 			__remove_assoc_queue(bh);
2932 		bh = next;
2933 	} while (bh != head);
2934 	*buffers_to_free = head;
2935 	__clear_page_buffers(page);
2936 	return 1;
2937 failed:
2938 	return 0;
2939 }
2940 
2941 int try_to_free_buffers(struct page *page)
2942 {
2943 	struct address_space * const mapping = page->mapping;
2944 	struct buffer_head *buffers_to_free = NULL;
2945 	int ret = 0;
2946 
2947 	BUG_ON(!PageLocked(page));
2948 	if (PageWriteback(page))
2949 		return 0;
2950 
2951 	if (mapping == NULL) {		/* can this still happen? */
2952 		ret = drop_buffers(page, &buffers_to_free);
2953 		goto out;
2954 	}
2955 
2956 	spin_lock(&mapping->private_lock);
2957 	ret = drop_buffers(page, &buffers_to_free);
2958 	if (ret) {
2959 		/*
2960 		 * If the filesystem writes its buffers by hand (eg ext3)
2961 		 * then we can have clean buffers against a dirty page.  We
2962 		 * clean the page here; otherwise later reattachment of buffers
2963 		 * could encounter a non-uptodate page, which is unresolvable.
2964 		 * This only applies in the rare case where try_to_free_buffers
2965 		 * succeeds but the page is not freed.
2966 		 */
2967 		clear_page_dirty(page);
2968 	}
2969 	spin_unlock(&mapping->private_lock);
2970 out:
2971 	if (buffers_to_free) {
2972 		struct buffer_head *bh = buffers_to_free;
2973 
2974 		do {
2975 			struct buffer_head *next = bh->b_this_page;
2976 			free_buffer_head(bh);
2977 			bh = next;
2978 		} while (bh != buffers_to_free);
2979 	}
2980 	return ret;
2981 }
2982 EXPORT_SYMBOL(try_to_free_buffers);
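
/*
 * A minimal sketch, not from this file: try_to_free_buffers() is
 * typically reached via a filesystem's ->releasepage method.
 */
#if 0
static int myfs_releasepage(struct page *page, int gfp_mask)
{
	return try_to_free_buffers(page);
}
#endif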
2983 
2984 int block_sync_page(struct page *page)
2985 {
2986 	struct address_space *mapping;
2987 
2988 	smp_mb();
2989 	mapping = page_mapping(page);
2990 	if (mapping)
2991 		blk_run_backing_dev(mapping->backing_dev_info, page);
2992 	return 0;
2993 }
2994 
2995 /*
2996  * There are no bdflush tunables left.  But distributions are
2997  * still running obsolete flush daemons, so we terminate them here.
2998  *
2999  * Use of bdflush() is deprecated and will be removed in a future kernel.
3000  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3001  */
3002 asmlinkage long sys_bdflush(int func, long data)
3003 {
3004 	static int msg_count;
3005 
3006 	if (!capable(CAP_SYS_ADMIN))
3007 		return -EPERM;
3008 
3009 	if (msg_count < 5) {
3010 		msg_count++;
3011 		printk(KERN_INFO
3012 			"warning: process `%s' used the obsolete bdflush"
3013 			" system call\n", current->comm);
3014 		printk(KERN_INFO "Fix your initscripts?\n");
3015 	}
3016 
3017 	if (func == 1)
3018 		do_exit(0);
3019 	return 0;
3020 }
3021 
3022 /*
3023  * Buffer-head allocation
3024  */
3025 static kmem_cache_t *bh_cachep;
3026 
3027 /*
3028  * Once the number of bh's in the machine exceeds this level, we start
3029  * stripping them in writeback.
3030  */
3031 static int max_buffer_heads;
3032 
3033 int buffer_heads_over_limit;
3034 
3035 struct bh_accounting {
3036 	int nr;			/* Number of live bh's */
3037 	int ratelimit;		/* Limit cacheline bouncing */
3038 };
3039 
3040 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3041 
3042 static void recalc_bh_state(void)
3043 {
3044 	int i;
3045 	int tot = 0;
3046 
3047 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3048 		return;
3049 	__get_cpu_var(bh_accounting).ratelimit = 0;
3050 	for_each_cpu(i)
3051 		tot += per_cpu(bh_accounting, i).nr;
3052 	buffer_heads_over_limit = (tot > max_buffer_heads);
3053 }
3054 
3055 struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags)
3056 {
3057 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3058 	if (ret) {
3059 		preempt_disable();
3060 		__get_cpu_var(bh_accounting).nr++;
3061 		recalc_bh_state();
3062 		preempt_enable();
3063 	}
3064 	return ret;
3065 }
3066 EXPORT_SYMBOL(alloc_buffer_head);
3067 
3068 void free_buffer_head(struct buffer_head *bh)
3069 {
3070 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
3071 	kmem_cache_free(bh_cachep, bh);
3072 	preempt_disable();
3073 	__get_cpu_var(bh_accounting).nr--;
3074 	recalc_bh_state();
3075 	preempt_enable();
3076 }
3077 EXPORT_SYMBOL(free_buffer_head);
3078 
3079 static void
3080 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
3081 {
3082 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
3083 			    SLAB_CTOR_CONSTRUCTOR) {
3084 		struct buffer_head * bh = (struct buffer_head *)data;
3085 
3086 		memset(bh, 0, sizeof(*bh));
3087 		INIT_LIST_HEAD(&bh->b_assoc_buffers);
3088 	}
3089 }
3090 
3091 #ifdef CONFIG_HOTPLUG_CPU
3092 static void buffer_exit_cpu(int cpu)
3093 {
3094 	int i;
3095 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3096 
3097 	for (i = 0; i < BH_LRU_SIZE; i++) {
3098 		brelse(b->bhs[i]);
3099 		b->bhs[i] = NULL;
3100 	}
3101 }
3102 
3103 static int buffer_cpu_notify(struct notifier_block *self,
3104 			      unsigned long action, void *hcpu)
3105 {
3106 	if (action == CPU_DEAD)
3107 		buffer_exit_cpu((unsigned long)hcpu);
3108 	return NOTIFY_OK;
3109 }
3110 #endif /* CONFIG_HOTPLUG_CPU */
3111 
3112 void __init buffer_init(void)
3113 {
3114 	int nrpages;
3115 
3116 	bh_cachep = kmem_cache_create("buffer_head",
3117 			sizeof(struct buffer_head), 0,
3118 			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
3119 
3120 	/*
3121 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3122 	 */
3123 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3124 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3125 	hotcpu_notifier(buffer_cpu_notify, 0);
3126 }
3127 
3128 EXPORT_SYMBOL(__bforget);
3129 EXPORT_SYMBOL(__brelse);
3130 EXPORT_SYMBOL(__wait_on_buffer);
3131 EXPORT_SYMBOL(block_commit_write);
3132 EXPORT_SYMBOL(block_prepare_write);
3133 EXPORT_SYMBOL(block_read_full_page);
3134 EXPORT_SYMBOL(block_sync_page);
3135 EXPORT_SYMBOL(block_truncate_page);
3136 EXPORT_SYMBOL(block_write_full_page);
3137 EXPORT_SYMBOL(cont_prepare_write);
3138 EXPORT_SYMBOL(end_buffer_async_write);
3139 EXPORT_SYMBOL(end_buffer_read_sync);
3140 EXPORT_SYMBOL(end_buffer_write_sync);
3141 EXPORT_SYMBOL(file_fsync);
3142 EXPORT_SYMBOL(fsync_bdev);
3143 EXPORT_SYMBOL(generic_block_bmap);
3144 EXPORT_SYMBOL(generic_commit_write);
3145 EXPORT_SYMBOL(generic_cont_expand);
3146 EXPORT_SYMBOL(init_buffer);
3147 EXPORT_SYMBOL(invalidate_bdev);
3148 EXPORT_SYMBOL(ll_rw_block);
3149 EXPORT_SYMBOL(mark_buffer_dirty);
3150 EXPORT_SYMBOL(submit_bh);
3151 EXPORT_SYMBOL(sync_dirty_buffer);
3152 EXPORT_SYMBOL(unlock_buffer);
3153