xref: /linux/fs/buffer.c (revision a5c4300389bb33ade2515c082709217f0614cf15)
1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6 
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20 
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44 
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46 
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48 
49 inline void
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 {
52 	bh->b_end_io = handler;
53 	bh->b_private = private;
54 }
55 EXPORT_SYMBOL(init_buffer);
56 
57 static int sync_buffer(void *word)
58 {
59 	struct block_device *bd;
60 	struct buffer_head *bh
61 		= container_of(word, struct buffer_head, b_state);
62 
63 	smp_mb();
64 	bd = bh->b_bdev;
65 	if (bd)
66 		blk_run_address_space(bd->bd_inode->i_mapping);
67 	io_schedule();
68 	return 0;
69 }
70 
71 void __lock_buffer(struct buffer_head *bh)
72 {
73 	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
74 							TASK_UNINTERRUPTIBLE);
75 }
76 EXPORT_SYMBOL(__lock_buffer);
77 
78 void unlock_buffer(struct buffer_head *bh)
79 {
80 	clear_bit_unlock(BH_Lock, &bh->b_state);
81 	smp_mb__after_clear_bit();
82 	wake_up_bit(&bh->b_state, BH_Lock);
83 }
84 EXPORT_SYMBOL(unlock_buffer);
85 
86 /*
87  * Block until a buffer comes unlocked.  This doesn't stop it
88  * from becoming locked again - you have to lock it yourself
89  * if you want to preserve its state.
90  */
91 void __wait_on_buffer(struct buffer_head * bh)
92 {
93 	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
94 }
95 EXPORT_SYMBOL(__wait_on_buffer);
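/*
 * Illustrative sketch (not part of this file): the usual pattern for
 * getting a stable view of a buffer.  example_examine_buffer() is a
 * hypothetical caller; lock_buffer()/unlock_buffer() bracket the
 * access, while a bare wait_on_buffer() only waits for the current
 * holder and does not stop the buffer being locked again.
 */
#if 0	/* example only */
static void example_examine_buffer(struct buffer_head *bh)
{
	lock_buffer(bh);	/* may sleep in __lock_buffer() above */
	if (buffer_uptodate(bh)) {
		/* bh->b_data is stable while we hold BH_Lock */
	}
	unlock_buffer(bh);	/* wakes waiters via wake_up_bit() */
}
#endif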
96 
97 static void
98 __clear_page_buffers(struct page *page)
99 {
100 	ClearPagePrivate(page);
101 	set_page_private(page, 0);
102 	page_cache_release(page);
103 }
104 
105 
106 static int quiet_error(struct buffer_head *bh)
107 {
108 	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
109 		return 0;
110 	return 1;
111 }
112 
113 
114 static void buffer_io_error(struct buffer_head *bh)
115 {
116 	char b[BDEVNAME_SIZE];
117 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
118 			bdevname(bh->b_bdev, b),
119 			(unsigned long long)bh->b_blocknr);
120 }
121 
122 /*
123  * End-of-IO handler helper function which does not touch the bh after
124  * unlocking it.
125  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
126  * a race there is benign: unlock_buffer() only uses the bh's address for
127  * hashing after unlocking the buffer, so it doesn't actually touch the bh
128  * itself.
129  */
130 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
131 {
132 	if (uptodate) {
133 		set_buffer_uptodate(bh);
134 	} else {
135 		/* This happens due to failed READA attempts. */
136 		clear_buffer_uptodate(bh);
137 	}
138 	unlock_buffer(bh);
139 }
140 
141 /*
142  * Default synchronous end-of-IO handler.  Just mark it up-to-date and
143  * unlock the buffer. This is what ll_rw_block uses too.
144  */
145 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
146 {
147 	__end_buffer_read_notouch(bh, uptodate);
148 	put_bh(bh);
149 }
150 EXPORT_SYMBOL(end_buffer_read_sync);
151 
152 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
153 {
154 	char b[BDEVNAME_SIZE];
155 
156 	if (uptodate) {
157 		set_buffer_uptodate(bh);
158 	} else {
159 		if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
160 			buffer_io_error(bh);
161 			printk(KERN_WARNING "lost page write due to "
162 					"I/O error on %s\n",
163 				       bdevname(bh->b_bdev, b));
164 		}
165 		set_buffer_write_io_error(bh);
166 		clear_buffer_uptodate(bh);
167 	}
168 	unlock_buffer(bh);
169 	put_bh(bh);
170 }
171 EXPORT_SYMBOL(end_buffer_write_sync);
172 
173 /*
174  * Various filesystems appear to want __find_get_block to be non-blocking.
175  * But it's the page lock which protects the buffers.  To get around this,
176  * we get exclusion from try_to_free_buffers with the blockdev mapping's
177  * private_lock.
178  *
179  * Hack idea: for the blockdev mapping, private_lock contention
180  * may be quite high.  This code could TryLock the page, and if that
181  * succeeds, there is no need to take private_lock. (But if
182  * private_lock is contended then so is mapping->tree_lock).
183  */
184 static struct buffer_head *
185 __find_get_block_slow(struct block_device *bdev, sector_t block)
186 {
187 	struct inode *bd_inode = bdev->bd_inode;
188 	struct address_space *bd_mapping = bd_inode->i_mapping;
189 	struct buffer_head *ret = NULL;
190 	pgoff_t index;
191 	struct buffer_head *bh;
192 	struct buffer_head *head;
193 	struct page *page;
194 	int all_mapped = 1;
195 
196 	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
197 	page = find_get_page(bd_mapping, index);
198 	if (!page)
199 		goto out;
200 
201 	spin_lock(&bd_mapping->private_lock);
202 	if (!page_has_buffers(page))
203 		goto out_unlock;
204 	head = page_buffers(page);
205 	bh = head;
206 	do {
207 		if (!buffer_mapped(bh))
208 			all_mapped = 0;
209 		else if (bh->b_blocknr == block) {
210 			ret = bh;
211 			get_bh(bh);
212 			goto out_unlock;
213 		}
214 		bh = bh->b_this_page;
215 	} while (bh != head);
216 
217 	/* We might be here because some of the buffers on this page are
218 	 * not mapped.  This is due to various races between
219 	 * file I/O on the block device and getblk.  It gets dealt with
220 	 * elsewhere; don't complain here if we had some unmapped buffers.
221 	 */
222 	if (all_mapped) {
223 		printk("__find_get_block_slow() failed. "
224 			"block=%llu, b_blocknr=%llu\n",
225 			(unsigned long long)block,
226 			(unsigned long long)bh->b_blocknr);
227 		printk("b_state=0x%08lx, b_size=%zu\n",
228 			bh->b_state, bh->b_size);
229 		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
230 	}
231 out_unlock:
232 	spin_unlock(&bd_mapping->private_lock);
233 	page_cache_release(page);
234 out:
235 	return ret;
236 }
237 
238 /* If invalidate_buffers() will trash dirty buffers, it means some kind
239    of fs corruption is going on. Trashing dirty data always implies losing
240    information that was supposed to be just stored on the physical layer
241    by the user.
242 
243    Thus invalidate_buffers in general usage is not allowed to trash
244    dirty buffers. For example, ioctl(BLKFLSBUF) expects dirty data to
245    be preserved.  These buffers are simply skipped.
246 
247    We also skip buffers which are still in use.  For example this can
248    happen if a userspace program is reading the block device.
249 
250    NOTE: in the case where the user removes a removable-media disk while
251    there is still dirty data not synced to disk (due to a bug in the device
252    driver or due to an error by the user), not destroying the dirty buffers
253    could corrupt the next media inserted as well; thus a parameter is
254    necessary to handle this case in the safest way possible (trying
255    not to corrupt the newly inserted disk with data belonging to
256    the old, now corrupted, disk). Also, for the ramdisk, the natural way
257    to release the ramdisk memory is to destroy its dirty buffers.
258 
259    These are two special cases. Normal usage implies that the device driver
260    issues a sync on the device (without waiting for I/O completion) and
261    then an invalidate_buffers call that doesn't trash dirty buffers.
262 
263    For handling cache coherency with the blkdev pagecache, the 'update' case
264    has been introduced. It is needed to re-read from disk any pinned
265    buffer. NOTE: re-reading from disk is destructive, so we can do it only
266    when we assume nobody is changing the buffercache under our I/O and when
267    we think the disk contains more recent information than the buffercache.
268    The update == 1 pass marks the buffers we need to update; the update == 2
269    pass does the actual I/O. */
270 void invalidate_bdev(struct block_device *bdev)
271 {
272 	struct address_space *mapping = bdev->bd_inode->i_mapping;
273 
274 	if (mapping->nrpages == 0)
275 		return;
276 
277 	invalidate_bh_lrus();
278 	lru_add_drain_all();	/* make sure all lru add caches are flushed */
279 	invalidate_mapping_pages(mapping, 0, -1);
280 }
281 EXPORT_SYMBOL(invalidate_bdev);
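/*
 * Illustrative sketch (not part of this file) of the "normal usage"
 * described above: a driver syncs the device, then invalidates the
 * now-clean, unused buffers (e.g. on a media change).  The function
 * name and its caller are hypothetical.
 */
#if 0	/* example only */
static void example_media_change(struct block_device *bdev)
{
	sync_blockdev(bdev);	/* start and wait for writeback */
	invalidate_bdev(bdev);	/* drop bh LRUs and clean pagecache pages */
}
#endif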
282 
283 /*
284  * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
285  */
286 static void free_more_memory(void)
287 {
288 	struct zone *zone;
289 	int nid;
290 
291 	wakeup_flusher_threads(1024);
292 	yield();
293 
294 	for_each_online_node(nid) {
295 		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
296 						gfp_zone(GFP_NOFS), NULL,
297 						&zone);
298 		if (zone)
299 			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
300 						GFP_NOFS, NULL);
301 	}
302 }
303 
304 /*
305  * I/O completion handler for block_read_full_page() - pages
306  * which come unlocked at the end of I/O.
307  */
308 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
309 {
310 	unsigned long flags;
311 	struct buffer_head *first;
312 	struct buffer_head *tmp;
313 	struct page *page;
314 	int page_uptodate = 1;
315 
316 	BUG_ON(!buffer_async_read(bh));
317 
318 	page = bh->b_page;
319 	if (uptodate) {
320 		set_buffer_uptodate(bh);
321 	} else {
322 		clear_buffer_uptodate(bh);
323 		if (!quiet_error(bh))
324 			buffer_io_error(bh);
325 		SetPageError(page);
326 	}
327 
328 	/*
329 	 * Be _very_ careful from here on. Bad things can happen if
330 	 * two buffer heads end IO at almost the same time and both
331 	 * decide that the page is now completely done.
332 	 */
333 	first = page_buffers(page);
334 	local_irq_save(flags);
335 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
336 	clear_buffer_async_read(bh);
337 	unlock_buffer(bh);
338 	tmp = bh;
339 	do {
340 		if (!buffer_uptodate(tmp))
341 			page_uptodate = 0;
342 		if (buffer_async_read(tmp)) {
343 			BUG_ON(!buffer_locked(tmp));
344 			goto still_busy;
345 		}
346 		tmp = tmp->b_this_page;
347 	} while (tmp != bh);
348 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
349 	local_irq_restore(flags);
350 
351 	/*
352 	 * If none of the buffers had errors and they are all
353 	 * uptodate then we can set the page uptodate.
354 	 */
355 	if (page_uptodate && !PageError(page))
356 		SetPageUptodate(page);
357 	unlock_page(page);
358 	return;
359 
360 still_busy:
361 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
362 	local_irq_restore(flags);
363 	return;
364 }
365 
366 /*
367  * Completion handler for block_write_full_page() - pages which are unlocked
368  * during I/O, and which have PageWriteback cleared upon I/O completion.
369  */
370 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
371 {
372 	char b[BDEVNAME_SIZE];
373 	unsigned long flags;
374 	struct buffer_head *first;
375 	struct buffer_head *tmp;
376 	struct page *page;
377 
378 	BUG_ON(!buffer_async_write(bh));
379 
380 	page = bh->b_page;
381 	if (uptodate) {
382 		set_buffer_uptodate(bh);
383 	} else {
384 		if (!quiet_error(bh)) {
385 			buffer_io_error(bh);
386 			printk(KERN_WARNING "lost page write due to "
387 					"I/O error on %s\n",
388 			       bdevname(bh->b_bdev, b));
389 		}
390 		set_bit(AS_EIO, &page->mapping->flags);
391 		set_buffer_write_io_error(bh);
392 		clear_buffer_uptodate(bh);
393 		SetPageError(page);
394 	}
395 
396 	first = page_buffers(page);
397 	local_irq_save(flags);
398 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
399 
400 	clear_buffer_async_write(bh);
401 	unlock_buffer(bh);
402 	tmp = bh->b_this_page;
403 	while (tmp != bh) {
404 		if (buffer_async_write(tmp)) {
405 			BUG_ON(!buffer_locked(tmp));
406 			goto still_busy;
407 		}
408 		tmp = tmp->b_this_page;
409 	}
410 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
411 	local_irq_restore(flags);
412 	end_page_writeback(page);
413 	return;
414 
415 still_busy:
416 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
417 	local_irq_restore(flags);
418 	return;
419 }
420 EXPORT_SYMBOL(end_buffer_async_write);
421 
422 /*
423  * If a page's buffers are under async read-in (end_buffer_async_read
424  * completion) then there is a possibility that another thread of
425  * control could lock one of the buffers after it has completed
426  * but while some of the other buffers have not completed.  This
427  * locked buffer would confuse end_buffer_async_read() into not unlocking
428  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
429  * that this buffer is not under async I/O.
430  *
431  * The page comes unlocked when it has no locked buffer_async buffers
432  * left.
433  *
434  * PageLocked prevents anyone from starting new async I/O against any of
435  * the buffers.
436  *
437  * PageWriteback is used to prevent simultaneous writeout of the same
438  * page.
439  *
440  * PageLocked prevents anyone from starting writeback of a page which is
441  * under read I/O (PageWriteback is only ever set against a locked page).
442  */
443 static void mark_buffer_async_read(struct buffer_head *bh)
444 {
445 	bh->b_end_io = end_buffer_async_read;
446 	set_buffer_async_read(bh);
447 }
448 
449 static void mark_buffer_async_write_endio(struct buffer_head *bh,
450 					  bh_end_io_t *handler)
451 {
452 	bh->b_end_io = handler;
453 	set_buffer_async_write(bh);
454 }
455 
456 void mark_buffer_async_write(struct buffer_head *bh)
457 {
458 	mark_buffer_async_write_endio(bh, end_buffer_async_write);
459 }
460 EXPORT_SYMBOL(mark_buffer_async_write);
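/*
 * Illustrative sketch (not part of this file): a read path marks all
 * buffers it will read in a first pass and only then submits the I/O,
 * mirroring what block_read_full_page() does, so a fast completion
 * cannot fool end_buffer_async_read() into unlocking the page early.
 * The locals `arr', `nr' and `i' are hypothetical.
 */
#if 0	/* example only */
	/* Stage 1: lock and mark every buffer we are going to read */
	for (i = 0; i < nr; i++) {
		lock_buffer(arr[i]);
		mark_buffer_async_read(arr[i]);
	}
	/* Stage 2: submit; completions may now run in any order */
	for (i = 0; i < nr; i++)
		submit_bh(READ, arr[i]);
#endif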
461 
462 
463 /*
464  * fs/buffer.c contains helper functions for buffer-backed address space's
465  * fsync functions.  A common requirement for buffer-based filesystems is
466  * that certain data from the backing blockdev needs to be written out for
467  * a successful fsync().  For example, ext2 indirect blocks need to be
468  * written back and waited upon before fsync() returns.
469  *
470  * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
471  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
472  * management of a list of dependent buffers at ->i_mapping->private_list.
473  *
474  * Locking is a little subtle: try_to_free_buffers() will remove buffers
475  * from their controlling inode's queue when they are being freed.  But
476  * try_to_free_buffers() will be operating against the *blockdev* mapping
477  * at the time, not against the S_ISREG file which depends on those buffers.
478  * So the locking for private_list is via the private_lock in the address_space
479  * which backs the buffers.  Which is different from the address_space
480  * against which the buffers are listed.  So for a particular address_space,
481  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
482  * mapping->private_list will always be protected by the backing blockdev's
483  * ->private_lock.
484  *
485  * Which introduces a requirement: all buffers on an address_space's
486  * ->private_list must be from the same address_space: the blockdev's.
487  *
488  * address_spaces which do not place buffers at ->private_list via these
489  * utility functions are free to use private_lock and private_list for
490  * whatever they want.  The only requirement is that list_empty(private_list)
491  * be true at clear_inode() time.
492  *
493  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
494  * filesystems should do that.  invalidate_inode_buffers() should just go
495  * BUG_ON(!list_empty).
496  *
497  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
498  * take an address_space, not an inode.  And it should be called
499  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
500  * queued up.
501  *
502  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
503  * list if it is already on a list.  Because if the buffer is on a list,
504  * it *must* already be on the right one.  If not, the filesystem is being
505  * silly.  This will save a ton of locking.  But first we have to ensure
506  * that buffers are taken *off* the old inode's list when they are freed
507  * (presumably in truncate).  That requires careful auditing of all
508  * filesystems (do it inside bforget()).  It could also be done by bringing
509  * b_inode back.
510  */
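/*
 * Illustrative sketch (not part of this file): a minimal ->fsync()
 * built on these helpers, loosely modelled on ext2-style filesystems.
 * example_fsync() is hypothetical.
 */
#if 0	/* example only */
static int example_fsync(struct file *file, int datasync)
{
	/*
	 * Write out and wait upon the "associated" buffers which
	 * mark_buffer_dirty_inode() queued on ->private_list.
	 */
	return sync_mapping_buffers(file->f_mapping);
}
#endif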
511 
512 /*
513  * The buffer's backing address_space's private_lock must be held
514  */
515 static void __remove_assoc_queue(struct buffer_head *bh)
516 {
517 	list_del_init(&bh->b_assoc_buffers);
518 	WARN_ON(!bh->b_assoc_map);
519 	if (buffer_write_io_error(bh))
520 		set_bit(AS_EIO, &bh->b_assoc_map->flags);
521 	bh->b_assoc_map = NULL;
522 }
523 
524 int inode_has_buffers(struct inode *inode)
525 {
526 	return !list_empty(&inode->i_data.private_list);
527 }
528 
529 /*
530  * osync is designed to support O_SYNC I/O.  It waits synchronously for
531  * all already-submitted IO to complete, but does not queue any new
532  * writes to the disk.
533  *
534  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
535  * you dirty the buffers, and then use osync_inode_buffers to wait for
536  * completion.  Any other dirty buffers which are not yet queued for
537  * write will not be flushed to disk by the osync.
538  */
539 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
540 {
541 	struct buffer_head *bh;
542 	struct list_head *p;
543 	int err = 0;
544 
545 	spin_lock(lock);
546 repeat:
547 	list_for_each_prev(p, list) {
548 		bh = BH_ENTRY(p);
549 		if (buffer_locked(bh)) {
550 			get_bh(bh);
551 			spin_unlock(lock);
552 			wait_on_buffer(bh);
553 			if (!buffer_uptodate(bh))
554 				err = -EIO;
555 			brelse(bh);
556 			spin_lock(lock);
557 			goto repeat;
558 		}
559 	}
560 	spin_unlock(lock);
561 	return err;
562 }
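/*
 * Illustrative sketch (not part of this file) of the O_SYNC pattern
 * described above: queue each write as the buffer is dirtied, then
 * wait for the lot.  `lock' and `list' are the caller's, as in
 * fsync_buffers_list() below.
 */
#if 0	/* example only */
	mark_buffer_dirty(bh);
	ll_rw_block(SWRITE, 1, &bh);		/* queue the write now */
	/* ... dirty and queue further buffers ... */
	err = osync_buffers_list(lock, list);	/* wait for completion */
#endif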
563 
564 static void do_thaw_one(struct super_block *sb, void *unused)
565 {
566 	char b[BDEVNAME_SIZE];
567 	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
568 		printk(KERN_WARNING "Emergency Thaw on %s\n",
569 		       bdevname(sb->s_bdev, b));
570 }
571 
572 static void do_thaw_all(struct work_struct *work)
573 {
574 	iterate_supers(do_thaw_one, NULL);
575 	kfree(work);
576 	printk(KERN_WARNING "Emergency Thaw complete\n");
577 }
578 
579 /**
580  * emergency_thaw_all -- forcibly thaw every frozen filesystem
581  *
582  * Used for emergency unfreeze of all filesystems via SysRq
583  */
584 void emergency_thaw_all(void)
585 {
586 	struct work_struct *work;
587 
588 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
589 	if (work) {
590 		INIT_WORK(work, do_thaw_all);
591 		schedule_work(work);
592 	}
593 }
594 
595 /**
596  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
597  * @mapping: the mapping which wants those buffers written
598  *
599  * Starts I/O against the buffers at mapping->private_list, and waits upon
600  * that I/O.
601  *
602  * Basically, this is a convenience function for fsync().
603  * @mapping is a file or directory which needs those buffers to be written for
604  * a successful fsync().
605  */
606 int sync_mapping_buffers(struct address_space *mapping)
607 {
608 	struct address_space *buffer_mapping = mapping->assoc_mapping;
609 
610 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
611 		return 0;
612 
613 	return fsync_buffers_list(&buffer_mapping->private_lock,
614 					&mapping->private_list);
615 }
616 EXPORT_SYMBOL(sync_mapping_buffers);
617 
618 /*
619  * Called when we've recently written block `bblock', and it is known that
620  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
621  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
622  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
623  */
624 void write_boundary_block(struct block_device *bdev,
625 			sector_t bblock, unsigned blocksize)
626 {
627 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
628 	if (bh) {
629 		if (buffer_dirty(bh))
630 			ll_rw_block(WRITE, 1, &bh);
631 		put_bh(bh);
632 	}
633 }
634 
635 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
636 {
637 	struct address_space *mapping = inode->i_mapping;
638 	struct address_space *buffer_mapping = bh->b_page->mapping;
639 
640 	mark_buffer_dirty(bh);
641 	if (!mapping->assoc_mapping) {
642 		mapping->assoc_mapping = buffer_mapping;
643 	} else {
644 		BUG_ON(mapping->assoc_mapping != buffer_mapping);
645 	}
646 	if (!bh->b_assoc_map) {
647 		spin_lock(&buffer_mapping->private_lock);
648 		list_move_tail(&bh->b_assoc_buffers,
649 				&mapping->private_list);
650 		bh->b_assoc_map = mapping;
651 		spin_unlock(&buffer_mapping->private_lock);
652 	}
653 }
654 EXPORT_SYMBOL(mark_buffer_dirty_inode);
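/*
 * Illustrative sketch (not part of this file): after modifying a
 * metadata block (say an ext2 indirect block), the filesystem calls
 * mark_buffer_dirty_inode() so a later sync_mapping_buffers() from
 * ->fsync() will find it.  example_set_indirect() is hypothetical.
 */
#if 0	/* example only */
static void example_set_indirect(struct inode *inode,
				 struct buffer_head *bh, int n, __le32 blk)
{
	((__le32 *)bh->b_data)[n] = blk;	/* modify in memory */
	mark_buffer_dirty_inode(bh, inode);	/* queue on ->private_list */
}
#endif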
655 
656 /*
657  * Mark the page dirty, set it dirty in the radix tree, and mark the inode
658  * dirty.
659  *
660  * If warn is true, then emit a warning if the page is not uptodate and has
661  * not been truncated.
662  */
663 static void __set_page_dirty(struct page *page,
664 		struct address_space *mapping, int warn)
665 {
666 	spin_lock_irq(&mapping->tree_lock);
667 	if (page->mapping) {	/* Race with truncate? */
668 		WARN_ON_ONCE(warn && !PageUptodate(page));
669 		account_page_dirtied(page, mapping);
670 		radix_tree_tag_set(&mapping->page_tree,
671 				page_index(page), PAGECACHE_TAG_DIRTY);
672 	}
673 	spin_unlock_irq(&mapping->tree_lock);
674 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
675 }
676 
677 /*
678  * Add a page to the dirty page list.
679  *
680  * It is a sad fact of life that this function is called from several places
681  * deeply under spinlocking.  It may not sleep.
682  *
683  * If the page has buffers, the uptodate buffers are set dirty, to preserve
684  * dirty-state coherency between the page and the buffers.  If the page does
685  * not have buffers then when they are later attached they will all be set
686  * dirty.
687  *
688  * The buffers are dirtied before the page is dirtied.  There's a small race
689  * window in which a writepage caller may see the page cleanness but not the
690  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
691  * before the buffers, a concurrent writepage caller could clear the page dirty
692  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
693  * page on the dirty page list.
694  *
695  * We use private_lock to lock against try_to_free_buffers while using the
696  * page's buffer list.  Also use this to protect against clean buffers being
697  * added to the page after it was set dirty.
698  *
699  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
700  * address_space though.
701  */
702 int __set_page_dirty_buffers(struct page *page)
703 {
704 	int newly_dirty;
705 	struct address_space *mapping = page_mapping(page);
706 
707 	if (unlikely(!mapping))
708 		return !TestSetPageDirty(page);
709 
710 	spin_lock(&mapping->private_lock);
711 	if (page_has_buffers(page)) {
712 		struct buffer_head *head = page_buffers(page);
713 		struct buffer_head *bh = head;
714 
715 		do {
716 			set_buffer_dirty(bh);
717 			bh = bh->b_this_page;
718 		} while (bh != head);
719 	}
720 	newly_dirty = !TestSetPageDirty(page);
721 	spin_unlock(&mapping->private_lock);
722 
723 	if (newly_dirty)
724 		__set_page_dirty(page, mapping, 1);
725 	return newly_dirty;
726 }
727 EXPORT_SYMBOL(__set_page_dirty_buffers);
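/*
 * Buffer-backed filesystems normally install this function directly as
 * their ->set_page_dirty address_space operation; a minimal sketch
 * (example_aops is a hypothetical name):
 */
#if 0	/* example only */
static const struct address_space_operations example_aops = {
	.set_page_dirty	= __set_page_dirty_buffers,
};
#endif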
728 
729 /*
730  * Write out and wait upon a list of buffers.
731  *
732  * We have conflicting pressures: we want to make sure that all
733  * initially dirty buffers get waited on, but that any subsequently
734  * dirtied buffers don't.  After all, we don't want fsync to last
735  * forever if somebody is actively writing to the file.
736  *
737  * Do this in two main stages: first we copy dirty buffers to a
738  * temporary inode list, queueing the writes as we go.  Then we clean
739  * up, waiting for those writes to complete.
740  *
741  * During this second stage, any subsequent updates to the file may end
742  * up refiling the buffer on the original inode's dirty list again, so
743  * there is a chance we will end up with a buffer queued for write but
744  * not yet completed on that list.  So, as a final cleanup we go through
745  * the osync code to catch these locked, dirty buffers without requeuing
746  * any newly dirty buffers for write.
747  */
748 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
749 {
750 	struct buffer_head *bh;
751 	struct list_head tmp;
752 	struct address_space *mapping, *prev_mapping = NULL;
753 	int err = 0, err2;
754 
755 	INIT_LIST_HEAD(&tmp);
756 
757 	spin_lock(lock);
758 	while (!list_empty(list)) {
759 		bh = BH_ENTRY(list->next);
760 		mapping = bh->b_assoc_map;
761 		__remove_assoc_queue(bh);
762 		/* Avoid race with mark_buffer_dirty_inode() which does
763 		 * a lockless check and we rely on seeing the dirty bit */
764 		smp_mb();
765 		if (buffer_dirty(bh) || buffer_locked(bh)) {
766 			list_add(&bh->b_assoc_buffers, &tmp);
767 			bh->b_assoc_map = mapping;
768 			if (buffer_dirty(bh)) {
769 				get_bh(bh);
770 				spin_unlock(lock);
771 				/*
772 				 * Ensure any pending I/O completes so that
773 				 * ll_rw_block() actually writes the current
774 				 * contents - it is a noop if I/O is still in
775 				 * flight on potentially older contents.
776 				 */
777 				ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
778 
779 				/*
780 				 * Kick off IO for the previous mapping. Note
781 				 * that we will not run the very last mapping,
782 				 * wait_on_buffer() will do that for us
783 				 * through sync_buffer().
784 				 */
785 				if (prev_mapping && prev_mapping != mapping)
786 					blk_run_address_space(prev_mapping);
787 				prev_mapping = mapping;
788 
789 				brelse(bh);
790 				spin_lock(lock);
791 			}
792 		}
793 	}
794 
795 	while (!list_empty(&tmp)) {
796 		bh = BH_ENTRY(tmp.prev);
797 		get_bh(bh);
798 		mapping = bh->b_assoc_map;
799 		__remove_assoc_queue(bh);
800 		/* Avoid race with mark_buffer_dirty_inode() which does
801 		 * a lockless check and we rely on seeing the dirty bit */
802 		smp_mb();
803 		if (buffer_dirty(bh)) {
804 			list_add(&bh->b_assoc_buffers,
805 				 &mapping->private_list);
806 			bh->b_assoc_map = mapping;
807 		}
808 		spin_unlock(lock);
809 		wait_on_buffer(bh);
810 		if (!buffer_uptodate(bh))
811 			err = -EIO;
812 		brelse(bh);
813 		spin_lock(lock);
814 	}
815 
816 	spin_unlock(lock);
817 	err2 = osync_buffers_list(lock, list);
818 	if (err)
819 		return err;
820 	else
821 		return err2;
822 }
823 
824 /*
825  * Invalidate any and all dirty buffers on a given inode.  We are
826  * probably unmounting the fs, but that doesn't mean we have already
827  * done a sync().  Just drop the buffers from the inode list.
828  *
829  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
830  * assumes that all the buffers are against the blockdev.  Not true
831  * for reiserfs.
832  */
833 void invalidate_inode_buffers(struct inode *inode)
834 {
835 	if (inode_has_buffers(inode)) {
836 		struct address_space *mapping = &inode->i_data;
837 		struct list_head *list = &mapping->private_list;
838 		struct address_space *buffer_mapping = mapping->assoc_mapping;
839 
840 		spin_lock(&buffer_mapping->private_lock);
841 		while (!list_empty(list))
842 			__remove_assoc_queue(BH_ENTRY(list->next));
843 		spin_unlock(&buffer_mapping->private_lock);
844 	}
845 }
846 EXPORT_SYMBOL(invalidate_inode_buffers);
847 
848 /*
849  * Remove any clean buffers from the inode's buffer list.  This is called
850  * when we're trying to free the inode itself.  Those buffers can pin it.
851  *
852  * Returns true if all buffers were removed.
853  */
854 int remove_inode_buffers(struct inode *inode)
855 {
856 	int ret = 1;
857 
858 	if (inode_has_buffers(inode)) {
859 		struct address_space *mapping = &inode->i_data;
860 		struct list_head *list = &mapping->private_list;
861 		struct address_space *buffer_mapping = mapping->assoc_mapping;
862 
863 		spin_lock(&buffer_mapping->private_lock);
864 		while (!list_empty(list)) {
865 			struct buffer_head *bh = BH_ENTRY(list->next);
866 			if (buffer_dirty(bh)) {
867 				ret = 0;
868 				break;
869 			}
870 			__remove_assoc_queue(bh);
871 		}
872 		spin_unlock(&buffer_mapping->private_lock);
873 	}
874 	return ret;
875 }
876 
877 /*
878  * Create the appropriate buffers when given a page for data area and
879  * the size of each buffer.. Use the bh->b_this_page linked list to
880  * follow the buffers created.  Return NULL if unable to create more
881  * buffers.
882  *
883  * The retry flag is used to differentiate async IO (paging, swapping)
884  * which may not fail from ordinary buffer allocations.
885  */
886 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
887 		int retry)
888 {
889 	struct buffer_head *bh, *head;
890 	long offset;
891 
892 try_again:
893 	head = NULL;
894 	offset = PAGE_SIZE;
895 	while ((offset -= size) >= 0) {
896 		bh = alloc_buffer_head(GFP_NOFS);
897 		if (!bh)
898 			goto no_grow;
899 
900 		bh->b_bdev = NULL;
901 		bh->b_this_page = head;
902 		bh->b_blocknr = -1;
903 		head = bh;
904 
905 		bh->b_state = 0;
906 		atomic_set(&bh->b_count, 0);
907 		bh->b_private = NULL;
908 		bh->b_size = size;
909 
910 		/* Link the buffer to its page */
911 		set_bh_page(bh, page, offset);
912 
913 		init_buffer(bh, NULL, NULL);
914 	}
915 	return head;
916 /*
917  * In case anything failed, we just free everything we got.
918  */
919 no_grow:
920 	if (head) {
921 		do {
922 			bh = head;
923 			head = head->b_this_page;
924 			free_buffer_head(bh);
925 		} while (head);
926 	}
927 
928 	/*
929 	 * Return failure for non-async IO requests.  Async IO requests
930 	 * are not allowed to fail, so we have to wait until buffer heads
931 	 * become available.  But we don't want tasks sleeping with
932 	 * partially complete buffers, so all were released above.
933 	 */
934 	if (!retry)
935 		return NULL;
936 
937 	/* We're _really_ low on memory. Now we just
938 	 * wait for old buffer heads to become free due to
939 	 * finishing IO.  Since this is an async request and
940 	 * the reserve list is empty, we're sure there are
941 	 * async buffer heads in use.
942 	 */
943 	free_more_memory();
944 	goto try_again;
945 }
946 EXPORT_SYMBOL_GPL(alloc_page_buffers);
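/*
 * Illustrative sketch (not part of this file): allocating a page's
 * buffer ring by hand and freeing it on error.  With retry != 0 the
 * call cannot fail; with retry == 0 the caller must handle NULL, as
 * grow_dev_page() below does.  Note the chain is NULL-terminated
 * until link_dev_buffers() makes it circular.
 */
#if 0	/* example only */
	struct buffer_head *head = alloc_page_buffers(page, 512, 0);

	while (head) {			/* free the singly linked chain */
		struct buffer_head *bh = head;

		head = head->b_this_page;
		free_buffer_head(bh);
	}
#endif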
947 
948 static inline void
949 link_dev_buffers(struct page *page, struct buffer_head *head)
950 {
951 	struct buffer_head *bh, *tail;
952 
953 	bh = head;
954 	do {
955 		tail = bh;
956 		bh = bh->b_this_page;
957 	} while (bh);
958 	tail->b_this_page = head;
959 	attach_page_buffers(page, head);
960 }
961 
962 /*
963  * Initialise the state of a blockdev page's buffers.
964  */
965 static void
966 init_page_buffers(struct page *page, struct block_device *bdev,
967 			sector_t block, int size)
968 {
969 	struct buffer_head *head = page_buffers(page);
970 	struct buffer_head *bh = head;
971 	int uptodate = PageUptodate(page);
972 
973 	do {
974 		if (!buffer_mapped(bh)) {
975 			init_buffer(bh, NULL, NULL);
976 			bh->b_bdev = bdev;
977 			bh->b_blocknr = block;
978 			if (uptodate)
979 				set_buffer_uptodate(bh);
980 			set_buffer_mapped(bh);
981 		}
982 		block++;
983 		bh = bh->b_this_page;
984 	} while (bh != head);
985 }
986 
987 /*
988  * Create the page-cache page that contains the requested block.
989  *
990  * This is used purely for blockdev mappings.
991  */
992 static struct page *
993 grow_dev_page(struct block_device *bdev, sector_t block,
994 		pgoff_t index, int size)
995 {
996 	struct inode *inode = bdev->bd_inode;
997 	struct page *page;
998 	struct buffer_head *bh;
999 
1000 	page = find_or_create_page(inode->i_mapping, index,
1001 		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1002 	if (!page)
1003 		return NULL;
1004 
1005 	BUG_ON(!PageLocked(page));
1006 
1007 	if (page_has_buffers(page)) {
1008 		bh = page_buffers(page);
1009 		if (bh->b_size == size) {
1010 			init_page_buffers(page, bdev, block, size);
1011 			return page;
1012 		}
1013 		if (!try_to_free_buffers(page))
1014 			goto failed;
1015 	}
1016 
1017 	/*
1018 	 * Allocate some buffers for this page
1019 	 */
1020 	bh = alloc_page_buffers(page, size, 0);
1021 	if (!bh)
1022 		goto failed;
1023 
1024 	/*
1025 	 * Link the page to the buffers and initialise them.  Take the
1026 	 * lock to be atomic wrt __find_get_block(), which does not
1027 	 * run under the page lock.
1028 	 */
1029 	spin_lock(&inode->i_mapping->private_lock);
1030 	link_dev_buffers(page, bh);
1031 	init_page_buffers(page, bdev, block, size);
1032 	spin_unlock(&inode->i_mapping->private_lock);
1033 	return page;
1034 
1035 failed:
1036 	BUG();
1037 	unlock_page(page);
1038 	page_cache_release(page);
1039 	return NULL;
1040 }
1041 
1042 /*
1043  * Create buffers for the page that contains the given block device block.
1044  * If that page was dirty, the buffers are set dirty also.
1045  */
1046 static int
1047 grow_buffers(struct block_device *bdev, sector_t block, int size)
1048 {
1049 	struct page *page;
1050 	pgoff_t index;
1051 	int sizebits;
1052 
1053 	sizebits = -1;
1054 	do {
1055 		sizebits++;
1056 	} while ((size << sizebits) < PAGE_SIZE);
1057 
1058 	index = block >> sizebits;
1059 
1060 	/*
1061 	 * Check for a block which wants to lie outside our maximum possible
1062 	 * pagecache index.  (this comparison is done using sector_t types).
1063 	 */
1064 	if (unlikely(index != block >> sizebits)) {
1065 		char b[BDEVNAME_SIZE];
1066 
1067 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1068 			"device %s\n",
1069 			__func__, (unsigned long long)block,
1070 			bdevname(bdev, b));
1071 		return -EIO;
1072 	}
1073 	block = index << sizebits;
1074 	/* Create a page with the proper size buffers.. */
1075 	page = grow_dev_page(bdev, block, index, size);
1076 	if (!page)
1077 		return 0;
1078 	unlock_page(page);
1079 	page_cache_release(page);
1080 	return 1;
1081 }
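/*
 * Worked example (editorial note): with 512-byte blocks and 4096-byte
 * pages, sizebits == 3, so the pagecache index is block >> 3 and the
 * page created above holds blocks (index << 3) .. (index << 3) + 7.
 */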
1082 
1083 static struct buffer_head *
1084 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1085 {
1086 	/* Size must be a multiple of the device's logical block size */
1087 	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1088 			(size < 512 || size > PAGE_SIZE))) {
1089 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1090 					size);
1091 		printk(KERN_ERR "logical block size: %d\n",
1092 					bdev_logical_block_size(bdev));
1093 
1094 		dump_stack();
1095 		return NULL;
1096 	}
1097 
1098 	for (;;) {
1099 		struct buffer_head * bh;
1100 		int ret;
1101 
1102 		bh = __find_get_block(bdev, block, size);
1103 		if (bh)
1104 			return bh;
1105 
1106 		ret = grow_buffers(bdev, block, size);
1107 		if (ret < 0)
1108 			return NULL;
1109 		if (ret == 0)
1110 			free_more_memory();
1111 	}
1112 }
1113 
1114 /*
1115  * The relationship between dirty buffers and dirty pages:
1116  *
1117  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1118  * the page is tagged dirty in its radix tree.
1119  *
1120  * At all times, the dirtiness of the buffers represents the dirtiness of
1121  * subsections of the page.  If the page has buffers, the page dirty bit is
1122  * merely a hint about the true dirty state.
1123  *
1124  * When a page is set dirty in its entirety, all its buffers are marked dirty
1125  * (if the page has buffers).
1126  *
1127  * When a buffer is marked dirty, its page is dirtied, but the page's other
1128  * buffers are not.
1129  *
1130  * Also.  When blockdev buffers are explicitly read with bread(), they
1131  * individually become uptodate.  But their backing page remains not
1132  * uptodate - even if all of its buffers are uptodate.  A subsequent
1133  * block_read_full_page() against that page will discover all the uptodate
1134  * buffers, will set the page uptodate and will perform no I/O.
1135  */
1136 
1137 /**
1138  * mark_buffer_dirty - mark a buffer_head as needing writeout
1139  * @bh: the buffer_head to mark dirty
1140  *
1141  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1142  * backing page dirty, then tag the page as dirty in its address_space's radix
1143  * tree and then attach the address_space's inode to its superblock's dirty
1144  * inode list.
1145  *
1146  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1147  * mapping->tree_lock and the global inode_lock.
1148  */
1149 void mark_buffer_dirty(struct buffer_head *bh)
1150 {
1151 	WARN_ON_ONCE(!buffer_uptodate(bh));
1152 
1153 	/*
1154 	 * Very *carefully* optimize the it-is-already-dirty case.
1155 	 *
1156 	 * Don't let the final "is it dirty" escape to before we
1157 	 * perhaps modified the buffer.
1158 	 */
1159 	if (buffer_dirty(bh)) {
1160 		smp_mb();
1161 		if (buffer_dirty(bh))
1162 			return;
1163 	}
1164 
1165 	if (!test_set_buffer_dirty(bh)) {
1166 		struct page *page = bh->b_page;
1167 		if (!TestSetPageDirty(page)) {
1168 			struct address_space *mapping = page_mapping(page);
1169 			if (mapping)
1170 				__set_page_dirty(page, mapping, 0);
1171 		}
1172 	}
1173 }
1174 EXPORT_SYMBOL(mark_buffer_dirty);
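/*
 * Illustrative sketch (not part of this file): the common
 * read-modify-write cycle against a metadata block; example_set_byte()
 * is hypothetical.  mark_buffer_dirty() makes the buffer, its page and
 * the inode eligible for writeback but starts no I/O itself.
 */
#if 0	/* example only */
static int example_set_byte(struct super_block *sb, sector_t blk, char val)
{
	struct buffer_head *bh = sb_bread(sb, blk);	/* read, or NULL */

	if (!bh)
		return -EIO;
	((char *)bh->b_data)[0] = val;	/* modify in memory */
	mark_buffer_dirty(bh);		/* schedule for writeback */
	brelse(bh);			/* drop our reference */
	return 0;
}
#endif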
1175 
1176 /*
1177  * Decrement a buffer_head's reference count.  If all buffers against a page
1178  * have zero reference count, are clean and unlocked, and if the page is clean
1179  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1180  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1181  * a page but it ends up not being freed, and buffers may later be reattached).
1182  */
1183 void __brelse(struct buffer_head * buf)
1184 {
1185 	if (atomic_read(&buf->b_count)) {
1186 		put_bh(buf);
1187 		return;
1188 	}
1189 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1190 }
1191 EXPORT_SYMBOL(__brelse);
1192 
1193 /*
1194  * bforget() is like brelse(), except it discards any
1195  * potentially dirty data.
1196  */
1197 void __bforget(struct buffer_head *bh)
1198 {
1199 	clear_buffer_dirty(bh);
1200 	if (bh->b_assoc_map) {
1201 		struct address_space *buffer_mapping = bh->b_page->mapping;
1202 
1203 		spin_lock(&buffer_mapping->private_lock);
1204 		list_del_init(&bh->b_assoc_buffers);
1205 		bh->b_assoc_map = NULL;
1206 		spin_unlock(&buffer_mapping->private_lock);
1207 	}
1208 	__brelse(bh);
1209 }
1210 EXPORT_SYMBOL(__bforget);
1211 
1212 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1213 {
1214 	lock_buffer(bh);
1215 	if (buffer_uptodate(bh)) {
1216 		unlock_buffer(bh);
1217 		return bh;
1218 	} else {
1219 		get_bh(bh);
1220 		bh->b_end_io = end_buffer_read_sync;
1221 		submit_bh(READ, bh);
1222 		wait_on_buffer(bh);
1223 		if (buffer_uptodate(bh))
1224 			return bh;
1225 	}
1226 	brelse(bh);
1227 	return NULL;
1228 }
1229 
1230 /*
1231  * Per-cpu buffer LRU implementation, used to reduce the cost of __find_get_block().
1232  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1233  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1234  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1235  * CPU's LRUs at the same time.
1236  *
1237  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1238  * sb_find_get_block().
1239  *
1240  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1241  * a local interrupt disable for that.
1242  */
1243 
1244 #define BH_LRU_SIZE	8
1245 
1246 struct bh_lru {
1247 	struct buffer_head *bhs[BH_LRU_SIZE];
1248 };
1249 
1250 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1251 
1252 #ifdef CONFIG_SMP
1253 #define bh_lru_lock()	local_irq_disable()
1254 #define bh_lru_unlock()	local_irq_enable()
1255 #else
1256 #define bh_lru_lock()	preempt_disable()
1257 #define bh_lru_unlock()	preempt_enable()
1258 #endif
1259 
1260 static inline void check_irqs_on(void)
1261 {
1262 #ifdef irqs_disabled
1263 	BUG_ON(irqs_disabled());
1264 #endif
1265 }
1266 
1267 /*
1268  * The LRU management algorithm is dopey-but-simple.  Sorry.
1269  */
1270 static void bh_lru_install(struct buffer_head *bh)
1271 {
1272 	struct buffer_head *evictee = NULL;
1273 	struct bh_lru *lru;
1274 
1275 	check_irqs_on();
1276 	bh_lru_lock();
1277 	lru = &__get_cpu_var(bh_lrus);
1278 	if (lru->bhs[0] != bh) {
1279 		struct buffer_head *bhs[BH_LRU_SIZE];
1280 		int in;
1281 		int out = 0;
1282 
1283 		get_bh(bh);
1284 		bhs[out++] = bh;
1285 		for (in = 0; in < BH_LRU_SIZE; in++) {
1286 			struct buffer_head *bh2 = lru->bhs[in];
1287 
1288 			if (bh2 == bh) {
1289 				__brelse(bh2);
1290 			} else {
1291 				if (out >= BH_LRU_SIZE) {
1292 					BUG_ON(evictee != NULL);
1293 					evictee = bh2;
1294 				} else {
1295 					bhs[out++] = bh2;
1296 				}
1297 			}
1298 		}
1299 		while (out < BH_LRU_SIZE)
1300 			bhs[out++] = NULL;
1301 		memcpy(lru->bhs, bhs, sizeof(bhs));
1302 	}
1303 	bh_lru_unlock();
1304 
1305 	if (evictee)
1306 		__brelse(evictee);
1307 }
1308 
1309 /*
1310  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1311  */
1312 static struct buffer_head *
1313 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1314 {
1315 	struct buffer_head *ret = NULL;
1316 	struct bh_lru *lru;
1317 	unsigned int i;
1318 
1319 	check_irqs_on();
1320 	bh_lru_lock();
1321 	lru = &__get_cpu_var(bh_lrus);
1322 	for (i = 0; i < BH_LRU_SIZE; i++) {
1323 		struct buffer_head *bh = lru->bhs[i];
1324 
1325 		if (bh && bh->b_bdev == bdev &&
1326 				bh->b_blocknr == block && bh->b_size == size) {
1327 			if (i) {
1328 				while (i) {
1329 					lru->bhs[i] = lru->bhs[i - 1];
1330 					i--;
1331 				}
1332 				lru->bhs[0] = bh;
1333 			}
1334 			get_bh(bh);
1335 			ret = bh;
1336 			break;
1337 		}
1338 	}
1339 	bh_lru_unlock();
1340 	return ret;
1341 }
1342 
1343 /*
1344  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1345  * it in the LRU and mark it as accessed.  If it is not present then return
1346  * NULL
1347  */
1348 struct buffer_head *
1349 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1350 {
1351 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1352 
1353 	if (bh == NULL) {
1354 		bh = __find_get_block_slow(bdev, block);
1355 		if (bh)
1356 			bh_lru_install(bh);
1357 	}
1358 	if (bh)
1359 		touch_buffer(bh);
1360 	return bh;
1361 }
1362 EXPORT_SYMBOL(__find_get_block);
1363 
1364 /*
1365  * __getblk will locate (and, if necessary, create) the buffer_head
1366  * which corresponds to the passed block_device, block and size. The
1367  * returned buffer has its reference count incremented.
1368  *
1369  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1370  * illegal block number, __getblk() will happily return a buffer_head
1371  * which represents the non-existent block.  Very weird.
1372  *
1373  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1374  * attempt is failing.  FIXME, perhaps?
1375  */
1376 struct buffer_head *
1377 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1378 {
1379 	struct buffer_head *bh = __find_get_block(bdev, block, size);
1380 
1381 	might_sleep();
1382 	if (bh == NULL)
1383 		bh = __getblk_slow(bdev, block, size);
1384 	return bh;
1385 }
1386 EXPORT_SYMBOL(__getblk);
1387 
1388 /*
1389  * Do async read-ahead on a buffer.
1390  */
1391 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1392 {
1393 	struct buffer_head *bh = __getblk(bdev, block, size);
1394 	if (likely(bh)) {
1395 		ll_rw_block(READA, 1, &bh);
1396 		brelse(bh);
1397 	}
1398 }
1399 EXPORT_SYMBOL(__breadahead);
1400 
1401 /**
1402  *  __bread() - reads a specified block and returns the bh
1403  *  @bdev: the block_device to read from
1404  *  @block: number of block
1405  *  @size: size (in bytes) to read
1406  *
1407  *  Reads a specified block, and returns buffer head that contains it.
1408  *  It returns NULL if the block was unreadable.
1409  */
1410 struct buffer_head *
1411 __bread(struct block_device *bdev, sector_t block, unsigned size)
1412 {
1413 	struct buffer_head *bh = __getblk(bdev, block, size);
1414 
1415 	if (likely(bh) && !buffer_uptodate(bh))
1416 		bh = __bread_slow(bh);
1417 	return bh;
1418 }
1419 EXPORT_SYMBOL(__bread);
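/*
 * Illustrative sketch (not part of this file): the readahead-then-read
 * idiom these helpers support; `bdev' and the block numbers are
 * hypothetical.
 */
#if 0	/* example only */
	__breadahead(bdev, blk + 1, 512);	/* hint: start async read */
	bh = __bread(bdev, blk, 512);		/* blocking read */
	if (bh) {
		/* use bh->b_data here */
		brelse(bh);
	}
#endif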
1420 
1421 /*
1422  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1423  * This doesn't race because it runs in each cpu either in irq
1424  * or with preempt disabled.
1425  */
1426 static void invalidate_bh_lru(void *arg)
1427 {
1428 	struct bh_lru *b = &get_cpu_var(bh_lrus);
1429 	int i;
1430 
1431 	for (i = 0; i < BH_LRU_SIZE; i++) {
1432 		brelse(b->bhs[i]);
1433 		b->bhs[i] = NULL;
1434 	}
1435 	put_cpu_var(bh_lrus);
1436 }
1437 
1438 void invalidate_bh_lrus(void)
1439 {
1440 	on_each_cpu(invalidate_bh_lru, NULL, 1);
1441 }
1442 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1443 
1444 void set_bh_page(struct buffer_head *bh,
1445 		struct page *page, unsigned long offset)
1446 {
1447 	bh->b_page = page;
1448 	BUG_ON(offset >= PAGE_SIZE);
1449 	if (PageHighMem(page))
1450 		/*
1451 		 * This catches illegal uses and preserves the offset:
1452 		 */
1453 		bh->b_data = (char *)(0 + offset);
1454 	else
1455 		bh->b_data = page_address(page) + offset;
1456 }
1457 EXPORT_SYMBOL(set_bh_page);
1458 
1459 /*
1460  * Called when truncating a buffer on a page completely.
1461  */
1462 static void discard_buffer(struct buffer_head * bh)
1463 {
1464 	lock_buffer(bh);
1465 	clear_buffer_dirty(bh);
1466 	bh->b_bdev = NULL;
1467 	clear_buffer_mapped(bh);
1468 	clear_buffer_req(bh);
1469 	clear_buffer_new(bh);
1470 	clear_buffer_delay(bh);
1471 	clear_buffer_unwritten(bh);
1472 	unlock_buffer(bh);
1473 }
1474 
1475 /**
1476  * block_invalidatepage - invalidate part or all of a buffer-backed page
1477  *
1478  * @page: the page which is affected
1479  * @offset: the index of the truncation point
1480  *
1481  * block_invalidatepage() is called when all or part of the page has become
1482  * invalidatedby a truncate operation.
1483  *
1484  * block_invalidatepage() does not have to release all buffers, but it must
1485  * ensure that no dirty buffer is left outside @offset and that no I/O
1486  * is underway against any of the blocks which are outside the truncation
1487  * point.  Because the caller is about to free (and possibly reuse) those
1488  * blocks on-disk.
1489  */
1490 void block_invalidatepage(struct page *page, unsigned long offset)
1491 {
1492 	struct buffer_head *head, *bh, *next;
1493 	unsigned int curr_off = 0;
1494 
1495 	BUG_ON(!PageLocked(page));
1496 	if (!page_has_buffers(page))
1497 		goto out;
1498 
1499 	head = page_buffers(page);
1500 	bh = head;
1501 	do {
1502 		unsigned int next_off = curr_off + bh->b_size;
1503 		next = bh->b_this_page;
1504 
1505 		/*
1506 		 * is this block fully invalidated?
1507 		 */
1508 		if (offset <= curr_off)
1509 			discard_buffer(bh);
1510 		curr_off = next_off;
1511 		bh = next;
1512 	} while (bh != head);
1513 
1514 	/*
1515 	 * We release buffers only if the entire page is being invalidated.
1516 	 * The get_block cached value has been unconditionally invalidated,
1517 	 * so real IO is not possible anymore.
1518 	 */
1519 	if (offset == 0)
1520 		try_to_release_page(page, 0);
1521 out:
1522 	return;
1523 }
1524 EXPORT_SYMBOL(block_invalidatepage);
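/*
 * block_invalidatepage() is normally installed directly as the
 * ->invalidatepage address_space operation; a minimal sketch
 * (example_aops is a hypothetical name):
 */
#if 0	/* example only */
static const struct address_space_operations example_aops = {
	.invalidatepage	= block_invalidatepage,
};
#endif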
1525 
1526 /*
1527  * We attach and possibly dirty the buffers atomically wrt
1528  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1529  * is already excluded via the page lock.
1530  */
1531 void create_empty_buffers(struct page *page,
1532 			unsigned long blocksize, unsigned long b_state)
1533 {
1534 	struct buffer_head *bh, *head, *tail;
1535 
1536 	head = alloc_page_buffers(page, blocksize, 1);
1537 	bh = head;
1538 	do {
1539 		bh->b_state |= b_state;
1540 		tail = bh;
1541 		bh = bh->b_this_page;
1542 	} while (bh);
1543 	tail->b_this_page = head;
1544 
1545 	spin_lock(&page->mapping->private_lock);
1546 	if (PageUptodate(page) || PageDirty(page)) {
1547 		bh = head;
1548 		do {
1549 			if (PageDirty(page))
1550 				set_buffer_dirty(bh);
1551 			if (PageUptodate(page))
1552 				set_buffer_uptodate(bh);
1553 			bh = bh->b_this_page;
1554 		} while (bh != head);
1555 	}
1556 	attach_page_buffers(page, head);
1557 	spin_unlock(&page->mapping->private_lock);
1558 }
1559 EXPORT_SYMBOL(create_empty_buffers);
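/*
 * Illustrative sketch (not part of this file): a write path making
 * sure a locked page has a buffer ring before walking it, exactly as
 * __block_write_full_page() does below.
 */
#if 0	/* example only */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
	head = page_buffers(page);
#endif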
1560 
1561 /*
1562  * We are taking a block for data and we don't want any output from any
1563  * buffer-cache aliases starting from return from that function and
1564  * until the moment when something will explicitly mark the buffer
1565  * dirty (hopefully that will not happen until we free that block ;-)
1566  * We don't even need to mark it not-uptodate - nobody can expect
1567  * anything from a newly allocated buffer anyway. We used to use
1568  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1569  * don't want to mark the alias unmapped, for example - it would confuse
1570  * anyone who might pick it with bread() afterwards...
1571  *
1572  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1573  * be writeout I/O going on against recently-freed buffers.  We don't
1574  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1575  * only if we really need to.  That happens here.
1576  */
1577 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1578 {
1579 	struct buffer_head *old_bh;
1580 
1581 	might_sleep();
1582 
1583 	old_bh = __find_get_block_slow(bdev, block);
1584 	if (old_bh) {
1585 		clear_buffer_dirty(old_bh);
1586 		wait_on_buffer(old_bh);
1587 		clear_buffer_req(old_bh);
1588 		__brelse(old_bh);
1589 	}
1590 }
1591 EXPORT_SYMBOL(unmap_underlying_metadata);
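/*
 * Illustrative sketch (not part of this file): after get_block() maps
 * a buffer_new() block, the caller kills any stale blockdev alias
 * before data hits disk, as __block_write_full_page() does below.
 */
#if 0	/* example only */
	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
	}
#endif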
1592 
1593 /*
1594  * NOTE! All mapped/uptodate combinations are valid:
1595  *
1596  *	Mapped	Uptodate	Meaning
1597  *
1598  *	No	No		"unknown" - must do get_block()
1599  *	No	Yes		"hole" - zero-filled
1600  *	Yes	No		"allocated" - allocated on disk, not read in
1601  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1602  *
1603  * "Dirty" is valid only with the last case (mapped+uptodate).
1604  */
1605 
1606 /*
1607  * While block_write_full_page is writing back the dirty buffers under
1608  * the page lock, whoever dirtied the buffers may decide to clean them
1609  * again at any time.  We handle that by only looking at the buffer
1610  * state inside lock_buffer().
1611  *
1612  * If block_write_full_page() is called for regular writeback
1613  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1614  * locked buffer.   This only can happen if someone has written the buffer
1615  * directly, with submit_bh().  At the address_space level PageWriteback
1616  * prevents this contention from occurring.
1617  *
1618  * If block_write_full_page() is called with wbc->sync_mode ==
1619  * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
1620  * causes the writes to be flagged as synchronous writes, but the
1621  * block device queue will NOT be unplugged, since usually many pages
1622  * will be pushed out before the higher-level caller actually
1623  * waits for the writes to be completed.  The various wait functions,
1624  * such as wait_on_writeback_range() will ultimately call sync_page()
1625  * which will ultimately call blk_run_backing_dev(), which will end up
1626  * unplugging the device queue.
1627  */
1628 static int __block_write_full_page(struct inode *inode, struct page *page,
1629 			get_block_t *get_block, struct writeback_control *wbc,
1630 			bh_end_io_t *handler)
1631 {
1632 	int err;
1633 	sector_t block;
1634 	sector_t last_block;
1635 	struct buffer_head *bh, *head;
1636 	const unsigned blocksize = 1 << inode->i_blkbits;
1637 	int nr_underway = 0;
1638 	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1639 			WRITE_SYNC_PLUG : WRITE);
1640 
1641 	BUG_ON(!PageLocked(page));
1642 
1643 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1644 
1645 	if (!page_has_buffers(page)) {
1646 		create_empty_buffers(page, blocksize,
1647 					(1 << BH_Dirty)|(1 << BH_Uptodate));
1648 	}
1649 
1650 	/*
1651 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1652 	 * here, and the (potentially unmapped) buffers may become dirty at
1653 	 * any time.  If a buffer becomes dirty here after we've inspected it
1654 	 * then we just miss that fact, and the page stays dirty.
1655 	 *
1656 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1657 	 * handle that here by just cleaning them.
1658 	 */
1659 
1660 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1661 	head = page_buffers(page);
1662 	bh = head;
1663 
1664 	/*
1665 	 * Get all the dirty buffers mapped to disk addresses and
1666 	 * handle any aliases from the underlying blockdev's mapping.
1667 	 */
1668 	do {
1669 		if (block > last_block) {
1670 			/*
1671 			 * mapped buffers outside i_size will occur, because
1672 			 * this page can be outside i_size when there is a
1673 			 * truncate in progress.
1674 			 */
1675 			/*
1676 			 * The buffer was zeroed by block_write_full_page()
1677 			 */
1678 			clear_buffer_dirty(bh);
1679 			set_buffer_uptodate(bh);
1680 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1681 			   buffer_dirty(bh)) {
1682 			WARN_ON(bh->b_size != blocksize);
1683 			err = get_block(inode, block, bh, 1);
1684 			if (err)
1685 				goto recover;
1686 			clear_buffer_delay(bh);
1687 			if (buffer_new(bh)) {
1688 				/* blockdev mappings never come here */
1689 				clear_buffer_new(bh);
1690 				unmap_underlying_metadata(bh->b_bdev,
1691 							bh->b_blocknr);
1692 			}
1693 		}
1694 		bh = bh->b_this_page;
1695 		block++;
1696 	} while (bh != head);
1697 
1698 	do {
1699 		if (!buffer_mapped(bh))
1700 			continue;
1701 		/*
1702 		 * If it's a fully non-blocking write attempt and we cannot
1703 		 * lock the buffer then redirty the page.  Note that this can
1704 		 * potentially cause a busy-wait loop from writeback threads
1705 		 * and kswapd activity, but those code paths have their own
1706 		 * higher-level throttling.
1707 		 */
1708 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1709 			lock_buffer(bh);
1710 		} else if (!trylock_buffer(bh)) {
1711 			redirty_page_for_writepage(wbc, page);
1712 			continue;
1713 		}
1714 		if (test_clear_buffer_dirty(bh)) {
1715 			mark_buffer_async_write_endio(bh, handler);
1716 		} else {
1717 			unlock_buffer(bh);
1718 		}
1719 	} while ((bh = bh->b_this_page) != head);
1720 
1721 	/*
1722 	 * The page and its buffers are protected by PageWriteback(), so we can
1723 	 * drop the bh refcounts early.
1724 	 */
1725 	BUG_ON(PageWriteback(page));
1726 	set_page_writeback(page);
1727 
1728 	do {
1729 		struct buffer_head *next = bh->b_this_page;
1730 		if (buffer_async_write(bh)) {
1731 			submit_bh(write_op, bh);
1732 			nr_underway++;
1733 		}
1734 		bh = next;
1735 	} while (bh != head);
1736 	unlock_page(page);
1737 
1738 	err = 0;
1739 done:
1740 	if (nr_underway == 0) {
1741 		/*
1742 		 * The page was marked dirty, but the buffers were
1743 		 * clean.  Someone wrote them back by hand with
1744 		 * ll_rw_block/submit_bh.  A rare case.
1745 		 */
1746 		end_page_writeback(page);
1747 
1748 		/*
1749 		 * The page and buffer_heads can be released at any time from
1750 		 * here on.
1751 		 */
1752 	}
1753 	return err;
1754 
1755 recover:
1756 	/*
1757 	 * ENOSPC, or some other error.  We may already have added some
1758 	 * blocks to the file, so we need to write these out to avoid
1759 	 * exposing stale data.
1760 	 * The page is currently locked and not marked for writeback.
1761 	 */
1762 	bh = head;
1763 	/* Recovery: lock and submit the mapped buffers */
1764 	do {
1765 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
1766 		    !buffer_delay(bh)) {
1767 			lock_buffer(bh);
1768 			mark_buffer_async_write_endio(bh, handler);
1769 		} else {
1770 			/*
1771 			 * The buffer may have been set dirty during
1772 			 * attachment to a dirty page.
1773 			 */
1774 			clear_buffer_dirty(bh);
1775 		}
1776 	} while ((bh = bh->b_this_page) != head);
1777 	SetPageError(page);
1778 	BUG_ON(PageWriteback(page));
1779 	mapping_set_error(page->mapping, err);
1780 	set_page_writeback(page);
1781 	do {
1782 		struct buffer_head *next = bh->b_this_page;
1783 		if (buffer_async_write(bh)) {
1784 			clear_buffer_dirty(bh);
1785 			submit_bh(write_op, bh);
1786 			nr_underway++;
1787 		}
1788 		bh = next;
1789 	} while (bh != head);
1790 	unlock_page(page);
1791 	goto done;
1792 }
1793 
1794 /*
1795  * If a page has any new buffers, zero them out here, and mark them uptodate
1796  * and dirty so they'll be written out (in order to prevent uninitialised
1797  * block data from leaking), and clear the new bit.
1798  */
1799 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1800 {
1801 	unsigned int block_start, block_end;
1802 	struct buffer_head *head, *bh;
1803 
1804 	BUG_ON(!PageLocked(page));
1805 	if (!page_has_buffers(page))
1806 		return;
1807 
1808 	bh = head = page_buffers(page);
1809 	block_start = 0;
1810 	do {
1811 		block_end = block_start + bh->b_size;
1812 
1813 		if (buffer_new(bh)) {
1814 			if (block_end > from && block_start < to) {
1815 				if (!PageUptodate(page)) {
1816 					unsigned start, size;
1817 
1818 					start = max(from, block_start);
1819 					size = min(to, block_end) - start;
1820 
1821 					zero_user(page, start, size);
1822 					set_buffer_uptodate(bh);
1823 				}
1824 
1825 				clear_buffer_new(bh);
1826 				mark_buffer_dirty(bh);
1827 			}
1828 		}
1829 
1830 		block_start = block_end;
1831 		bh = bh->b_this_page;
1832 	} while (bh != head);
1833 }
1834 EXPORT_SYMBOL(page_zero_new_buffers);
1835 
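/*
 * Map the blocks covered by [from, to), allocating on-disk blocks where
 * needed, and read in any blocks which will only be partially overwritten,
 * so that the caller may then copy user data into the page.  On error, any
 * freshly-allocated buffers have already been zeroed and dirtied via
 * page_zero_new_buffers() above.
 */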
1836 static int __block_prepare_write(struct inode *inode, struct page *page,
1837 		unsigned from, unsigned to, get_block_t *get_block)
1838 {
1839 	unsigned block_start, block_end;
1840 	sector_t block;
1841 	int err = 0;
1842 	unsigned blocksize, bbits;
1843 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1844 
1845 	BUG_ON(!PageLocked(page));
1846 	BUG_ON(from > PAGE_CACHE_SIZE);
1847 	BUG_ON(to > PAGE_CACHE_SIZE);
1848 	BUG_ON(from > to);
1849 
1850 	blocksize = 1 << inode->i_blkbits;
1851 	if (!page_has_buffers(page))
1852 		create_empty_buffers(page, blocksize, 0);
1853 	head = page_buffers(page);
1854 
1855 	bbits = inode->i_blkbits;
1856 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1857 
1858 	for(bh = head, block_start = 0; bh != head || !block_start;
1859 	    block++, block_start=block_end, bh = bh->b_this_page) {
1860 		block_end = block_start + blocksize;
1861 		if (block_end <= from || block_start >= to) {
1862 			if (PageUptodate(page)) {
1863 				if (!buffer_uptodate(bh))
1864 					set_buffer_uptodate(bh);
1865 			}
1866 			continue;
1867 		}
1868 		if (buffer_new(bh))
1869 			clear_buffer_new(bh);
1870 		if (!buffer_mapped(bh)) {
1871 			WARN_ON(bh->b_size != blocksize);
1872 			err = get_block(inode, block, bh, 1);
1873 			if (err)
1874 				break;
1875 			if (buffer_new(bh)) {
1876 				unmap_underlying_metadata(bh->b_bdev,
1877 							bh->b_blocknr);
1878 				if (PageUptodate(page)) {
1879 					clear_buffer_new(bh);
1880 					set_buffer_uptodate(bh);
1881 					mark_buffer_dirty(bh);
1882 					continue;
1883 				}
1884 				if (block_end > to || block_start < from)
1885 					zero_user_segments(page,
1886 						to, block_end,
1887 						block_start, from);
1888 				continue;
1889 			}
1890 		}
1891 		if (PageUptodate(page)) {
1892 			if (!buffer_uptodate(bh))
1893 				set_buffer_uptodate(bh);
1894 			continue;
1895 		}
1896 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1897 		    !buffer_unwritten(bh) &&
1898 		     (block_start < from || block_end > to)) {
1899 			ll_rw_block(READ, 1, &bh);
1900 			*wait_bh++=bh;
1901 		}
1902 	}
1903 	/*
1904 	 * If we issued read requests - let them complete.
1905 	 */
1906 	while(wait_bh > wait) {
1907 		wait_on_buffer(*--wait_bh);
1908 		if (!buffer_uptodate(*wait_bh))
1909 			err = -EIO;
1910 	}
1911 	if (unlikely(err))
1912 		page_zero_new_buffers(page, from, to);
1913 	return err;
1914 }
1915 
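/*
 * Mark the buffers covered by [from, to) uptodate and dirty after data has
 * been copied into the page, and set the page uptodate if that leaves every
 * buffer on the page uptodate.
 */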
1916 static int __block_commit_write(struct inode *inode, struct page *page,
1917 		unsigned from, unsigned to)
1918 {
1919 	unsigned block_start, block_end;
1920 	int partial = 0;
1921 	unsigned blocksize;
1922 	struct buffer_head *bh, *head;
1923 
1924 	blocksize = 1 << inode->i_blkbits;
1925 
1926 	for(bh = head = page_buffers(page), block_start = 0;
1927 	    bh != head || !block_start;
1928 	    block_start=block_end, bh = bh->b_this_page) {
1929 		block_end = block_start + blocksize;
1930 		if (block_end <= from || block_start >= to) {
1931 			if (!buffer_uptodate(bh))
1932 				partial = 1;
1933 		} else {
1934 			set_buffer_uptodate(bh);
1935 			mark_buffer_dirty(bh);
1936 		}
1937 		clear_buffer_new(bh);
1938 	}
1939 
1940 	/*
1941 	 * If this is a partial write which happened to make all buffers
1942 	 * uptodate then we can optimize away a bogus readpage() for
1943 	 * the next read(). Here we 'discover' whether the page went
1944 	 * uptodate as a result of this (potentially partial) write.
1945 	 */
1946 	if (!partial)
1947 		SetPageUptodate(page);
1948 	return 0;
1949 }
1950 
1951 /*
1952  * block_write_begin takes care of the basic task of block allocation and
1953  * bringing partial write blocks uptodate first.
1954  *
1955  * If *pagep is not NULL, then block_write_begin uses the locked page
1956  * at *pagep rather than allocating its own. In this case, the page will
1957  * not be unlocked or deallocated on failure.
1958  */
1959 int block_write_begin(struct file *file, struct address_space *mapping,
1960 			loff_t pos, unsigned len, unsigned flags,
1961 			struct page **pagep, void **fsdata,
1962 			get_block_t *get_block)
1963 {
1964 	struct inode *inode = mapping->host;
1965 	int status = 0;
1966 	struct page *page;
1967 	pgoff_t index;
1968 	unsigned start, end;
1969 	int ownpage = 0;
1970 
1971 	index = pos >> PAGE_CACHE_SHIFT;
1972 	start = pos & (PAGE_CACHE_SIZE - 1);
1973 	end = start + len;
1974 
1975 	page = *pagep;
1976 	if (page == NULL) {
1977 		ownpage = 1;
1978 		page = grab_cache_page_write_begin(mapping, index, flags);
1979 		if (!page) {
1980 			status = -ENOMEM;
1981 			goto out;
1982 		}
1983 		*pagep = page;
1984 	} else
1985 		BUG_ON(!PageLocked(page));
1986 
1987 	status = __block_prepare_write(inode, page, start, end, get_block);
1988 	if (unlikely(status)) {
1989 		ClearPageUptodate(page);
1990 
1991 		if (ownpage) {
1992 			unlock_page(page);
1993 			page_cache_release(page);
1994 			*pagep = NULL;
1995 
1996 			/*
1997 			 * prepare_write() may have instantiated a few blocks
1998 			 * outside i_size.  Trim these off again. Don't need
1999 			 * i_size_read because we hold i_mutex.
2000 			 */
2001 			if (pos + len > inode->i_size)
2002 				vmtruncate(inode, inode->i_size);
2003 		}
2004 	}
2005 
2006 out:
2007 	return status;
2008 }
2009 EXPORT_SYMBOL(block_write_begin);
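
/*
 * A minimal sketch of a caller (illustrative only; myfs_get_block is a
 * hypothetical get_block_t implementation): a filesystem's ->write_begin
 * can be a thin wrapper.  *pagep is set to NULL so that block_write_begin
 * allocates and locks the page itself:
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		*pagep = NULL;
 *		return block_write_begin(file, mapping, pos, len, flags,
 *					pagep, fsdata, myfs_get_block);
 *	}
 */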
2010 
2011 int block_write_end(struct file *file, struct address_space *mapping,
2012 			loff_t pos, unsigned len, unsigned copied,
2013 			struct page *page, void *fsdata)
2014 {
2015 	struct inode *inode = mapping->host;
2016 	unsigned start;
2017 
2018 	start = pos & (PAGE_CACHE_SIZE - 1);
2019 
2020 	if (unlikely(copied < len)) {
2021 		/*
2022 		 * The buffers that were written will now be uptodate, so we
2023 		 * don't have to worry about a readpage reading them and
2024 		 * overwriting a partial write. However if we have encountered
2025 		 * a short write and only partially written into a buffer, it
2026 		 * will not be marked uptodate, so a readpage might come in and
2027 		 * destroy our partial write.
2028 		 *
2029 		 * Do the simplest thing, and just treat any short write to a
2030 		 * non-uptodate page as a zero-length write, and force the
2031 		 * caller to redo the whole thing.
2032 		 */
2033 		if (!PageUptodate(page))
2034 			copied = 0;
2035 
2036 		page_zero_new_buffers(page, start+copied, start+len);
2037 	}
2038 	flush_dcache_page(page);
2039 
2040 	/* This could be a short (even 0-length) commit */
2041 	__block_commit_write(inode, page, start, start+copied);
2042 
2043 	return copied;
2044 }
2045 EXPORT_SYMBOL(block_write_end);
2046 
2047 int generic_write_end(struct file *file, struct address_space *mapping,
2048 			loff_t pos, unsigned len, unsigned copied,
2049 			struct page *page, void *fsdata)
2050 {
2051 	struct inode *inode = mapping->host;
2052 	int i_size_changed = 0;
2053 
2054 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2055 
2056 	/*
2057 	 * No need to use i_size_read() here, the i_size
2058 	 * cannot change under us because we hold i_mutex.
2059 	 *
2060 	 * But it's important to update i_size while still holding page lock:
2061 	 * page writeout could otherwise come in and zero beyond i_size.
2062 	 */
2063 	if (pos+copied > inode->i_size) {
2064 		i_size_write(inode, pos+copied);
2065 		i_size_changed = 1;
2066 	}
2067 
2068 	unlock_page(page);
2069 	page_cache_release(page);
2070 
2071 	/*
2072 	 * Don't mark the inode dirty under the page lock. First, it unnecessarily
2073 	 * lengthens the time the page lock is held. Second, it forces lock
2074 	 * ordering of page lock and transaction start for journaling
2075 	 * filesystems.
2076 	 */
2077 	if (i_size_changed)
2078 		mark_inode_dirty(inode);
2079 
2080 	return copied;
2081 }
2082 EXPORT_SYMBOL(generic_write_end);
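
/*
 * For illustration, the common pairing (hypothetical myfs again): the
 * wrapper sketched above as ->write_begin, generic_write_end used directly
 * as ->write_end, and block_is_partially_uptodate (below) thrown in:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.write_begin		= myfs_write_begin,
 *		.write_end		= generic_write_end,
 *		.is_partially_uptodate	= block_is_partially_uptodate,
 *	};
 */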
2083 
2084 /*
2085  * block_is_partially_uptodate checks whether buffers within a page are
2086  * uptodate or not.
2087  *
2088  * Returns true if all buffers which correspond to the portion of the
2089  * file we want to read are uptodate.
2090  */
2091 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2092 					unsigned long from)
2093 {
2094 	struct inode *inode = page->mapping->host;
2095 	unsigned block_start, block_end, blocksize;
2096 	unsigned to;
2097 	struct buffer_head *bh, *head;
2098 	int ret = 1;
2099 
2100 	if (!page_has_buffers(page))
2101 		return 0;
2102 
2103 	blocksize = 1 << inode->i_blkbits;
2104 	to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2105 	to = from + to;
2106 	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2107 		return 0;
2108 
2109 	head = page_buffers(page);
2110 	bh = head;
2111 	block_start = 0;
2112 	do {
2113 		block_end = block_start + blocksize;
2114 		if (block_end > from && block_start < to) {
2115 			if (!buffer_uptodate(bh)) {
2116 				ret = 0;
2117 				break;
2118 			}
2119 			if (block_end >= to)
2120 				break;
2121 		}
2122 		block_start = block_end;
2123 		bh = bh->b_this_page;
2124 	} while (bh != head);
2125 
2126 	return ret;
2127 }
2128 EXPORT_SYMBOL(block_is_partially_uptodate);
2129 
2130 /*
2131  * Generic "read page" function for block devices that have the normal
2132  * get_block functionality.  This covers most of the block device filesystems.
2133  * Reads the page asynchronously --- the unlock_buffer() and
2134  * set/clear_buffer_uptodate() functions propagate buffer state into the
2135  * page struct once IO has completed.
2136  */
2137 int block_read_full_page(struct page *page, get_block_t *get_block)
2138 {
2139 	struct inode *inode = page->mapping->host;
2140 	sector_t iblock, lblock;
2141 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2142 	unsigned int blocksize;
2143 	int nr, i;
2144 	int fully_mapped = 1;
2145 
2146 	BUG_ON(!PageLocked(page));
2147 	blocksize = 1 << inode->i_blkbits;
2148 	if (!page_has_buffers(page))
2149 		create_empty_buffers(page, blocksize, 0);
2150 	head = page_buffers(page);
2151 
2152 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2153 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2154 	bh = head;
2155 	nr = 0;
2156 	i = 0;
2157 
2158 	do {
2159 		if (buffer_uptodate(bh))
2160 			continue;
2161 
2162 		if (!buffer_mapped(bh)) {
2163 			int err = 0;
2164 
2165 			fully_mapped = 0;
2166 			if (iblock < lblock) {
2167 				WARN_ON(bh->b_size != blocksize);
2168 				err = get_block(inode, iblock, bh, 0);
2169 				if (err)
2170 					SetPageError(page);
2171 			}
2172 			if (!buffer_mapped(bh)) {
2173 				zero_user(page, i * blocksize, blocksize);
2174 				if (!err)
2175 					set_buffer_uptodate(bh);
2176 				continue;
2177 			}
2178 			/*
2179 			 * get_block() might have updated the buffer
2180 			 * synchronously
2181 			 */
2182 			if (buffer_uptodate(bh))
2183 				continue;
2184 		}
2185 		arr[nr++] = bh;
2186 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2187 
2188 	if (fully_mapped)
2189 		SetPageMappedToDisk(page);
2190 
2191 	if (!nr) {
2192 		/*
2193 		 * All buffers are uptodate - we can set the page uptodate
2194 		 * as well. But not if get_block() returned an error.
2195 		 */
2196 		if (!PageError(page))
2197 			SetPageUptodate(page);
2198 		unlock_page(page);
2199 		return 0;
2200 	}
2201 
2202 	/* Stage two: lock the buffers */
2203 	for (i = 0; i < nr; i++) {
2204 		bh = arr[i];
2205 		lock_buffer(bh);
2206 		mark_buffer_async_read(bh);
2207 	}
2208 
2209 	/*
2210 	 * Stage 3: start the IO.  Check for uptodateness
2211 	 * inside the buffer lock in case another process reading
2212 	 * the underlying blockdev brought it uptodate (the sct fix).
2213 	 */
2214 	for (i = 0; i < nr; i++) {
2215 		bh = arr[i];
2216 		if (buffer_uptodate(bh))
2217 			end_buffer_async_read(bh, 1);
2218 		else
2219 			submit_bh(READ, bh);
2220 	}
2221 	return 0;
2222 }
2223 EXPORT_SYMBOL(block_read_full_page);
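
/*
 * Illustrative wrapper (hypothetical myfs): a ->readpage built on the
 * helper above is a one-liner:
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, myfs_get_block);
 *	}
 */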
2224 
2225 /* utility function for filesystems that need to do work on expanding
2226  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2227  * deal with the hole.
2228  */
2229 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2230 {
2231 	struct address_space *mapping = inode->i_mapping;
2232 	struct page *page;
2233 	void *fsdata;
2234 	int err;
2235 
2236 	err = inode_newsize_ok(inode, size);
2237 	if (err)
2238 		goto out;
2239 
2240 	err = pagecache_write_begin(NULL, mapping, size, 0,
2241 				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2242 				&page, &fsdata);
2243 	if (err)
2244 		goto out;
2245 
2246 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2247 	BUG_ON(err > 0);
2248 
2249 out:
2250 	return err;
2251 }
2252 EXPORT_SYMBOL(generic_cont_expand_simple);
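
/*
 * Sketch of a typical caller (hedged; the surrounding code is invented):
 * a filesystem's ->setattr can handle an expanding truncate by writing a
 * zero-length chunk at the new size, instantiating and zeroing the
 * intervening pagecache:
 *
 *	if ((attr->ia_valid & ATTR_SIZE) &&
 *	    attr->ia_size > i_size_read(inode)) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}
 */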
2253 
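/*
 * Zero out, via pagecache writes, everything between the current end of
 * initialised data (*bytes) and @pos, so that a filesystem which cannot
 * represent holes never exposes stale on-disk contents.
 */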
2254 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2255 			    loff_t pos, loff_t *bytes)
2256 {
2257 	struct inode *inode = mapping->host;
2258 	unsigned blocksize = 1 << inode->i_blkbits;
2259 	struct page *page;
2260 	void *fsdata;
2261 	pgoff_t index, curidx;
2262 	loff_t curpos;
2263 	unsigned zerofrom, offset, len;
2264 	int err = 0;
2265 
2266 	index = pos >> PAGE_CACHE_SHIFT;
2267 	offset = pos & ~PAGE_CACHE_MASK;
2268 
2269 	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2270 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2271 		if (zerofrom & (blocksize-1)) {
2272 			*bytes |= (blocksize-1);
2273 			(*bytes)++;
2274 		}
2275 		len = PAGE_CACHE_SIZE - zerofrom;
2276 
2277 		err = pagecache_write_begin(file, mapping, curpos, len,
2278 						AOP_FLAG_UNINTERRUPTIBLE,
2279 						&page, &fsdata);
2280 		if (err)
2281 			goto out;
2282 		zero_user(page, zerofrom, len);
2283 		err = pagecache_write_end(file, mapping, curpos, len, len,
2284 						page, fsdata);
2285 		if (err < 0)
2286 			goto out;
2287 		BUG_ON(err != len);
2288 		err = 0;
2289 
2290 		balance_dirty_pages_ratelimited(mapping);
2291 	}
2292 
2293 	/* page covers the boundary, find the boundary offset */
2294 	if (index == curidx) {
2295 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2296 		/* nothing to zero if the write starts at or before the initialised boundary */
2297 		if (offset <= zerofrom) {
2298 			goto out;
2299 		}
2300 		if (zerofrom & (blocksize-1)) {
2301 			*bytes |= (blocksize-1);
2302 			(*bytes)++;
2303 		}
2304 		len = offset - zerofrom;
2305 
2306 		err = pagecache_write_begin(file, mapping, curpos, len,
2307 						AOP_FLAG_UNINTERRUPTIBLE,
2308 						&page, &fsdata);
2309 		if (err)
2310 			goto out;
2311 		zero_user(page, zerofrom, len);
2312 		err = pagecache_write_end(file, mapping, curpos, len, len,
2313 						page, fsdata);
2314 		if (err < 0)
2315 			goto out;
2316 		BUG_ON(err != len);
2317 		err = 0;
2318 	}
2319 out:
2320 	return err;
2321 }
2322 
2323 /*
2324  * For moronic filesystems that do not allow holes in files.
2325  * We may have to extend the file.
2326  */
2327 int cont_write_begin(struct file *file, struct address_space *mapping,
2328 			loff_t pos, unsigned len, unsigned flags,
2329 			struct page **pagep, void **fsdata,
2330 			get_block_t *get_block, loff_t *bytes)
2331 {
2332 	struct inode *inode = mapping->host;
2333 	unsigned blocksize = 1 << inode->i_blkbits;
2334 	unsigned zerofrom;
2335 	int err;
2336 
2337 	err = cont_expand_zero(file, mapping, pos, bytes);
2338 	if (err)
2339 		goto out;
2340 
2341 	zerofrom = *bytes & ~PAGE_CACHE_MASK;
2342 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2343 		*bytes |= (blocksize-1);
2344 		(*bytes)++;
2345 	}
2346 
2347 	*pagep = NULL;
2348 	err = block_write_begin(file, mapping, pos, len,
2349 				flags, pagep, fsdata, get_block);
2350 out:
2351 	return err;
2352 }
2353 EXPORT_SYMBOL(cont_write_begin);
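
/*
 * Hedged sketch of a caller (MYFS_I()->i_initialised is invented here;
 * FAT does essentially this with its mmu_private counter): the filesystem
 * passes the address of its per-inode "initialised bytes" count as @bytes:
 *
 *	static int myfs_cont_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		struct inode *inode = mapping->host;
 *
 *		return cont_write_begin(file, mapping, pos, len, flags,
 *					pagep, fsdata, myfs_get_block,
 *					&MYFS_I(inode)->i_initialised);
 *	}
 */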
2354 
2355 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2356 			get_block_t *get_block)
2357 {
2358 	struct inode *inode = page->mapping->host;
2359 	int err = __block_prepare_write(inode, page, from, to, get_block);
2360 	if (err)
2361 		ClearPageUptodate(page);
2362 	return err;
2363 }
2364 EXPORT_SYMBOL(block_prepare_write);
2365 
2366 int block_commit_write(struct page *page, unsigned from, unsigned to)
2367 {
2368 	struct inode *inode = page->mapping->host;
2369 	__block_commit_write(inode,page,from,to);
2370 	return 0;
2371 }
2372 EXPORT_SYMBOL(block_commit_write);
2373 
2374 /*
2375  * block_page_mkwrite() is not allowed to change the file size as it gets
2376  * called from a page fault handler when a page is first dirtied. Hence we must
2377  * be careful to check for EOF conditions here. We set the page up correctly
2378  * for a written page which means we get ENOSPC checking when writing into
2379  * holes and correct delalloc and unwritten extent mapping on filesystems that
2380  * support these features.
2381  *
2382  * We are not allowed to take the i_mutex here so we have to play games to
2383  * protect against truncate races as the page could now be beyond EOF.  Because
2384  * vmtruncate() writes the inode size before removing pages, once we have the
2385  * page lock we can determine safely if the page is beyond EOF. If it is not
2386  * beyond EOF, then the page is guaranteed safe against truncation until we
2387  * unlock the page.
2388  */
2389 int
2390 block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2391 		   get_block_t get_block)
2392 {
2393 	struct page *page = vmf->page;
2394 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2395 	unsigned long end;
2396 	loff_t size;
2397 	int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
2398 
2399 	lock_page(page);
2400 	size = i_size_read(inode);
2401 	if ((page->mapping != inode->i_mapping) ||
2402 	    (page_offset(page) > size)) {
2403 		/* page got truncated out from underneath us */
2404 		unlock_page(page);
2405 		goto out;
2406 	}
2407 
2408 	/* page is wholly or partially inside EOF */
2409 	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2410 		end = size & ~PAGE_CACHE_MASK;
2411 	else
2412 		end = PAGE_CACHE_SIZE;
2413 
2414 	ret = block_prepare_write(page, 0, end, get_block);
2415 	if (!ret)
2416 		ret = block_commit_write(page, 0, end);
2417 
2418 	if (unlikely(ret)) {
2419 		unlock_page(page);
2420 		if (ret == -ENOMEM)
2421 			ret = VM_FAULT_OOM;
2422 		else /* -ENOSPC, -EIO, etc */
2423 			ret = VM_FAULT_SIGBUS;
2424 	} else
2425 		ret = VM_FAULT_LOCKED;
2426 
2427 out:
2428 	return ret;
2429 }
2430 EXPORT_SYMBOL(block_page_mkwrite);
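
/*
 * Typical wiring, sketched with hypothetical names: the fault path only
 * has to supply the filesystem's get_block callback:
 *
 *	static int myfs_page_mkwrite(struct vm_area_struct *vma,
 *				     struct vm_fault *vmf)
 *	{
 *		return block_page_mkwrite(vma, vmf, myfs_get_block);
 *	}
 *
 *	static const struct vm_operations_struct myfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 */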
2431 
2432 /*
2433  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2434  * immediately, while under the page lock.  So it needs a special end_io
2435  * handler which does not touch the bh after unlocking it.
2436  */
2437 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2438 {
2439 	__end_buffer_read_notouch(bh, uptodate);
2440 }
2441 
2442 /*
2443  * Attach the singly-linked list of buffers created by nobh_write_begin to
2444  * the page (converting it to a circular linked list and taking care of page
2445  * dirty races).
2446  */
2447 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2448 {
2449 	struct buffer_head *bh;
2450 
2451 	BUG_ON(!PageLocked(page));
2452 
2453 	spin_lock(&page->mapping->private_lock);
2454 	bh = head;
2455 	do {
2456 		if (PageDirty(page))
2457 			set_buffer_dirty(bh);
2458 		if (!bh->b_this_page)
2459 			bh->b_this_page = head;
2460 		bh = bh->b_this_page;
2461 	} while (bh != head);
2462 	attach_page_buffers(page, head);
2463 	spin_unlock(&page->mapping->private_lock);
2464 }
2465 
2466 /*
2467  * On entry, the page is not uptodate at all.
2468  * On exit, the page is fully uptodate in the areas outside (from,to).
2469  */
2470 int nobh_write_begin(struct file *file, struct address_space *mapping,
2471 			loff_t pos, unsigned len, unsigned flags,
2472 			struct page **pagep, void **fsdata,
2473 			get_block_t *get_block)
2474 {
2475 	struct inode *inode = mapping->host;
2476 	const unsigned blkbits = inode->i_blkbits;
2477 	const unsigned blocksize = 1 << blkbits;
2478 	struct buffer_head *head, *bh;
2479 	struct page *page;
2480 	pgoff_t index;
2481 	unsigned from, to;
2482 	unsigned block_in_page;
2483 	unsigned block_start, block_end;
2484 	sector_t block_in_file;
2485 	int nr_reads = 0;
2486 	int ret = 0;
2487 	int is_mapped_to_disk = 1;
2488 
2489 	index = pos >> PAGE_CACHE_SHIFT;
2490 	from = pos & (PAGE_CACHE_SIZE - 1);
2491 	to = from + len;
2492 
2493 	page = grab_cache_page_write_begin(mapping, index, flags);
2494 	if (!page)
2495 		return -ENOMEM;
2496 	*pagep = page;
2497 	*fsdata = NULL;
2498 
2499 	if (page_has_buffers(page)) {
2500 		unlock_page(page);
2501 		page_cache_release(page);
2502 		*pagep = NULL;
2503 		return block_write_begin(file, mapping, pos, len, flags, pagep,
2504 					fsdata, get_block);
2505 	}
2506 
2507 	if (PageMappedToDisk(page))
2508 		return 0;
2509 
2510 	/*
2511 	 * Allocate buffers so that we can keep track of state, and potentially
2512 	 * attach them to the page if an error occurs. In the common case of
2513 	 * no error, they will just be freed again without ever being attached
2514 	 * to the page (which is all OK, because we're under the page lock).
2515 	 *
2516 	 * Be careful: the buffer linked list is a NULL terminated one, rather
2517 	 * than the circular one we're used to.
2518 	 */
2519 	head = alloc_page_buffers(page, blocksize, 0);
2520 	if (!head) {
2521 		ret = -ENOMEM;
2522 		goto out_release;
2523 	}
2524 
2525 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2526 
2527 	/*
2528 	 * We loop across all blocks in the page, whether or not they are
2529 	 * part of the affected region.  This is so we can discover if the
2530 	 * page is fully mapped-to-disk.
2531 	 */
2532 	for (block_start = 0, block_in_page = 0, bh = head;
2533 		  block_start < PAGE_CACHE_SIZE;
2534 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2535 		int create;
2536 
2537 		block_end = block_start + blocksize;
2538 		bh->b_state = 0;
2539 		create = 1;
2540 		if (block_start >= to)
2541 			create = 0;
2542 		ret = get_block(inode, block_in_file + block_in_page,
2543 					bh, create);
2544 		if (ret)
2545 			goto failed;
2546 		if (!buffer_mapped(bh))
2547 			is_mapped_to_disk = 0;
2548 		if (buffer_new(bh))
2549 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2550 		if (PageUptodate(page)) {
2551 			set_buffer_uptodate(bh);
2552 			continue;
2553 		}
2554 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2555 			zero_user_segments(page, block_start, from,
2556 							to, block_end);
2557 			continue;
2558 		}
2559 		if (buffer_uptodate(bh))
2560 			continue;	/* reiserfs does this */
2561 		if (block_start < from || block_end > to) {
2562 			lock_buffer(bh);
2563 			bh->b_end_io = end_buffer_read_nobh;
2564 			submit_bh(READ, bh);
2565 			nr_reads++;
2566 		}
2567 	}
2568 
2569 	if (nr_reads) {
2570 		/*
2571 		 * The page is locked, so these buffers are protected from
2572 		 * any VM or truncate activity.  Hence we don't need to care
2573 		 * for the buffer_head refcounts.
2574 		 */
2575 		for (bh = head; bh; bh = bh->b_this_page) {
2576 			wait_on_buffer(bh);
2577 			if (!buffer_uptodate(bh))
2578 				ret = -EIO;
2579 		}
2580 		if (ret)
2581 			goto failed;
2582 	}
2583 
2584 	if (is_mapped_to_disk)
2585 		SetPageMappedToDisk(page);
2586 
2587 	*fsdata = head; /* to be released by nobh_write_end */
2588 
2589 	return 0;
2590 
2591 failed:
2592 	BUG_ON(!ret);
2593 	/*
2594 	 * Error recovery is a bit difficult. We need to zero out blocks that
2595 	 * were newly allocated, and dirty them to ensure they get written out.
2596 	 * Buffers need to be attached to the page at this point, otherwise
2597 	 * the handling of potential IO errors during writeout would be hard
2598 	 * (could try doing synchronous writeout, but what if that fails too?)
2599 	 */
2600 	attach_nobh_buffers(page, head);
2601 	page_zero_new_buffers(page, from, to);
2602 
2603 out_release:
2604 	unlock_page(page);
2605 	page_cache_release(page);
2606 	*pagep = NULL;
2607 
2608 	if (pos + len > inode->i_size)
2609 		vmtruncate(inode, inode->i_size);
2610 
2611 	return ret;
2612 }
2613 EXPORT_SYMBOL(nobh_write_begin);
2614 
2615 int nobh_write_end(struct file *file, struct address_space *mapping,
2616 			loff_t pos, unsigned len, unsigned copied,
2617 			struct page *page, void *fsdata)
2618 {
2619 	struct inode *inode = page->mapping->host;
2620 	struct buffer_head *head = fsdata;
2621 	struct buffer_head *bh;
2622 	BUG_ON(fsdata != NULL && page_has_buffers(page));
2623 
2624 	if (unlikely(copied < len) && head)
2625 		attach_nobh_buffers(page, head);
2626 	if (page_has_buffers(page))
2627 		return generic_write_end(file, mapping, pos, len,
2628 					copied, page, fsdata);
2629 
2630 	SetPageUptodate(page);
2631 	set_page_dirty(page);
2632 	if (pos+copied > inode->i_size) {
2633 		i_size_write(inode, pos+copied);
2634 		mark_inode_dirty(inode);
2635 	}
2636 
2637 	unlock_page(page);
2638 	page_cache_release(page);
2639 
2640 	while (head) {
2641 		bh = head;
2642 		head = head->b_this_page;
2643 		free_buffer_head(bh);
2644 	}
2645 
2646 	return copied;
2647 }
2648 EXPORT_SYMBOL(nobh_write_end);
2649 
2650 /*
2651  * nobh_writepage() - based on block_write_full_page() except
2652  * that it tries to operate without attaching bufferheads to
2653  * the page.
2654  */
2655 int nobh_writepage(struct page *page, get_block_t *get_block,
2656 			struct writeback_control *wbc)
2657 {
2658 	struct inode * const inode = page->mapping->host;
2659 	loff_t i_size = i_size_read(inode);
2660 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2661 	unsigned offset;
2662 	int ret;
2663 
2664 	/* Is the page fully inside i_size? */
2665 	if (page->index < end_index)
2666 		goto out;
2667 
2668 	/* Is the page fully outside i_size? (truncate in progress) */
2669 	offset = i_size & (PAGE_CACHE_SIZE-1);
2670 	if (page->index >= end_index+1 || !offset) {
2671 		/*
2672 		 * The page may have dirty, unmapped buffers.  For example,
2673 		 * they may have been added in ext3_writepage().  Make them
2674 		 * freeable here, so the page does not leak.
2675 		 */
2676 #if 0
2677 		/* Not really sure about this - do we need this? */
2678 		if (page->mapping->a_ops->invalidatepage)
2679 			page->mapping->a_ops->invalidatepage(page, offset);
2680 #endif
2681 		unlock_page(page);
2682 		return 0; /* don't care */
2683 	}
2684 
2685 	/*
2686 	 * The page straddles i_size.  It must be zeroed out on each and every
2687 	 * writepage invocation because it may be mmapped.  "A file is mapped
2688 	 * in multiples of the page size.  For a file that is not a multiple of
2689 	 * the  page size, the remaining memory is zeroed when mapped, and
2690 	 * writes to that region are not written out to the file."
2691 	 */
2692 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2693 out:
2694 	ret = mpage_writepage(page, get_block, wbc);
2695 	if (ret == -EAGAIN)
2696 		ret = __block_write_full_page(inode, page, get_block, wbc,
2697 					      end_buffer_async_write);
2698 	return ret;
2699 }
2700 EXPORT_SYMBOL(nobh_writepage);
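
/*
 * Sketch of the nobh trio in use (hypothetical myfs; ext2 wires these up
 * the same way when mounted with the "nobh" option):
 *
 *	static int myfs_nobh_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return nobh_write_begin(file, mapping, pos, len, flags,
 *					pagep, fsdata, myfs_get_block);
 *	}
 *
 *	static int myfs_nobh_writepage(struct page *page,
 *			struct writeback_control *wbc)
 *	{
 *		return nobh_writepage(page, myfs_get_block, wbc);
 *	}
 *
 * with nobh_write_end used directly as the ->write_end method.
 */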
2701 
2702 int nobh_truncate_page(struct address_space *mapping,
2703 			loff_t from, get_block_t *get_block)
2704 {
2705 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2706 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2707 	unsigned blocksize;
2708 	sector_t iblock;
2709 	unsigned length, pos;
2710 	struct inode *inode = mapping->host;
2711 	struct page *page;
2712 	struct buffer_head map_bh;
2713 	int err;
2714 
2715 	blocksize = 1 << inode->i_blkbits;
2716 	length = offset & (blocksize - 1);
2717 
2718 	/* Block boundary? Nothing to do */
2719 	if (!length)
2720 		return 0;
2721 
2722 	length = blocksize - length;
2723 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2724 
2725 	page = grab_cache_page(mapping, index);
2726 	err = -ENOMEM;
2727 	if (!page)
2728 		goto out;
2729 
2730 	if (page_has_buffers(page)) {
2731 has_buffers:
2732 		unlock_page(page);
2733 		page_cache_release(page);
2734 		return block_truncate_page(mapping, from, get_block);
2735 	}
2736 
2737 	/* Find the buffer that contains "offset" */
2738 	pos = blocksize;
2739 	while (offset >= pos) {
2740 		iblock++;
2741 		pos += blocksize;
2742 	}
2743 
2744 	map_bh.b_size = blocksize;
2745 	map_bh.b_state = 0;
2746 	err = get_block(inode, iblock, &map_bh, 0);
2747 	if (err)
2748 		goto unlock;
2749 	/* unmapped? It's a hole - nothing to do */
2750 	if (!buffer_mapped(&map_bh))
2751 		goto unlock;
2752 
2753 	/* Ok, it's mapped. Make sure it's up-to-date */
2754 	if (!PageUptodate(page)) {
2755 		err = mapping->a_ops->readpage(NULL, page);
2756 		if (err) {
2757 			page_cache_release(page);
2758 			goto out;
2759 		}
2760 		lock_page(page);
2761 		if (!PageUptodate(page)) {
2762 			err = -EIO;
2763 			goto unlock;
2764 		}
2765 		if (page_has_buffers(page))
2766 			goto has_buffers;
2767 	}
2768 	zero_user(page, offset, length);
2769 	set_page_dirty(page);
2770 	err = 0;
2771 
2772 unlock:
2773 	unlock_page(page);
2774 	page_cache_release(page);
2775 out:
2776 	return err;
2777 }
2778 EXPORT_SYMBOL(nobh_truncate_page);
2779 
2780 int block_truncate_page(struct address_space *mapping,
2781 			loff_t from, get_block_t *get_block)
2782 {
2783 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2784 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2785 	unsigned blocksize;
2786 	sector_t iblock;
2787 	unsigned length, pos;
2788 	struct inode *inode = mapping->host;
2789 	struct page *page;
2790 	struct buffer_head *bh;
2791 	int err;
2792 
2793 	blocksize = 1 << inode->i_blkbits;
2794 	length = offset & (blocksize - 1);
2795 
2796 	/* Block boundary? Nothing to do */
2797 	if (!length)
2798 		return 0;
2799 
2800 	length = blocksize - length;
2801 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2802 
2803 	page = grab_cache_page(mapping, index);
2804 	err = -ENOMEM;
2805 	if (!page)
2806 		goto out;
2807 
2808 	if (!page_has_buffers(page))
2809 		create_empty_buffers(page, blocksize, 0);
2810 
2811 	/* Find the buffer that contains "offset" */
2812 	bh = page_buffers(page);
2813 	pos = blocksize;
2814 	while (offset >= pos) {
2815 		bh = bh->b_this_page;
2816 		iblock++;
2817 		pos += blocksize;
2818 	}
2819 
2820 	err = 0;
2821 	if (!buffer_mapped(bh)) {
2822 		WARN_ON(bh->b_size != blocksize);
2823 		err = get_block(inode, iblock, bh, 0);
2824 		if (err)
2825 			goto unlock;
2826 		/* unmapped? It's a hole - nothing to do */
2827 		if (!buffer_mapped(bh))
2828 			goto unlock;
2829 	}
2830 
2831 	/* Ok, it's mapped. Make sure it's up-to-date */
2832 	if (PageUptodate(page))
2833 		set_buffer_uptodate(bh);
2834 
2835 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2836 		err = -EIO;
2837 		ll_rw_block(READ, 1, &bh);
2838 		wait_on_buffer(bh);
2839 		/* Uhhuh. Read error. Complain and punt. */
2840 		if (!buffer_uptodate(bh))
2841 			goto unlock;
2842 	}
2843 
2844 	zero_user(page, offset, length);
2845 	mark_buffer_dirty(bh);
2846 	err = 0;
2847 
2848 unlock:
2849 	unlock_page(page);
2850 	page_cache_release(page);
2851 out:
2852 	return err;
2853 }
2854 EXPORT_SYMBOL(block_truncate_page);
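
/*
 * Typical use, sketched: from a filesystem's truncate path, zero the tail
 * of the new last block so stale data beyond EOF is never exposed:
 *
 *	err = block_truncate_page(inode->i_mapping, inode->i_size,
 *				  myfs_get_block);
 */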
2855 
2856 /*
2857  * The generic ->writepage function for buffer-backed address_spaces.
2858  * This form passes in the end_io handler used to finish the IO.
2859  */
2860 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2861 			struct writeback_control *wbc, bh_end_io_t *handler)
2862 {
2863 	struct inode * const inode = page->mapping->host;
2864 	loff_t i_size = i_size_read(inode);
2865 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2866 	unsigned offset;
2867 
2868 	/* Is the page fully inside i_size? */
2869 	if (page->index < end_index)
2870 		return __block_write_full_page(inode, page, get_block, wbc,
2871 					       handler);
2872 
2873 	/* Is the page fully outside i_size? (truncate in progress) */
2874 	offset = i_size & (PAGE_CACHE_SIZE-1);
2875 	if (page->index >= end_index+1 || !offset) {
2876 		/*
2877 		 * The page may have dirty, unmapped buffers.  For example,
2878 		 * they may have been added in ext3_writepage().  Make them
2879 		 * freeable here, so the page does not leak.
2880 		 */
2881 		do_invalidatepage(page, 0);
2882 		unlock_page(page);
2883 		return 0; /* don't care */
2884 	}
2885 
2886 	/*
2887 	 * The page straddles i_size.  It must be zeroed out on each and every
2888 	 * writepage invocation because it may be mmapped.  "A file is mapped
2889 	 * in multiples of the page size.  For a file that is not a multiple of
2890 	 * the  page size, the remaining memory is zeroed when mapped, and
2891 	 * writes to that region are not written out to the file."
2892 	 */
2893 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2894 	return __block_write_full_page(inode, page, get_block, wbc, handler);
2895 }
2896 EXPORT_SYMBOL(block_write_full_page_endio);
2897 
2898 /*
2899  * The generic ->writepage function for buffer-backed address_spaces
2900  */
2901 int block_write_full_page(struct page *page, get_block_t *get_block,
2902 			struct writeback_control *wbc)
2903 {
2904 	return block_write_full_page_endio(page, get_block, wbc,
2905 					   end_buffer_async_write);
2906 }
2907 EXPORT_SYMBOL(block_write_full_page);
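
/*
 * Illustrative wrapper (hypothetical myfs): the usual ->writepage built on
 * the above:
 *
 *	static int myfs_writepage(struct page *page,
 *			struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, myfs_get_block, wbc);
 *	}
 */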
2908 
2909 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2910 			    get_block_t *get_block)
2911 {
2912 	struct buffer_head tmp;
2913 	struct inode *inode = mapping->host;
2914 	tmp.b_state = 0;
2915 	tmp.b_blocknr = 0;
2916 	tmp.b_size = 1 << inode->i_blkbits;
2917 	get_block(inode, block, &tmp, 0);
2918 	return tmp.b_blocknr;
2919 }
2920 EXPORT_SYMBOL(generic_block_bmap);
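
/*
 * ->bmap (used by the FIBMAP ioctl, among others) is then typically just:
 *
 *	static sector_t myfs_bmap(struct address_space *mapping,
 *			sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */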
2921 
2922 static void end_bio_bh_io_sync(struct bio *bio, int err)
2923 {
2924 	struct buffer_head *bh = bio->bi_private;
2925 
2926 	if (err == -EOPNOTSUPP) {
2927 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2928 		set_bit(BH_Eopnotsupp, &bh->b_state);
2929 	}
2930 
2931 	if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2932 		set_bit(BH_Quiet, &bh->b_state);
2933 
2934 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2935 	bio_put(bio);
2936 }
2937 
2938 int submit_bh(int rw, struct buffer_head * bh)
2939 {
2940 	struct bio *bio;
2941 	int ret = 0;
2942 
2943 	BUG_ON(!buffer_locked(bh));
2944 	BUG_ON(!buffer_mapped(bh));
2945 	BUG_ON(!bh->b_end_io);
2946 	BUG_ON(buffer_delay(bh));
2947 	BUG_ON(buffer_unwritten(bh));
2948 
2949 	/*
2950 	 * Mask in barrier bit for a write (could be either a WRITE or a
2951 	 * WRITE_SYNC).
2952 	 */
2953 	if (buffer_ordered(bh) && (rw & WRITE))
2954 		rw |= WRITE_BARRIER;
2955 
2956 	/*
2957 	 * Only clear out a write error when rewriting
2958 	 */
2959 	if (test_set_buffer_req(bh) && (rw & WRITE))
2960 		clear_buffer_write_io_error(bh);
2961 
2962 	/*
2963 	 * from here on down, it's all bio -- do the initial mapping,
2964 	 * submit_bio -> generic_make_request may further map this bio around
2965 	 */
2966 	bio = bio_alloc(GFP_NOIO, 1);
2967 
2968 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2969 	bio->bi_bdev = bh->b_bdev;
2970 	bio->bi_io_vec[0].bv_page = bh->b_page;
2971 	bio->bi_io_vec[0].bv_len = bh->b_size;
2972 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2973 
2974 	bio->bi_vcnt = 1;
2975 	bio->bi_idx = 0;
2976 	bio->bi_size = bh->b_size;
2977 
2978 	bio->bi_end_io = end_bio_bh_io_sync;
2979 	bio->bi_private = bh;
2980 
2981 	bio_get(bio);
2982 	submit_bio(rw, bio);
2983 
2984 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
2985 		ret = -EOPNOTSUPP;
2986 
2987 	bio_put(bio);
2988 	return ret;
2989 }
2990 EXPORT_SYMBOL(submit_bh);
2991 
2992 /**
2993  * ll_rw_block: low-level access to block devices (DEPRECATED)
2994  * @rw: the kind of I/O: %READ, %WRITE, %SWRITE or %READA (readahead)
2995  * @nr: number of &struct buffer_heads in the array
2996  * @bhs: array of pointers to &struct buffer_head
2997  *
2998  * ll_rw_block() takes an array of pointers to &struct buffer_head, and
2999  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
3000  * option, %SWRITE, is like %WRITE except that the *current* data in the
3001  * buffers is guaranteed to be sent to disk.  The %READA option is described
3002  * in the documentation for generic_make_request(), which ll_rw_block() calls.
3003  *
3004  * This function drops any buffer that it cannot get a lock on (with the
3005  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
3006  * clean when doing a write request, and any buffer that appears to be
3007  * up-to-date when doing a read request.  Further it marks as clean buffers that
3008  * are processed for writing (the buffer cache won't assume that they are
3009  * actually clean until the buffer gets unlocked).
3010  *
3011  * ll_rw_block sets b_end_io to a simple completion handler that marks
3012  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
3013  * any waiters.
3014  *
3015  * All of the buffers must be for the same device, and must also be a
3016  * multiple of the current approved size for the device.
3017  */
3018 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
3019 {
3020 	int i;
3021 
3022 	for (i = 0; i < nr; i++) {
3023 		struct buffer_head *bh = bhs[i];
3024 
3025 		if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
3026 			lock_buffer(bh);
3027 		else if (!trylock_buffer(bh))
3028 			continue;
3029 
3030 		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
3031 		    rw == SWRITE_SYNC_PLUG) {
3032 			if (test_clear_buffer_dirty(bh)) {
3033 				bh->b_end_io = end_buffer_write_sync;
3034 				get_bh(bh);
3035 				if (rw == SWRITE_SYNC)
3036 					submit_bh(WRITE_SYNC, bh);
3037 				else
3038 					submit_bh(WRITE, bh);
3039 				continue;
3040 			}
3041 		} else {
3042 			if (!buffer_uptodate(bh)) {
3043 				bh->b_end_io = end_buffer_read_sync;
3044 				get_bh(bh);
3045 				submit_bh(rw, bh);
3046 				continue;
3047 			}
3048 		}
3049 		unlock_buffer(bh);
3050 	}
3051 }
3052 EXPORT_SYMBOL(ll_rw_block);
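
/*
 * Sketch of the classic usage pattern: kick off reads on a batch of
 * metadata buffers, then wait for each one and check the result:
 *
 *	ll_rw_block(READ, nr, bhs);
 *	for (i = 0; i < nr; i++) {
 *		wait_on_buffer(bhs[i]);
 *		if (!buffer_uptodate(bhs[i]))
 *			return -EIO;
 *	}
 */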
3053 
3054 /*
3055  * For a data-integrity writeout, we need to wait upon any in-progress I/O,
3056  * then start new I/O and wait upon that too.  The caller must have a ref on
3057  * the buffer_head.
3058  */
3059 int sync_dirty_buffer(struct buffer_head *bh)
3060 {
3061 	int ret = 0;
3062 
3063 	WARN_ON(atomic_read(&bh->b_count) < 1);
3064 	lock_buffer(bh);
3065 	if (test_clear_buffer_dirty(bh)) {
3066 		get_bh(bh);
3067 		bh->b_end_io = end_buffer_write_sync;
3068 		ret = submit_bh(WRITE_SYNC, bh);
3069 		wait_on_buffer(bh);
3070 		if (buffer_eopnotsupp(bh)) {
3071 			clear_buffer_eopnotsupp(bh);
3072 			ret = -EOPNOTSUPP;
3073 		}
3074 		if (!ret && !buffer_uptodate(bh))
3075 			ret = -EIO;
3076 	} else {
3077 		unlock_buffer(bh);
3078 	}
3079 	return ret;
3080 }
3081 EXPORT_SYMBOL(sync_dirty_buffer);
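
/*
 * Sketch of a common caller: synchronously writing out a modified on-disk
 * structure held in a buffer (say, a superblock buffer obtained earlier
 * with sb_bread()):
 *
 *	mark_buffer_dirty(sbh);
 *	err = sync_dirty_buffer(sbh);
 *	if (!err && buffer_write_io_error(sbh))
 *		err = -EIO;
 */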
3082 
3083 /*
3084  * try_to_free_buffers() checks if all the buffers on this particular page
3085  * are unused, and releases them if so.
3086  *
3087  * Exclusion against try_to_free_buffers may be obtained by either
3088  * locking the page or by holding its mapping's private_lock.
3089  *
3090  * If the page is dirty but all the buffers are clean then we need to
3091  * be sure to mark the page clean as well.  This is because the page
3092  * may be against a block device, and a later reattachment of buffers
3093  * to a dirty page will set *all* buffers dirty.  Which would corrupt
3094  * filesystem data on the same device.
3095  *
3096  * The same applies to regular filesystem pages: if all the buffers are
3097  * clean then we set the page clean and proceed.  To do that, we require
3098  * total exclusion from __set_page_dirty_buffers().  That is obtained with
3099  * private_lock.
3100  *
3101  * try_to_free_buffers() is non-blocking.
3102  */
3103 static inline int buffer_busy(struct buffer_head *bh)
3104 {
3105 	return atomic_read(&bh->b_count) |
3106 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3107 }
3108 
3109 static int
3110 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3111 {
3112 	struct buffer_head *head = page_buffers(page);
3113 	struct buffer_head *bh;
3114 
3115 	bh = head;
3116 	do {
3117 		if (buffer_write_io_error(bh) && page->mapping)
3118 			set_bit(AS_EIO, &page->mapping->flags);
3119 		if (buffer_busy(bh))
3120 			goto failed;
3121 		bh = bh->b_this_page;
3122 	} while (bh != head);
3123 
3124 	do {
3125 		struct buffer_head *next = bh->b_this_page;
3126 
3127 		if (bh->b_assoc_map)
3128 			__remove_assoc_queue(bh);
3129 		bh = next;
3130 	} while (bh != head);
3131 	*buffers_to_free = head;
3132 	__clear_page_buffers(page);
3133 	return 1;
3134 failed:
3135 	return 0;
3136 }
3137 
3138 int try_to_free_buffers(struct page *page)
3139 {
3140 	struct address_space * const mapping = page->mapping;
3141 	struct buffer_head *buffers_to_free = NULL;
3142 	int ret = 0;
3143 
3144 	BUG_ON(!PageLocked(page));
3145 	if (PageWriteback(page))
3146 		return 0;
3147 
3148 	if (mapping == NULL) {		/* can this still happen? */
3149 		ret = drop_buffers(page, &buffers_to_free);
3150 		goto out;
3151 	}
3152 
3153 	spin_lock(&mapping->private_lock);
3154 	ret = drop_buffers(page, &buffers_to_free);
3155 
3156 	/*
3157 	 * If the filesystem writes its buffers by hand (eg ext3)
3158 	 * then we can have clean buffers against a dirty page.  We
3159 	 * clean the page here; otherwise the VM will never notice
3160 	 * that the filesystem did any IO at all.
3161 	 *
3162 	 * Also, during truncate, discard_buffer will have marked all
3163 	 * the page's buffers clean.  We discover that here and clean
3164 	 * the page also.
3165 	 *
3166 	 * private_lock must be held over this entire operation in order
3167 	 * to synchronise against __set_page_dirty_buffers and prevent the
3168 	 * dirty bit from being lost.
3169 	 */
3170 	if (ret)
3171 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
3172 	spin_unlock(&mapping->private_lock);
3173 out:
3174 	if (buffers_to_free) {
3175 		struct buffer_head *bh = buffers_to_free;
3176 
3177 		do {
3178 			struct buffer_head *next = bh->b_this_page;
3179 			free_buffer_head(bh);
3180 			bh = next;
3181 		} while (bh != buffers_to_free);
3182 	}
3183 	return ret;
3184 }
3185 EXPORT_SYMBOL(try_to_free_buffers);
3186 
3187 void block_sync_page(struct page *page)
3188 {
3189 	struct address_space *mapping;
3190 
3191 	smp_mb();
3192 	mapping = page_mapping(page);
3193 	if (mapping)
3194 		blk_run_backing_dev(mapping->backing_dev_info, page);
3195 }
3196 EXPORT_SYMBOL(block_sync_page);
3197 
3198 /*
3199  * There are no bdflush tunables left.  But distributions are
3200  * still running obsolete flush daemons, so we terminate them here.
3201  *
3202  * Use of bdflush() is deprecated and will be removed in a future kernel.
3203  * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3204  */
3205 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3206 {
3207 	static int msg_count;
3208 
3209 	if (!capable(CAP_SYS_ADMIN))
3210 		return -EPERM;
3211 
3212 	if (msg_count < 5) {
3213 		msg_count++;
3214 		printk(KERN_INFO
3215 			"warning: process `%s' used the obsolete bdflush"
3216 			" system call\n", current->comm);
3217 		printk(KERN_INFO "Fix your initscripts?\n");
3218 	}
3219 
3220 	if (func == 1)
3221 		do_exit(0);
3222 	return 0;
3223 }
3224 
3225 /*
3226  * Buffer-head allocation
3227  */
3228 static struct kmem_cache *bh_cachep;
3229 
3230 /*
3231  * Once the number of bh's in the machine exceeds this level, we start
3232  * stripping them in writeback.
3233  */
3234 static int max_buffer_heads;
3235 
3236 int buffer_heads_over_limit;
3237 
3238 struct bh_accounting {
3239 	int nr;			/* Number of live bh's */
3240 	int ratelimit;		/* Limit cacheline bouncing */
3241 };
3242 
3243 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3244 
3245 static void recalc_bh_state(void)
3246 {
3247 	int i;
3248 	int tot = 0;
3249 
3250 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3251 		return;
3252 	__get_cpu_var(bh_accounting).ratelimit = 0;
3253 	for_each_online_cpu(i)
3254 		tot += per_cpu(bh_accounting, i).nr;
3255 	buffer_heads_over_limit = (tot > max_buffer_heads);
3256 }
3257 
3258 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3259 {
3260 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3261 	if (ret) {
3262 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3263 		get_cpu_var(bh_accounting).nr++;
3264 		recalc_bh_state();
3265 		put_cpu_var(bh_accounting);
3266 	}
3267 	return ret;
3268 }
3269 EXPORT_SYMBOL(alloc_buffer_head);
3270 
3271 void free_buffer_head(struct buffer_head *bh)
3272 {
3273 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
3274 	kmem_cache_free(bh_cachep, bh);
3275 	get_cpu_var(bh_accounting).nr--;
3276 	recalc_bh_state();
3277 	put_cpu_var(bh_accounting);
3278 }
3279 EXPORT_SYMBOL(free_buffer_head);
3280 
3281 static void buffer_exit_cpu(int cpu)
3282 {
3283 	int i;
3284 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3285 
3286 	for (i = 0; i < BH_LRU_SIZE; i++) {
3287 		brelse(b->bhs[i]);
3288 		b->bhs[i] = NULL;
3289 	}
3290 	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3291 	per_cpu(bh_accounting, cpu).nr = 0;
3292 	put_cpu_var(bh_accounting);
3293 }
3294 
3295 static int buffer_cpu_notify(struct notifier_block *self,
3296 			      unsigned long action, void *hcpu)
3297 {
3298 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3299 		buffer_exit_cpu((unsigned long)hcpu);
3300 	return NOTIFY_OK;
3301 }
3302 
3303 /**
3304  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3305  * @bh: struct buffer_head
3306  *
3307  * Returns 1 if the buffer is uptodate; otherwise returns 0
3308  * with the buffer locked.
3309  */
3310 int bh_uptodate_or_lock(struct buffer_head *bh)
3311 {
3312 	if (!buffer_uptodate(bh)) {
3313 		lock_buffer(bh);
3314 		if (!buffer_uptodate(bh))
3315 			return 0;
3316 		unlock_buffer(bh);
3317 	}
3318 	return 1;
3319 }
3320 EXPORT_SYMBOL(bh_uptodate_or_lock);
3321 
3322 /**
3323  * bh_submit_read - Submit a locked buffer for reading
3324  * @bh: struct buffer_head
3325  *
3326  * Returns zero on success and -EIO on error.
3327  */
3328 int bh_submit_read(struct buffer_head *bh)
3329 {
3330 	BUG_ON(!buffer_locked(bh));
3331 
3332 	if (buffer_uptodate(bh)) {
3333 		unlock_buffer(bh);
3334 		return 0;
3335 	}
3336 
3337 	get_bh(bh);
3338 	bh->b_end_io = end_buffer_read_sync;
3339 	submit_bh(READ, bh);
3340 	wait_on_buffer(bh);
3341 	if (buffer_uptodate(bh))
3342 		return 0;
3343 	return -EIO;
3344 }
3345 EXPORT_SYMBOL(bh_submit_read);
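
/*
 * The two helpers above combine into the usual "read a metadata block
 * unless it is already cached uptodate" pattern, sketched here:
 *
 *	bh = sb_getblk(sb, blocknr);
 *	if (bh && !bh_uptodate_or_lock(bh)) {
 *		if (bh_submit_read(bh)) {
 *			brelse(bh);
 *			bh = NULL;
 *		}
 *	}
 */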
3346 
3347 void __init buffer_init(void)
3348 {
3349 	int nrpages;
3350 
3351 	bh_cachep = kmem_cache_create("buffer_head",
3352 			sizeof(struct buffer_head), 0,
3353 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3354 				SLAB_MEM_SPREAD),
3355 				NULL);
3356 
3357 	/*
3358 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3359 	 */
3360 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3361 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3362 	hotcpu_notifier(buffer_cpu_notify, 0);
3363 }
3364